diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 922688484..e0178c8ae 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -449,6 +449,7 @@ target_sources (rippled PRIVATE src/ripple/basics/impl/UptimeClock.cpp src/ripple/basics/impl/make_SSLContext.cpp src/ripple/basics/impl/mulDiv.cpp + src/ripple/basics/impl/partitioned_unordered_map.cpp #[===============================[ main sources: subdir: conditions @@ -485,7 +486,6 @@ target_sources (rippled PRIVATE src/ripple/ledger/impl/ApplyViewBase.cpp src/ripple/ledger/impl/ApplyViewImpl.cpp src/ripple/ledger/impl/BookDirs.cpp - src/ripple/ledger/impl/CachedSLEs.cpp src/ripple/ledger/impl/CachedView.cpp src/ripple/ledger/impl/Directory.cpp src/ripple/ledger/impl/OpenView.cpp diff --git a/Builds/containers/gitlab-ci/build_package.sh b/Builds/containers/gitlab-ci/build_package.sh index 31d043778..9d815be45 100644 --- a/Builds/containers/gitlab-ci/build_package.sh +++ b/Builds/containers/gitlab-ci/build_package.sh @@ -22,7 +22,6 @@ time cmake \ -Dpackages_only=ON \ -Dcontainer_label="${container_tag}" \ -Dhave_package_container=ON \ - -DCMAKE_VERBOSE_MAKEFILE=ON \ + -DCMAKE_VERBOSE_MAKEFILE=OFF \ -G Ninja ../.. -time cmake --build . --target ${pkgtype} -- -v - +time cmake --build . --target ${pkgtype} diff --git a/Builds/containers/packaging/dpkg/debian/rules b/Builds/containers/packaging/dpkg/debian/rules index a49041e97..bdf3c9a33 100755 --- a/Builds/containers/packaging/dpkg/debian/rules +++ b/Builds/containers/packaging/dpkg/debian/rules @@ -22,17 +22,17 @@ override_dh_auto_configure: cmake .. -G Ninja \ -DCMAKE_INSTALL_PREFIX=/opt/ripple \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_UNITY_BUILD_BATCH_SIZE=10 \ -Dstatic=ON \ + -Dunity=OFF \ -Dvalidator_keys=ON \ - -DCMAKE_VERBOSE_MAKEFILE=ON + -DCMAKE_VERBOSE_MAKEFILE=OFF override_dh_auto_build: cd bld && \ - cmake --build . --target rippled --target validator-keys --parallel -- -v + cmake --build . 
--target rippled --target validator-keys --parallel override_dh_auto_install: - cd bld && DESTDIR=../debian/tmp cmake --build . --target install -- -v + cd bld && DESTDIR=../debian/tmp cmake --build . --target install install -D bld/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys install -D Builds/containers/shared/update-rippled.sh debian/tmp/opt/ripple/bin/update-rippled.sh install -D bin/getRippledInfo debian/tmp/opt/ripple/bin/getRippledInfo diff --git a/Builds/containers/packaging/rpm/rippled.spec b/Builds/containers/packaging/rpm/rippled.spec index 87e3ca7e3..8514e18d9 100644 --- a/Builds/containers/packaging/rpm/rippled.spec +++ b/Builds/containers/packaging/rpm/rippled.spec @@ -32,15 +32,15 @@ core library for development of standalone applications that sign transactions. cd rippled mkdir -p bld.release cd bld.release -cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -DCMAKE_UNITY_BUILD_BATCH_SIZE=10 -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=ON -Dvalidator_keys=ON -cmake --build . --parallel --target rippled --target validator-keys -- -v +cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dstatic=true -Dunity=OFF -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON +cmake --build . 
--parallel --target rippled --target validator-keys %pre test -e /etc/pki/tls || { mkdir -p /etc/pki; ln -s /usr/lib/ssl /etc/pki/tls; } %install rm -rf $RPM_BUILD_ROOT -DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.release --target install -- -v +DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.release --target install rm -rf ${RPM_BUILD_ROOT}/%{_prefix}/lib64/cmake/date install -d ${RPM_BUILD_ROOT}/etc/opt/ripple install -d ${RPM_BUILD_ROOT}/usr/local/bin @@ -110,4 +110,3 @@ chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron * Thu Jun 02 2016 Brandon Wilson - Install validators.txt - diff --git a/Builds/containers/shared/build_deps.sh b/Builds/containers/shared/build_deps.sh index 14c007b56..dc91e99bd 100755 --- a/Builds/containers/shared/build_deps.sh +++ b/Builds/containers/shared/build_deps.sh @@ -29,7 +29,7 @@ cd openssl-${OPENSSL_VER} # NOTE: add -g to the end of the following line if we want debug symbols for openssl SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") ./config -fPIC --prefix=/opt/local/openssl --openssldir=${SSLDIR} zlib shared -make -j$(nproc) +make -j$(nproc) >> make_output.txt 2>&1 make install cd .. rm -f openssl-${OPENSSL_VER}.tar.gz @@ -42,7 +42,7 @@ tar xzf libarchive-3.4.1.tar.gz cd libarchive-3.4.1 mkdir _bld && cd _bld cmake -DCMAKE_BUILD_TYPE=Release .. -make -j$(nproc) +make -j$(nproc) >> make_output.txt 2>&1 make install cd ../.. rm -f libarchive-3.4.1.tar.gz @@ -54,7 +54,7 @@ tar xf protobuf-all-3.10.1.tar.gz cd protobuf-3.10.1 ./autogen.sh ./configure -make -j$(nproc) +make -j$(nproc) >> make_output.txt 2>&1 make install ldconfig cd .. @@ -77,7 +77,7 @@ cmake \ -DCARES_BUILD_TESTS=OFF \ -DCARES_BUILD_CONTAINER_TESTS=OFF \ .. -make -j$(nproc) +make -j$(nproc) >> make_output.txt 2>&1 make install cd ../.. rm -f c-ares-1.15.0.tar.gz @@ -97,7 +97,7 @@ cmake \ -DgRPC_PROTOBUF_PROVIDER=package \ -DProtobuf_USE_STATIC_LIBS=ON \ .. 
-make -j$(nproc) +make -j$(nproc) >> make_output.txt 2>&1 make install cd ../.. rm -f xf v1.25.0.tar.gz @@ -114,7 +114,7 @@ if [ "${CI_USE}" = true ] ; then mkdir build cd build cmake -G "Unix Makefiles" .. - make -j$(nproc) + make -j$(nproc) >> make_output.txt 2>&1 make install cd ../.. rm -f Release_1_8_16.tar.gz diff --git a/CMakeLists.txt b/CMakeLists.txt index de41201d2..87e59432c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,6 +6,15 @@ endif () project (rippled) +# make GIT_COMMIT_HASH define available to all sources +find_package(Git) +if(Git_FOUND) + execute_process(COMMAND ${GIT_EXECUTABLE} describe --always --abbrev=40 + OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE GIT_COMMIT_HASH) + message(STATUS gch: ${GIT_COMMIT_HASH}) + add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}") +endif() #git + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/deps") diff --git a/README.md b/README.md index c35ccffc3..e6c49dc26 100644 --- a/README.md +++ b/README.md @@ -134,19 +134,19 @@ Examples: # The XRP Ledger -The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer servers. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator. +The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator. ## XRP [XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. 
The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP. Its creators gifted 80 billion XRP to a company, now called [Ripple](https://ripple.com/), to develop the XRP Ledger and its ecosystem. Ripple uses XRP to help build the Internet of Value, ushering in a world in which money moves as fast and efficiently as information does today. ## rippled -The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE). The `rippled` server is written primarily in C++ and runs on a variety of platforms. +The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html). ### Build from Source * [Linux](Builds/linux/README.md) -* [Mac](Builds/macos/README.md) -* [Windows](Builds/VisualStudio2017/README.md) +* [Mac](Builds/macos/README.md) (Not recommended for production) +* [Windows](Builds/VisualStudio2017/README.md) (Not recommended for production) ## Key Features of the XRP Ledger diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 2004cb01e..2c0a072ad 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -13,6 +13,67 @@ Have new ideas? Need help with setting up your node? Come visit us [here](https: # Releases +## Version 1.8.2 +Ripple has released version 1.8.2 of rippled, the reference server implementation of the XRP Ledger protocol. This release addresses the full transaction queues and elevated transaction fees issue observed on the XRP ledger, and also provides some optimizations and small fixes to improve the server's performance overall. 
+
+### Summary of Issues
+Recently, servers in the XRP Ledger network have had full transaction queues and transactions paying low fees have mostly not been able to be confirmed through the queue. After investigation, it was discovered that a large influx of transactions to the network caused it to raise the transaction costs to be proposed in the next ledger block, and defer transactions paying lower costs to later ledgers. The first part worked as designed, but deferred transactions were not being confirmed as the ledger had capacity to process them.
+
+The root cause was that there were very many low-cost transactions that different servers in the network received in a different order due to incidental differences in timing or network topology, which caused validators to propose different sets of low-cost transactions from the queue. Since none of these transactions had support from a majority of validators, they were removed from the proposed transaction set. Normally, any transactions removed from a proposed transaction set are supposed to be retried in the next ledger, but servers attempted to put these deferred transactions into their transaction queues first, which had filled up. As a result, the deferred transactions were discarded, and the network was only able to confirm transactions that paid high costs.
+
+### Bug Fixes
+
+- **Address elevated transaction fees**: This change addresses the full queue problems in two ways. First, it puts deferred transactions directly into the open ledger, rather than the transaction queue. This reverts a subset of the changes from [ximinez@62127d7](https://github.com/ximinez/rippled/commit/62127d725d801641bfaa61dee7d88c95e48820c5). A transaction that is in the open ledger but doesn't get validated should stay in the open ledger so that it can be proposed again right away. 
Second, it changes the order in which transactions are pulled from the transaction queue to increase the overlap in servers' initial transaction consensus proposals. Like the old rules, transactions paying higher fee levels are selected first. Unlike the old rules, transactions paying the same fee level are ordered by transaction ID / hash ascending. (Previously, transactions paying the same fee level were unsorted, resulting in each server having a different order.) + +- **Add ignore_default option to account_lines API**: This flag, if present, suppresses the output of incoming trust lines in the default state. This is primarily motivated by observing that users often have many unwanted incoming trust lines in a default state, which are not useful in the vast majority of cases. Being able to suppress those when doing `account_lines` saves bandwidth and resources. ([#3980](https://github.com/ripple/rippled/pull/3980)) + +- **Make I/O and prefetch worker threads configurable**: This commit adds the ability to specify **io_workers** and **prefetch_workers** in the config file which can be used to specify the number of threads for processing raw inbound and outbound IO and configure the number of threads for performing node store prefetching. ([#3994](https://github.com/ripple/rippled/pull/3994)) + +- **Enforce account RPC limits by objects traversed**: This changes the way the account_objects API method counts and limits the number of objects it returns. Instead of limiting results by the number of objects found, it counts by the number of objects traversed. Additionally, the default and maximum limits for non-admin connections have been decreased. This reduces the amount of work that one API call can do so that public API servers can share load more effectively. 
([#4032](https://github.com/ripple/rippled/pull/4032)) + +- **Fix a crash on shutdown**: The NuDB backend class could throw an error in its destructor, resulting in a crash while the server was shutting down gracefully. This crash was harmless but resulted in false alarms and noise when tracking down other possible crashes. ([#4017](https://github.com/ripple/rippled/pull/4017)) + +- **Improve reporting of job queue in admin server_info**: The server_info command, when run with admin permissions, provides information about jobs in the server's job queue. This commit provides more descriptive names and more granular categories for many jobs that were previously all identified as "clientCommand". ([#4031](https://github.com/ripple/rippled/pull/4031)) + +- **Improve full & compressed inner node deserialization**: Remove a redundant copy operation from low-level SHAMap deserialization. ([#4004](https://github.com/ripple/rippled/pull/4004)) + +- **Reporting mode: only forward to P2P nodes that are synced**: Previously, reporting mode servers forwarded to any of their configured P2P nodes at random. This commit improves the selection so that it only chooses from P2P nodes that are fully synced with the network. ([#4028](https://github.com/ripple/rippled/pull/4028)) + +- **Improve handling of HTTP X-Forwarded-For and Forwarded headers**: Fixes the way the server handles IPv6 addresses in these HTTP headers. ([#4009](https://github.com/ripple/rippled/pull/4009), [#4030](https://github.com/ripple/rippled/pull/4030)) + +- **Other minor improvements to logging and Reporting Mode.** + + +## Version 1.8.0 +Ripple has released version 1.8.0 of rippled, the reference server implementation of the XRP Ledger protocol. This release brings several features and improvements. 
+ +### New and Improved Features + +- **Improve History Sharding**: Shards of ledger history are now assembled in a deterministic way so that any server can make a binary-identical shard for a given range of ledgers. This makes it possible to retrieve a shard from multiple sources in parallel, then verify its integrity by comparing checksums with peers' checksums for the same shard. Additionally, there's a new admin RPC command to import ledger history from the shard store, and the crawl_shards command has been expanded with more info. ([#2688](https://github.com/ripple/rippled/issues/2688), [#3726](https://github.com/ripple/rippled/pull/3726), [#3875](https://github.com/ripple/rippled/pull/3875)) +- **New CheckCashMakesTrustLine Amendment**: If enabled, this amendment will change the CheckCash transaction type so that cashing a check for an issued token automatically creates a trust line to hold the token, similar to how purchasing a token in the decentralized exchange creates a trust line to hold the token. This change provides a way for issuers to send tokens to a user before that user has set up a trust line, but without forcing anyone to hold tokens they don't want. ([#3823](https://github.com/ripple/rippled/pull/3823)) +- **Automatically determine the node size**: The server now selects an appropriate `[node_size]` configuration value by default if it is not explicitly specified. This parameter tunes various settings to the specs of the hardware that the server is running on, especially the amount of RAM and the number of CPU threads available in the system. Previously the server always chose the smallest value by default. +- **Improve transaction relaying logic**: Previously, the server relayed every transaction to all its peers (except the one that it received the transaction from). To reduce redundant messages, the server now relays transactions to a subset of peers using a randomized algorithm. 
Peers can determine whether there are transactions they have not seen and can request them from a peer that has them. It is expected that this feature will further reduce the bandwidth needed to operate a server. +- **Improve the Byzantine validator detector**: This expands the detection capabilities of the Byzantine validation detector. Previously, the server only monitored validators on its own UNL. Now, the server monitors for Byzantine behavior in all validations it sees. +- **Experimental tx stream with history for sidechains**: Adds an experimental subscription stream for sidechain federators to track messages on the main chain in canonical order. This stream is expected to change or be replaced in future versions as work on sidechains matures. +- **Support Debian 11 Bullseye**: This is the first release that is compatible with Debian Linux version 11.x, "Bullseye." The .deb packages now use absolute paths only, for compatibility with Bullseye's stricter package requirements. ([#3909](https://github.com/ripple/rippled/pull/3909)) +- **Improve Cache Performance**: The server uses a new storage structure for several in-memory caches for greatly improved overall performance. The process of purging old data from these caches, called "sweeping", was time-consuming and blocked other important activities necessary for maintaining ledger state and participating in consensus. The new structure divides the caches into smaller partitions that can be swept in parallel. +- **Amendment default votes:** Introduces variable default votes per amendment. Previously the server always voted "yes" on any new amendment unless an admin explicitly configured a voting preference for that amendment. Now the server's default vote can be "yes" or "no" in the source code. This should allow a safer, more gradual roll-out of new amendments, as new releases can be configured to understand a new amendment but not vote for it by default. 
([#3877](https://github.com/ripple/rippled/pull/3877))
+- **More fields in the `validations` stream:** The `validations` subscription stream in the API now reports additional fields that were added to validation messages by the HardenedValidations amendment. These fields make it easier to detect misconfigurations such as multiple servers sharing a validation key pair. ([#3865](https://github.com/ripple/rippled/pull/3865))
+- **Reporting mode supports `validations` and `manifests` streams:** In the API it is now possible to connect to these streams when connected to a server running in reporting mode. Previously, attempting to subscribe to these streams on a reporting server failed with the error `reportingUnsupported`. ([#3905](https://github.com/ripple/rippled/pull/3905))
+
+### Bug Fixes
+
+- **Clarify the safety of NetClock::time_point arithmetic**: * NetClock::rep is uint32_t and can be error-prone when used with subtraction. * Fixes [#3656](https://github.com/ripple/rippled/pull/3656)
+- **Fix out-of-bounds reserve, and some minor optimizations**
+- **Fix nested locks in ValidatorSite**
+- **Fix clang warnings about copies vs references**
+- **Fix reporting mode build issue**
+- **Fix potential deadlock in Validator sites**
+- **Use libsecp256k1 instead of OpenSSL for key derivation**: The deterministic key derivation code was still using calls to OpenSSL. This replaces the OpenSSL-based routines with new libsecp256k1-based implementations.
+- **Improve NodeStore to ShardStore imports**: This runs the import process in a background thread while preventing online_delete from removing ledgers pending import.
+- **Simplify SHAMapItem construction**: The existing class offered several constructors which were mostly unnecessary. This eliminates all existing constructors and introduces a single new one, taking a `Slice`.
The internal buffer is switched from `std::vector` to `Buffer` to save a minimum of 8 bytes (plus the buffer slack that is inherent in `std::vector`) per SHAMapItem instance. +- **Redesign stoppable objects**: Stoppable is no longer an abstract base class, but a pattern, modeled after the well-understood `std::thread`. The immediate benefits are less code, less synchronization, less runtime work, and (subjectively) more readable code. The end goal is to adhere to RAII in our object design, and this is one necessary step on that path. + ## Version 1.7.3 This is the 1.7.3 release of `rippled`, the reference implementation of the XRP Ledger protocol. This release addresses an OOB memory read identified by Guido Vranken, as well as an unrelated issue identified by the Ripple C++ team that could result in incorrect use of SLEs. Additionally, this version also introduces the `NegativeUNL` amendment, which corresponds to the feature which was introduced with the 1.6.0 release. diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index b9d16c3bf..4c8b22d50 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -622,18 +622,28 @@ # # [relay_proposals] # -# Controls the relaying behavior for proposals received by this server that -# are issued by validators that are not on the server's UNL. +# Controls the relay and processing behavior for proposals received by this +# server that are issued by validators that are not on the server's UNL. # -# Legal values are: "trusted" and "all". The default is "trusted". +# Legal values are: +# "all" - Relay and process all incoming proposals +# "trusted" - Relay only trusted proposals, but locally process all +# "drop_untrusted" - Relay only trusted proposals, do not process untrusted +# +# The default is "trusted". # # # [relay_validations] # -# Controls the relaying behavior for validations received by this server that -# are issued by validators that are not on the server's UNL. 
+# Controls the relay and processing behavior for validations received by this +# server that are issued by validators that are not on the server's UNL. # -# Legal values are: "trusted" and "all". The default is "all". +# Legal values are: +# "all" - Relay and process all incoming validations +# "trusted" - Relay only trusted validations, but locally process all +# "drop_untrusted" - Relay only trusted validations, do not process untrusted +# +# The default is "all". # # # @@ -769,6 +779,14 @@ # number of processor threads plus 2 for networked nodes. Nodes running in # stand alone mode default to 1 worker. # +# [io_workers] +# +# Configures the number of threads for processing raw inbound and outbound IO. +# +# [prefetch_workers] +# +# Configures the number of threads for performing nodestore prefetching. +# # # # [network_id] @@ -1132,6 +1150,9 @@ # cluster. Setting this option can help eliminate # write timeouts and other write errors due to the # cluster being overloaded. +# io_threads +# Set the number of IO threads used by the +# Cassandra driver. Defaults to 4. # # Notes: # The 'node_db' entry configures the primary, persistent storage. 
diff --git a/docs/consensus.md b/docs/consensus.md index c811b6f37..1b0063663 100644 --- a/docs/consensus.md +++ b/docs/consensus.md @@ -469,7 +469,7 @@ struct Ledger // Whether the ledger's close time was a non-trivial consensus result bool closeAgree() const; - // The close time resolution used in determing the close time + // The close time resolution used in determining the close time NetClock::duration closeTimeResolution() const; // The (effective) close time, based on the closeTimeResolution diff --git a/src/ripple/app/consensus/RCLValidations.cpp b/src/ripple/app/consensus/RCLValidations.cpp index b22b73c4e..0fcf0660b 100644 --- a/src/ripple/app/consensus/RCLValidations.cpp +++ b/src/ripple/app/consensus/RCLValidations.cpp @@ -87,7 +87,8 @@ RCLValidatedLedger::operator[](Seq const& s) const -> ID JLOG(j_.warn()) << "Unable to determine hash of ancestor seq=" << s << " from ledger hash=" << ledgerID_ - << " seq=" << ledgerSeq_; + << " seq=" << ledgerSeq_ << " (available: " << minSeq() + << "-" << seq() << ")"; // Default ID that is less than all others return ID{0}; } @@ -189,8 +190,8 @@ handleNewValidation( // so that our peers will also observe them and take independent notice of // such validators, informing their operators. if (auto const ls = val->isTrusted() - ? validations.adaptor().journal().fatal() - : validations.adaptor().journal().warn(); + ? 
validations.adaptor().journal().error() + : validations.adaptor().journal().info(); ls.active()) { auto const id = [&masterKey, &signingKey]() { diff --git a/src/ripple/app/ledger/OpenLedger.h b/src/ripple/app/ledger/OpenLedger.h index c3471ba49..5ec15dce6 100644 --- a/src/ripple/app/ledger/OpenLedger.h +++ b/src/ripple/app/ledger/OpenLedger.h @@ -189,7 +189,6 @@ private: FwdRange const& txs, OrderedTxs& retries, ApplyFlags flags, - std::map& shouldRecover, beast::Journal j); enum Result { success, failure, retry }; @@ -204,7 +203,6 @@ private: std::shared_ptr const& tx, bool retry, ApplyFlags flags, - bool shouldRecover, beast::Journal j); }; @@ -219,7 +217,6 @@ OpenLedger::apply( FwdRange const& txs, OrderedTxs& retries, ApplyFlags flags, - std::map& shouldRecover, beast::Journal j) { for (auto iter = txs.begin(); iter != txs.end(); ++iter) @@ -231,8 +228,7 @@ OpenLedger::apply( auto const txId = tx->getTransactionID(); if (check.txExists(txId)) continue; - auto const result = - apply_one(app, view, tx, true, flags, shouldRecover[txId], j); + auto const result = apply_one(app, view, tx, true, flags, j); if (result == Result::retry) retries.insert(tx); } @@ -249,14 +245,7 @@ OpenLedger::apply( auto iter = retries.begin(); while (iter != retries.end()) { - switch (apply_one( - app, - view, - iter->second, - retry, - flags, - shouldRecover[iter->second->getTransactionID()], - j)) + switch (apply_one(app, view, iter->second, retry, flags, j)) { case Result::success: ++changes; diff --git a/src/ripple/app/ledger/OrderBookDB.cpp b/src/ripple/app/ledger/OrderBookDB.cpp index a81331d04..4b598de1b 100644 --- a/src/ripple/app/ledger/OrderBookDB.cpp +++ b/src/ripple/app/ledger/OrderBookDB.cpp @@ -62,17 +62,16 @@ OrderBookDB::setup(std::shared_ptr const& ledger) mSeq = seq; } - if (app_.config().PATH_SEARCH_MAX == 0) + if (app_.config().PATH_SEARCH_MAX != 0) { - // nothing to do + if (app_.config().standalone()) + update(ledger); + else + app_.getJobQueue().addJob( + 
jtUPDATE_PF, "OrderBookDB::update", [this, ledger](Job&) { + update(ledger); + }); } - else if (app_.config().standalone()) - update(ledger); - else - app_.getJobQueue().addJob( - jtUPDATE_PF, "OrderBookDB::update", [this, ledger](Job&) { - update(ledger); - }); } void diff --git a/src/ripple/app/ledger/impl/InboundLedgers.cpp b/src/ripple/app/ledger/impl/InboundLedgers.cpp index 2f8a07138..8ee3443a2 100644 --- a/src/ripple/app/ledger/impl/InboundLedgers.cpp +++ b/src/ripple/app/ledger/impl/InboundLedgers.cpp @@ -30,6 +30,7 @@ #include #include #include +#include namespace ripple { @@ -347,27 +348,29 @@ public: void sweep() override { - clock_type::time_point const now(m_clock.now()); + auto const start = m_clock.now(); // Make a list of things to sweep, while holding the lock std::vector stuffToSweep; std::size_t total; + { ScopedLockType sl(mLock); MapType::iterator it(mLedgers.begin()); total = mLedgers.size(); + stuffToSweep.reserve(total); while (it != mLedgers.end()) { - if (it->second->getLastAction() > now) + auto const la = it->second->getLastAction(); + + if (la > start) { it->second->touch(); ++it; } - else if ( - (it->second->getLastAction() + std::chrono::minutes(1)) < - now) + else if ((la + std::chrono::minutes(1)) < start) { stuffToSweep.push_back(it->second); // shouldn't cause the actual final delete @@ -383,8 +386,13 @@ public: beast::expire(mRecentFailures, kReacquireInterval); } - JLOG(j_.debug()) << "Swept " << stuffToSweep.size() << " out of " - << total << " inbound ledgers."; + JLOG(j_.debug()) + << "Swept " << stuffToSweep.size() << " out of " << total + << " inbound ledgers. 
Duration: " + << std::chrono::duration_cast( + m_clock.now() - start) + .count() + << "ms"; } void diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 8290b4150..082f74524 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -1084,6 +1084,16 @@ LedgerMaster::checkAccept(std::shared_ptr const& ledger) if (!fees.empty()) { std::sort(fees.begin(), fees.end()); + if (auto stream = m_journal.debug()) + { + std::stringstream s; + s << "Received fees from validations: (" << fees.size() << ") "; + for (auto const fee1 : fees) + { + s << " " << fee1; + } + stream << s.str(); + } fee = fees[fees.size() / 2]; // median } else diff --git a/src/ripple/app/ledger/impl/LedgerReplayer.cpp b/src/ripple/app/ledger/impl/LedgerReplayer.cpp index 9ec5c6f2c..c7aa5d9ca 100644 --- a/src/ripple/app/ledger/impl/LedgerReplayer.cpp +++ b/src/ripple/app/ledger/impl/LedgerReplayer.cpp @@ -218,39 +218,47 @@ LedgerReplayer::gotReplayDelta( void LedgerReplayer::sweep() { - std::lock_guard lock(mtx_); - JLOG(j_.debug()) << "Sweeping, LedgerReplayer has " << tasks_.size() - << " tasks, " << skipLists_.size() << " skipLists, and " - << deltas_.size() << " deltas."; + auto const start = std::chrono::steady_clock::now(); + { + std::lock_guard lock(mtx_); + JLOG(j_.debug()) << "Sweeping, LedgerReplayer has " << tasks_.size() + << " tasks, " << skipLists_.size() + << " skipLists, and " << deltas_.size() << " deltas."; - tasks_.erase( - std::remove_if( - tasks_.begin(), - tasks_.end(), - [this](auto const& t) -> bool { - if (t->finished()) - { - JLOG(j_.debug()) - << "Sweep task " << t->getTaskParameter().finishHash_; - return true; - } - return false; - }), - tasks_.end()); + tasks_.erase( + std::remove_if( + tasks_.begin(), + tasks_.end(), + [this](auto const& t) -> bool { + if (t->finished()) + { + JLOG(j_.debug()) << "Sweep task " + << t->getTaskParameter().finishHash_; + return true; + } 
+ return false; + }), + tasks_.end()); - auto removeCannotLocked = [](auto& subTasks) { - for (auto it = subTasks.begin(); it != subTasks.end();) - { - if (auto item = it->second.lock(); !item) + auto removeCannotLocked = [](auto& subTasks) { + for (auto it = subTasks.begin(); it != subTasks.end();) { - it = subTasks.erase(it); + if (auto item = it->second.lock(); !item) + { + it = subTasks.erase(it); + } + else + ++it; } - else - ++it; - } - }; - removeCannotLocked(skipLists_); - removeCannotLocked(deltas_); + }; + removeCannotLocked(skipLists_); + removeCannotLocked(deltas_); + } + JLOG(j_.debug()) << " LedgerReplayer sweep lock duration " + << std::chrono::duration_cast( + std::chrono::steady_clock::now() - start) + .count() + << "ms"; } void diff --git a/src/ripple/app/ledger/impl/OpenLedger.cpp b/src/ripple/app/ledger/impl/OpenLedger.cpp index 4fd9d7324..4f15a5024 100644 --- a/src/ripple/app/ledger/impl/OpenLedger.cpp +++ b/src/ripple/app/ledger/impl/OpenLedger.cpp @@ -87,17 +87,11 @@ OpenLedger::accept( { JLOG(j_.trace()) << "accept ledger " << ledger->seq() << " " << suffix; auto next = create(rules, ledger); - std::map shouldRecover; if (retriesFirst) { - for (auto const& tx : retries) - { - auto const txID = tx.second->getTransactionID(); - shouldRecover[txID] = app.getHashRouter().shouldRecover(txID); - } // Handle disputed tx, outside lock using empty = std::vector>; - apply(app, *next, *ledger, empty{}, retries, flags, shouldRecover, j_); + apply(app, *next, *ledger, empty{}, retries, flags, j_); } // Block calls to modify, otherwise // new tx going into the open ledger @@ -106,17 +100,6 @@ OpenLedger::accept( // Apply tx from the current open view if (!current_->txs.empty()) { - for (auto const& tx : current_->txs) - { - auto const txID = tx.first->getTransactionID(); - auto iter = shouldRecover.lower_bound(txID); - if (iter != shouldRecover.end() && iter->first == txID) - // already had a chance via disputes - iter->second = false; - else - 
shouldRecover.emplace_hint( - iter, txID, app.getHashRouter().shouldRecover(txID)); - } apply( app, *next, @@ -130,7 +113,6 @@ OpenLedger::accept( }), retries, flags, - shouldRecover, j_); } // Call the modifier @@ -185,21 +167,12 @@ OpenLedger::apply_one( std::shared_ptr const& tx, bool retry, ApplyFlags flags, - bool shouldRecover, beast::Journal j) -> Result { if (retry) flags = flags | tapRETRY; - auto const result = [&] { - auto const queueResult = - app.getTxQ().apply(app, view, tx, flags | tapPREFER_QUEUE, j); - // If the transaction can't get into the queue for intrinsic - // reasons, and it can still be recovered, try to put it - // directly into the open ledger, else drop it. - if (queueResult.first == telCAN_NOT_QUEUE && shouldRecover) - return ripple::apply(app, view, *tx, flags, j); - return queueResult; - }(); + // If it's in anybody's proposed set, try to keep it in the ledger + auto const result = ripple::apply(app, view, *tx, flags, j); if (result.second || result.first == terQUEUED) return Result::success; if (isTefFailure(result.first) || isTemMalformed(result.first) || diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index b9b60767b..60381b158 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -79,12 +79,14 @@ #include +#include #include #include #include #include #include #include +#include #include #include @@ -244,6 +246,10 @@ public: #if RIPPLE_SINGLE_IO_SERVICE_THREAD return 1; #else + + if (config.IO_WORKERS > 0) + return config.IO_WORKERS; + auto const cores = std::thread::hardware_concurrency(); // Use a single thread when running on under-provisioned systems @@ -283,6 +289,29 @@ public: logs_->journal("Collector"))) , m_jobQueue(std::make_unique( + [](std::unique_ptr const& config) { + if (config->standalone() && !config->reporting() && + !config->FORCE_MULTI_THREAD) + return 1; + + if (config->WORKERS) + return config->WORKERS; + + auto count = + 
static_cast(std::thread::hardware_concurrency()); + + // Be more aggressive about the number of threads to use + // for the job queue if the server is configured as "large" + // or "huge" if there are enough cores. + if (config->NODE_SIZE >= 4 && count >= 16) + count = 6 + std::min(count, 8); + else if (config->NODE_SIZE >= 3 && count >= 8) + count = 4 + std::min(count, 6); + else + count = 2 + std::min(count, 4); + + return count; + }(config_), m_collectorManager->group("jobq"), logs_->journal("JobQueue"), *logs_, @@ -304,14 +333,21 @@ public: stopwatch(), logs_->journal("TaggedCache")) - , cachedSLEs_(std::chrono::minutes(1), stopwatch()) + , cachedSLEs_( + "Cached SLEs", + 0, + std::chrono::minutes(1), + stopwatch(), + logs_->journal("CachedSLEs")) + , validatorKeys_(*config_, m_journal) , m_resourceManager(Resource::make_Manager( m_collectorManager->collector(), logs_->journal("Resource"))) - , m_nodeStore(m_shaMapStore->makeNodeStore(4)) + , m_nodeStore(m_shaMapStore->makeNodeStore( + config_->PREFETCH_WORKERS > 0 ? 
config_->PREFETCH_WORKERS : 4)) , nodeFamily_(*this, *m_collectorManager) @@ -412,8 +448,7 @@ public: , hashRouter_(std::make_unique( stopwatch(), - HashRouter::getDefaultHoldTime(), - HashRouter::getDefaultRecoverLimit())) + HashRouter::getDefaultHoldTime())) , mValidations( ValidationParms(), @@ -1065,7 +1100,8 @@ public: { using namespace std::chrono; sweepTimer_.expires_from_now( - seconds{config_->getValueFor(SizedItem::sweepInterval)}); + seconds{config_->SWEEP_INTERVAL.value_or( + config_->getValueFor(SizedItem::sweepInterval))}); sweepTimer_.async_wait(std::move(*optionalCountedHandler)); } } @@ -1121,11 +1157,11 @@ public: shardStore_->sweep(); getLedgerMaster().sweep(); getTempNodeCache().sweep(); - getValidations().expire(); + getValidations().expire(m_journal); getInboundLedgers().sweep(); getLedgerReplayer().sweep(); m_acceptedLedgerCache.sweep(); - cachedSLEs_.expire(); + cachedSLEs_.sweep(); #ifdef RIPPLED_REPORTING if (auto pg = dynamic_cast( @@ -1221,9 +1257,6 @@ ApplicationImp::setup() // Optionally turn off logging to console. 
logs_->silent(config_->silent()); - m_jobQueue->setThreadCount( - config_->WORKERS, config_->standalone() && !config_->reporting()); - if (!config_->standalone()) timeKeeper_->run(config_->SNTP_SERVERS); diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index 3c31e10bb..0fc927ff6 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -52,7 +52,19 @@ class ShardArchiveHandler; // VFALCO TODO Fix forward declares required for header dependency loops class AmendmentTable; -class CachedSLEs; + +template < + class Key, + class T, + bool IsKeyCache, + class Hash, + class KeyEqual, + class Mutex> +class TaggedCache; +class STLedgerEntry; +using SLE = STLedgerEntry; +using CachedSLEs = TaggedCache; + class CollectorManager; class Family; class HashRouter; diff --git a/src/ripple/app/main/GRPCServer.cpp b/src/ripple/app/main/GRPCServer.cpp index 7279bc32b..ddaea14c2 100644 --- a/src/ripple/app/main/GRPCServer.cpp +++ b/src/ripple/app/main/GRPCServer.cpp @@ -144,11 +144,11 @@ GRPCServerImpl::CallData::process( { auto usage = getUsage(); bool isUnlimited = clientIsUnlimited(); - if (!isUnlimited && usage.disconnect()) + if (!isUnlimited && usage.disconnect(app_.journal("gRPCServer"))) { grpc::Status status{ grpc::StatusCode::RESOURCE_EXHAUSTED, - "usage balance exceeds threshhold"}; + "usage balance exceeds threshold"}; responder_.FinishWithError(status, this); } else diff --git a/src/ripple/app/main/LoadManager.cpp b/src/ripple/app/main/LoadManager.cpp index be4b7ec97..5e87063f0 100644 --- a/src/ripple/app/main/LoadManager.cpp +++ b/src/ripple/app/main/LoadManager.cpp @@ -108,86 +108,86 @@ LoadManager::run() while (true) { + t += 1s; + + std::unique_lock sl(mutex_); + if (cv_.wait_until(sl, t, [this] { return stop_; })) + break; + + // Copy out shared data under a lock. Use copies outside lock. 
+ auto const deadLock = deadLock_; + auto const armed = armed_; + sl.unlock(); + + // Measure the amount of time we have been deadlocked, in seconds. + using namespace std::chrono; + auto const timeSpentDeadlocked = + duration_cast(steady_clock::now() - deadLock); + + constexpr auto reportingIntervalSeconds = 10s; + constexpr auto deadlockFatalLogMessageTimeLimit = 90s; + constexpr auto deadlockLogicErrorTimeLimit = 600s; + + if (armed && (timeSpentDeadlocked >= reportingIntervalSeconds)) { - t += 1s; - std::unique_lock sl(mutex_); - if (cv_.wait_until(sl, t, [this] { return stop_; })) + // Report the deadlocked condition every + // reportingIntervalSeconds + if ((timeSpentDeadlocked % reportingIntervalSeconds) == 0s) { - break; - } - // Copy out shared data under a lock. Use copies outside lock. - auto const deadLock = deadLock_; - auto const armed = armed_; - sl.unlock(); - - // Measure the amount of time we have been deadlocked, in seconds. - using namespace std::chrono; - auto const timeSpentDeadlocked = - duration_cast(steady_clock::now() - deadLock); - - constexpr auto reportingIntervalSeconds = 10s; - constexpr auto deadlockFatalLogMessageTimeLimit = 90s; - constexpr auto deadlockLogicErrorTimeLimit = 600s; - if (armed && (timeSpentDeadlocked >= reportingIntervalSeconds)) - { - // Report the deadlocked condition every - // reportingIntervalSeconds - if ((timeSpentDeadlocked % reportingIntervalSeconds) == 0s) + if (timeSpentDeadlocked < deadlockFatalLogMessageTimeLimit) { - if (timeSpentDeadlocked < deadlockFatalLogMessageTimeLimit) - { - JLOG(journal_.warn()) - << "Server stalled for " - << timeSpentDeadlocked.count() << " seconds."; - } - else - { - JLOG(journal_.fatal()) - << "Deadlock detected. 
Deadlocked time: " - << timeSpentDeadlocked.count() << "s"; - if (app_.getJobQueue().isOverloaded()) - { - JLOG(journal_.fatal()) - << app_.getJobQueue().getJson(0); - } - } - } - - // If we go over the deadlockTimeLimit spent deadlocked, it - // means that the deadlock resolution code has failed, which - // qualifies as undefined behavior. - // - if (timeSpentDeadlocked >= deadlockLogicErrorTimeLimit) - { - JLOG(journal_.fatal()) - << "LogicError: Deadlock detected. Deadlocked time: " - << timeSpentDeadlocked.count() << "s"; + JLOG(journal_.warn()) + << "Server stalled for " << timeSpentDeadlocked.count() + << " seconds."; if (app_.getJobQueue().isOverloaded()) { - JLOG(journal_.fatal()) << app_.getJobQueue().getJson(0); + JLOG(journal_.warn()) << app_.getJobQueue().getJson(0); } - LogicError("Deadlock detected"); + } + else + { + JLOG(journal_.fatal()) + << "Deadlock detected. Deadlocked time: " + << timeSpentDeadlocked.count() << "s"; + JLOG(journal_.fatal()) + << "JobQueue: " << app_.getJobQueue().getJson(0); } } - } - bool change = false; - if (app_.getJobQueue().isOverloaded()) - { - JLOG(journal_.info()) << app_.getJobQueue().getJson(0); - change = app_.getFeeTrack().raiseLocalFee(); - } - else - { - change = app_.getFeeTrack().lowerLocalFee(); + // If we go over the deadlockTimeLimit spent deadlocked, it + // means that the deadlock resolution code has failed, which + // qualifies as undefined behavior. + // + if (timeSpentDeadlocked >= deadlockLogicErrorTimeLimit) + { + JLOG(journal_.fatal()) + << "LogicError: Deadlock detected. Deadlocked time: " + << timeSpentDeadlocked.count() << "s"; + JLOG(journal_.fatal()) + << "JobQueue: " << app_.getJobQueue().getJson(0); + LogicError("Deadlock detected"); + } } + } - if (change) - { - // VFALCO TODO replace this with a Listener / observer and - // subscribe in NetworkOPs or Application. 
- app_.getOPs().reportFeeChange(); - } + bool change; + + if (app_.getJobQueue().isOverloaded()) + { + JLOG(journal_.info()) << "Raising local fee (JQ overload): " + << app_.getJobQueue().getJson(0); + change = app_.getFeeTrack().raiseLocalFee(); + } + else + { + change = app_.getFeeTrack().lowerLocalFee(); + } + + if (change) + { + // VFALCO TODO replace this with a Listener / observer and + // subscribe in NetworkOPs or Application. + app_.getOPs().reportFeeChange(); } } diff --git a/src/ripple/app/misc/FeeEscalation.md b/src/ripple/app/misc/FeeEscalation.md index c200ae075..30f4dc278 100644 --- a/src/ripple/app/misc/FeeEscalation.md +++ b/src/ripple/app/misc/FeeEscalation.md @@ -92,7 +92,7 @@ traffic periods, and give those transactions a much better chance to succeed. 1. If an incoming transaction meets both the base [fee -level](#fee-level) and the load fee minimum, but does not have a high +level](#fee-level) and the [load fee](#load-fee) minimum, but does not have a high enough [fee level](#fee-level) to immediately go into the open ledger, it is instead put into the queue and broadcast to peers. Each peer will then make an independent decision about whether to put the transaction @@ -173,6 +173,10 @@ This demonstrates that a simpler transaction paying less XRP can be more likely to get into the open ledger, or be sorted earlier in the queue than a more complex transaction paying more XRP. +### Load Fee + +Each rippled server maintains a minimum cost threshold based on its current load. If you submit a transaction with a fee that is lower than the current load-based transaction cost of the rippled server, the server neither applies nor relays the transaction to its peers. A transaction is very unlikely to survive the consensus process unless its transaction fee value meets the requirements of a majority of servers. 
+ ### Reference Transaction In this document, a "Reference Transaction" is any currently implemented diff --git a/src/ripple/app/misc/HashRouter.cpp b/src/ripple/app/misc/HashRouter.cpp index 8a8170f48..8085d6892 100644 --- a/src/ripple/app/misc/HashRouter.cpp +++ b/src/ripple/app/misc/HashRouter.cpp @@ -128,14 +128,4 @@ HashRouter::shouldRelay(uint256 const& key) return s.releasePeerSet(); } -bool -HashRouter::shouldRecover(uint256 const& key) -{ - std::lock_guard lock(mutex_); - - auto& s = emplace(key).first; - - return s.shouldRecover(recoverLimit_); -} - } // namespace ripple diff --git a/src/ripple/app/misc/HashRouter.h b/src/ripple/app/misc/HashRouter.h index a43bc1278..8c546b2c5 100644 --- a/src/ripple/app/misc/HashRouter.h +++ b/src/ripple/app/misc/HashRouter.h @@ -116,20 +116,6 @@ private: return true; } - /** Determines if this item should be recovered from the open ledger. - - Counts the number of times the item has been recovered. - Every `limit` times the function is called, return false. - Else return true. - - @note The limit must be > 0 - */ - bool - shouldRecover(std::uint32_t limit) - { - return ++recoveries_ % limit != 0; - } - bool shouldProcess(Stopwatch::time_point now, std::chrono::seconds interval) { @@ -146,7 +132,6 @@ private: // than one flag needs to expire independently. 
std::optional relayed_; std::optional processed_; - std::uint32_t recoveries_ = 0; }; public: @@ -158,19 +143,8 @@ public: return 300s; } - static inline std::uint32_t - getDefaultRecoverLimit() - { - return 1; - } - - HashRouter( - Stopwatch& clock, - std::chrono::seconds entryHoldTimeInSeconds, - std::uint32_t recoverLimit) - : suppressionMap_(clock) - , holdTime_(entryHoldTimeInSeconds) - , recoverLimit_(recoverLimit + 1u) + HashRouter(Stopwatch& clock, std::chrono::seconds entryHoldTimeInSeconds) + : suppressionMap_(clock), holdTime_(entryHoldTimeInSeconds) { } @@ -231,15 +205,6 @@ public: std::optional> shouldRelay(uint256 const& key); - /** Determines whether the hashed item should be recovered - from the open ledger into the next open ledger or the transaction - queue. - - @return `bool` indicates whether the item should be recovered - */ - bool - shouldRecover(uint256 const& key); - private: // pair.second indicates whether the entry was created std::pair @@ -256,8 +221,6 @@ private: suppressionMap_; std::chrono::seconds const holdTime_; - - std::uint32_t const recoverLimit_; }; } // namespace ripple diff --git a/src/ripple/app/misc/LoadFeeTrack.h b/src/ripple/app/misc/LoadFeeTrack.h index 30c8766a9..0109468cb 100644 --- a/src/ripple/app/misc/LoadFeeTrack.h +++ b/src/ripple/app/misc/LoadFeeTrack.h @@ -21,6 +21,7 @@ #define RIPPLE_CORE_LOADFEETRACK_H_INCLUDED #include +#include #include #include #include @@ -58,6 +59,7 @@ public: void setRemoteFee(std::uint32_t f) { + JLOG(j_.trace()) << "setRemoteFee: " << f; std::lock_guard sl(lock_); remoteTxnLoadFee_ = f; } @@ -110,6 +112,7 @@ public: void setClusterFee(std::uint32_t fee) { + JLOG(j_.trace()) << "setClusterFee: " << fee; std::lock_guard sl(lock_); clusterTxnLoadFee_ = fee; } diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 1c5ff409f..813ce54a7 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -72,6 +72,7 @@ #include 
#include #include +#include #include namespace ripple { @@ -2305,7 +2306,7 @@ NetworkOPsImp::recvValidation( // We will always relay trusted validations; if configured, we will // also relay all untrusted validations. - return app_.config().RELAY_UNTRUSTED_VALIDATIONS || val->isTrusted(); + return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted(); } Json::Value @@ -2629,6 +2630,11 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (std::abs(closeOffset.count()) >= 60) l[jss::close_time_offset] = closeOffset.count(); +#if RIPPLED_REPORTING + std::int64_t const dbAge = + std::max(m_ledgerMaster.getValidatedLedgerAge().count(), 0L); + l[jss::age] = Json::UInt(dbAge); +#else constexpr std::chrono::seconds highAgeThreshold{1000000}; if (m_ledgerMaster.haveValidated()) { @@ -2648,6 +2654,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) Json::UInt(age < highAgeThreshold ? age.count() : 0); } } +#endif } if (valid) @@ -2992,7 +2999,7 @@ NetworkOPsImp::reportFeeChange() if (f != mLastFeeSummary) { m_job_queue.addJob( - jtCLIENT, "reportFeeChange->pubServer", [this](Job&) { + jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this](Job&) { pubServer(); }); } @@ -3002,7 +3009,7 @@ void NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase) { m_job_queue.addJob( - jtCLIENT, + jtCLIENT_CONSENSUS, "reportConsensusStateChange->pubConsensus", [this, phase](Job&) { pubConsensus(phase); }); } @@ -3395,7 +3402,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) } app_.getJobQueue().addJob( - jtCLIENT, + jtCLIENT_ACCT_HIST, "AccountHistoryTxStream", [this, dbType = databaseType, subInfo](Job&) { auto const& accountId = subInfo.index_->accountId_; @@ -4079,7 +4086,7 @@ NetworkOPsImp::getBookPage( Json::Value& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue)); - std::map umBalance; + std::unordered_map umBalance; const uint256 uBookBase = getBookBase(book); const uint256 
uBookEnd = getQualityNext(uBookBase); uint256 uTipIndex = uBookBase; diff --git a/src/ripple/app/misc/TxQ.h b/src/ripple/app/misc/TxQ.h index a8283f20d..8c1cb2688 100644 --- a/src/ripple/app/misc/TxQ.h +++ b/src/ripple/app/misc/TxQ.h @@ -96,13 +96,13 @@ public: FeeLevel64 minimumEscalationMultiplier = baseLevel * 500; /// Minimum number of transactions to allow into the ledger /// before escalation, regardless of the prior ledger's size. - std::uint32_t minimumTxnInLedger = 5; + std::uint32_t minimumTxnInLedger = 32; /// Like @ref minimumTxnInLedger for standalone mode. /// Primarily so that tests don't need to worry about queuing. std::uint32_t minimumTxnInLedgerSA = 1000; /// Number of transactions per ledger that fee escalation "works /// towards". - std::uint32_t targetTxnInLedger = 50; + std::uint32_t targetTxnInLedger = 256; /** Optional maximum allowed value of transactions per ledger before fee escalation kicks in. By default, the maximum is an emergent property of network, validator, and consensus performance. This @@ -616,17 +616,30 @@ private: } }; - /// Used for sorting @ref MaybeTx by `feeLevel` - class GreaterFee + /// Used for sorting @ref MaybeTx + class OrderCandidates { public: /// Default constructor - explicit GreaterFee() = default; + explicit OrderCandidates() = default; - /// Is the fee level of `lhs` greater than the fee level of `rhs`? + /** Sort @ref MaybeTx by `feeLevel` descending, then by + * transaction ID ascending + * + * The transaction queue is ordered such that transactions + * paying a higher fee are in front of transactions paying + * a lower fee, giving them an opportunity to be processed into + * the open ledger first. Within transactions paying the same + * fee, order by the arbitrary but consistent transaction ID. + * This allows validators to build similar queues in the same + * order, and thus have more similar initial proposals. 
+ * + */ bool operator()(const MaybeTx& lhs, const MaybeTx& rhs) const { + if (lhs.feeLevel == rhs.feeLevel) + return lhs.txID < rhs.txID; return lhs.feeLevel > rhs.feeLevel; } }; @@ -725,7 +738,7 @@ private: &MaybeTx::byFeeListHook>; using FeeMultiSet = boost::intrusive:: - multiset>; + multiset>; using AccountMap = std::map; diff --git a/src/ripple/app/misc/impl/TxQ.cpp b/src/ripple/app/misc/impl/TxQ.cpp index 595429e45..367f9660f 100644 --- a/src/ripple/app/misc/impl/TxQ.cpp +++ b/src/ripple/app/misc/impl/TxQ.cpp @@ -107,12 +107,11 @@ TxQ::FeeMetrics::update( std::sort(feeLevels.begin(), feeLevels.end()); assert(size == feeLevels.size()); - JLOG(j_.debug()) << "Ledger " << view.info().seq << " has " << size - << " transactions. " - << "Ledgers are processing " - << (timeLeap ? "slowly" : "as expected") - << ". Expected transactions is currently " << txnsExpected_ - << " and multiplier is " << escalationMultiplier_; + JLOG((timeLeap ? j_.warn() : j_.debug())) + << "Ledger " << view.info().seq << " has " << size << " transactions. " + << "Ledgers are processing " << (timeLeap ? "slowly" : "as expected") + << ". Expected transactions is currently " << txnsExpected_ + << " and multiplier is " << escalationMultiplier_; if (timeLeap) { @@ -480,30 +479,15 @@ TxQ::eraseAndAdvance(TxQ::FeeMultiSet::const_iterator_type candidateIter) assert(byFee_.iterator_to(accountIter->second) == candidateIter); auto const accountNextIter = std::next(accountIter); - // Check if the next transaction for this account has a greater - // SeqProxy, and a higher fee level, which means we skipped it - // earlier, and need to try it again. - // - // Edge cases: - // o If the next account tx has a lower fee level, it's going to be - // later in the fee queue, so we haven't skipped it yet. - // - // o If the next tx has an equal fee level, it was... 
- // - // * EITHER submitted later, so it's also going to be later in the - // fee queue, - // - // * OR the current was resubmitted to bump up the fee level, and - // we have skipped that next tx. - // - // In the latter case, continue through the fee queue anyway - // to head off potential ordering manipulation problems. + // Check if the next transaction for this account is earlier in the queue, + // which means we skipped it earlier, and need to try it again. + OrderCandidates o; auto const feeNextIter = std::next(candidateIter); bool const useAccountNext = accountNextIter != txQAccount.transactions.end() && accountNextIter->first > candidateIter->seqProxy && (feeNextIter == byFee_.end() || - accountNextIter->second.feeLevel > feeNextIter->feeLevel); + o(accountNextIter->second, *feeNextIter)); auto const candidateNextIter = byFee_.erase(candidateIter); txQAccount.transactions.erase(accountIter); @@ -1190,8 +1174,7 @@ TxQ::apply( for some other reason. Tx is allowed to queue in case conditions change, but don't waste the effort to clear). */ - if (!(flags & tapPREFER_QUEUE) && txSeqProx.isSeq() && txIter && - multiTxn.has_value() && + if (txSeqProx.isSeq() && txIter && multiTxn.has_value() && txIter->first->second.retriesRemaining == MaybeTx::retriesAllowed && feeLevelPaid > requiredFeeLevel && requiredFeeLevel > baseLevel) { @@ -1240,9 +1223,19 @@ TxQ::apply( if (!replacedTxIter && isFull()) { auto lastRIter = byFee_.rbegin(); - if (lastRIter->account == account) + while (lastRIter != byFee_.rend() && lastRIter->account == account) { - JLOG(j_.warn()) + ++lastRIter; + } + if (lastRIter == byFee_.rend()) + { + // The only way this condition can happen is if the entire + // queue is filled with transactions from this account. This + // is impossible with default settings - minimum queue size + // is 2000, and an account can only have 10 transactions + // queued. However, it can occur if settings are changed, + // and there is unit test coverage. 
+ JLOG(j_.info()) << "Queue is full, and transaction " << transactionID << " would kick a transaction from the same account (" << account << ") out of the queue."; @@ -1283,7 +1276,7 @@ TxQ::apply( // valuable, so kick out the cheapest transaction. auto dropRIter = endAccount.transactions.rbegin(); assert(dropRIter->second.account == lastRIter->account); - JLOG(j_.warn()) + JLOG(j_.info()) << "Removing last item of account " << lastRIter->account << " from queue with average fee of " << endEffectiveFeeLevel << " in favor of " << transactionID << " with fee of " @@ -1292,7 +1285,7 @@ TxQ::apply( } else { - JLOG(j_.warn()) + JLOG(j_.info()) << "Queue is full, and transaction " << transactionID << " fee is lower than end item's account average fee"; return {telCAN_NOT_QUEUE_FULL, false}; @@ -1322,9 +1315,6 @@ TxQ::apply( // Don't allow soft failures, which can lead to retries flags &= ~tapRETRY; - // Don't queue because we're already in the queue - flags &= ~tapPREFER_QUEUE; - auto& candidate = accountIter->second.add( {tx, transactionID, feeLevelPaid, flags, pfresult}); @@ -1641,7 +1631,7 @@ TxQ::accept(Application& app, OpenView& view) { // Since the failed transaction has a ticket, order // doesn't matter. Drop this one. 
- JLOG(j_.warn()) + JLOG(j_.info()) << "Queue is nearly full, and transaction " << candidateIter->txID << " failed with " << transToken(txnResult) @@ -1660,7 +1650,7 @@ TxQ::accept(Application& app, OpenView& view) dropRIter->second.account == candidateIter->account); - JLOG(j_.warn()) + JLOG(j_.info()) << "Queue is nearly full, and transaction " << candidateIter->txID << " failed with " << transToken(txnResult) @@ -1755,13 +1745,7 @@ TxQ::getRequiredFeeLevel( FeeMetrics::Snapshot const& metricsSnapshot, std::lock_guard const& lock) const { - FeeLevel64 const feeLevel = - FeeMetrics::scaleFeeLevel(metricsSnapshot, view); - - if ((flags & tapPREFER_QUEUE) && !byFee_.empty()) - return std::max(feeLevel, byFee_.begin()->feeLevel); - - return feeLevel; + return FeeMetrics::scaleFeeLevel(metricsSnapshot, view); } std::optional> diff --git a/src/ripple/app/reporting/ETLHelpers.h b/src/ripple/app/reporting/ETLHelpers.h index 12c1e785d..04f282ca5 100644 --- a/src/ripple/app/reporting/ETLHelpers.h +++ b/src/ripple/app/reporting/ETLHelpers.h @@ -43,7 +43,7 @@ class NetworkValidatedLedgers mutable std::mutex m_; - std::condition_variable cv_; + mutable std::condition_variable cv_; bool stopping_ = false; @@ -64,13 +64,23 @@ public: /// @return sequence of most recently validated ledger. empty optional if /// the datastructure has been stopped std::optional - getMostRecent() + getMostRecent() const { std::unique_lock lck(m_); cv_.wait(lck, [this]() { return max_ || stopping_; }); return max_; } + /// Get most recently validated sequence. + /// @return sequence of most recently validated ledger, or empty optional + /// if no ledgers are known to have been validated. 
+ std::optional + tryGetMostRecent() const + { + std::unique_lock lk(m_); + return max_; + } + /// Waits for the sequence to be validated by the network /// @param sequence to wait for /// @return true if sequence was validated, false otherwise diff --git a/src/ripple/app/reporting/ETLSource.cpp b/src/ripple/app/reporting/ETLSource.cpp index 5c6d1d07b..be31f4fdf 100644 --- a/src/ripple/app/reporting/ETLSource.cpp +++ b/src/ripple/app/reporting/ETLSource.cpp @@ -760,13 +760,24 @@ ETLLoadBalancer::forwardToP2p(RPC::JsonContext& context) const srand((unsigned)time(0)); auto sourceIdx = rand() % sources_.size(); auto numAttempts = 0; + + auto mostRecent = etl_.getNetworkValidatedLedgers().tryGetMostRecent(); while (numAttempts < sources_.size()) { - res = sources_[sourceIdx]->forwardToP2p(context); - if (!res.isMember("forwarded") || res["forwarded"] != true) - { + auto increment = [&]() { sourceIdx = (sourceIdx + 1) % sources_.size(); ++numAttempts; + }; + auto& src = sources_[sourceIdx]; + if (mostRecent && !src->hasLedger(*mostRecent)) + { + increment(); + continue; + } + res = src->forwardToP2p(context); + if (!res.isMember("forwarded") || res["forwarded"] != true) + { + increment(); continue; } return res; @@ -941,7 +952,7 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence) << "Error executing function. " << " Tried all sources, but ledger was found in db." 
<< " Sequence = " << ledgerSequence; - break; + return false; } JLOG(journal_.error()) << __func__ << " : " diff --git a/src/ripple/app/reporting/ReportingETL.cpp b/src/ripple/app/reporting/ReportingETL.cpp index 4db578b54..3008ae782 100644 --- a/src/ripple/app/reporting/ReportingETL.cpp +++ b/src/ripple/app/reporting/ReportingETL.cpp @@ -524,14 +524,6 @@ ReportingETL::runETLPipeline(uint32_t startSequence) auto start = std::chrono::system_clock::now(); std::optional fetchResponse{ fetchLedgerDataAndDiff(currentSequence)}; - auto end = std::chrono::system_clock::now(); - - auto time = ((end - start).count()) / 1000000000.0; - auto tps = - fetchResponse->transactions_list().transactions_size() / time; - - JLOG(journal_.debug()) << "Extract phase time = " << time - << " . Extract phase tps = " << tps; // if the fetch is unsuccessful, stop. fetchLedger only returns // false if the server is shutting down, or if the ledger was // found in the database (which means another process already @@ -543,6 +535,14 @@ ReportingETL::runETLPipeline(uint32_t startSequence) { break; } + auto end = std::chrono::system_clock::now(); + + auto time = ((end - start).count()) / 1000000000.0; + auto tps = + fetchResponse->transactions_list().transactions_size() / time; + + JLOG(journal_.debug()) << "Extract phase time = " << time + << " . Extract phase tps = " << tps; transformQueue.push(std::move(fetchResponse)); ++currentSequence; diff --git a/src/ripple/app/tx/impl/Transactor.cpp b/src/ripple/app/tx/impl/Transactor.cpp index cb248ac8c..6219c0277 100644 --- a/src/ripple/app/tx/impl/Transactor.cpp +++ b/src/ripple/app/tx/impl/Transactor.cpp @@ -307,15 +307,19 @@ Transactor::checkFee(PreclaimContext const& ctx, FeeUnit64 baseFee) if (!isLegalAmount(feePaid) || feePaid < beast::zero) return temBAD_FEE; - auto const feeDue = - minimumFee(ctx.app, baseFee, ctx.view.fees(), ctx.flags); - // Only check fee is sufficient when the ledger is open. 
- if (ctx.view.open() && feePaid < feeDue) + if (ctx.view.open()) { - JLOG(ctx.j.trace()) << "Insufficient fee paid: " << to_string(feePaid) - << "/" << to_string(feeDue); - return telINSUF_FEE_P; + auto const feeDue = + minimumFee(ctx.app, baseFee, ctx.view.fees(), ctx.flags); + + if (feePaid < feeDue) + { + JLOG(ctx.j.trace()) + << "Insufficient fee paid: " << to_string(feePaid) << "/" + << to_string(feeDue); + return telINSUF_FEE_P; + } } if (feePaid == beast::zero) diff --git a/src/ripple/basics/KeyCache.h b/src/ripple/basics/KeyCache.h index 9f6266310..d8fa4910a 100644 --- a/src/ripple/basics/KeyCache.h +++ b/src/ripple/basics/KeyCache.h @@ -1,7 +1,7 @@ //------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. + Copyright (c) 2021 Ripple Labs Inc. Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above @@ -17,312 +17,16 @@ */ //============================================================================== -#ifndef RIPPLE_BASICS_KEYCACHE_H_INCLUDED -#define RIPPLE_BASICS_KEYCACHE_H_INCLUDED +#ifndef RIPPLE_BASICS_KEYCACHE_H +#define RIPPLE_BASICS_KEYCACHE_H -#include -#include -#include -#include -#include +#include +#include namespace ripple { -/** Maintains a cache of keys with no associated data. - - The cache has a target size and an expiration time. When cached items become - older than the maximum age they are eligible for removal during a - call to @ref sweep. 
-*/ -template < - class Key, - class Hash = hardened_hash<>, - class KeyEqual = std::equal_to, - // class Allocator = std::allocator >, - class Mutex = std::mutex> -class KeyCache -{ -public: - using key_type = Key; - using clock_type = beast::abstract_clock; - -private: - struct Stats - { - template - Stats( - std::string const& prefix, - Handler const& handler, - beast::insight::Collector::ptr const& collector) - : hook(collector->make_hook(handler)) - , size(collector->make_gauge(prefix, "size")) - , hit_rate(collector->make_gauge(prefix, "hit_rate")) - , hits(0) - , misses(0) - { - } - - beast::insight::Hook hook; - beast::insight::Gauge size; - beast::insight::Gauge hit_rate; - - std::size_t hits; - std::size_t misses; - }; - - struct Entry - { - explicit Entry(clock_type::time_point const& last_access_) - : last_access(last_access_) - { - } - - clock_type::time_point last_access; - }; - - using map_type = hardened_hash_map; - using iterator = typename map_type::iterator; - -public: - using size_type = typename map_type::size_type; - -private: - Mutex mutable m_mutex; - map_type m_map; - Stats mutable m_stats; - clock_type& m_clock; - std::string const m_name; - size_type m_target_size; - clock_type::duration m_target_age; - -public: - /** Construct with the specified name. - - @param size The initial target size. - @param age The initial expiration time. 
- */ - KeyCache( - std::string const& name, - clock_type& clock, - beast::insight::Collector::ptr const& collector, - size_type target_size = 0, - std::chrono::seconds expiration = std::chrono::minutes{2}) - : m_stats(name, std::bind(&KeyCache::collect_metrics, this), collector) - , m_clock(clock) - , m_name(name) - , m_target_size(target_size) - , m_target_age(expiration) - { - } - - // VFALCO TODO Use a forwarding constructor call here - KeyCache( - std::string const& name, - clock_type& clock, - size_type target_size = 0, - std::chrono::seconds expiration = std::chrono::minutes{2}) - : m_stats( - name, - std::bind(&KeyCache::collect_metrics, this), - beast::insight::NullCollector::New()) - , m_clock(clock) - , m_name(name) - , m_target_size(target_size) - , m_target_age(expiration) - { - } - - //-------------------------------------------------------------------------- - - /** Retrieve the name of this object. */ - std::string const& - name() const - { - return m_name; - } - - /** Return the clock associated with the cache. */ - clock_type& - clock() - { - return m_clock; - } - - /** Returns the number of items in the container. */ - size_type - size() const - { - std::lock_guard lock(m_mutex); - return m_map.size(); - } - - /** Empty the cache */ - void - clear() - { - std::lock_guard lock(m_mutex); - m_map.clear(); - } - - void - reset() - { - std::lock_guard lock(m_mutex); - m_map.clear(); - m_stats.hits = 0; - m_stats.misses = 0; - } - - void - setTargetSize(size_type s) - { - std::lock_guard lock(m_mutex); - m_target_size = s; - } - - void - setTargetAge(std::chrono::seconds s) - { - std::lock_guard lock(m_mutex); - m_target_age = s; - } - - /** Returns `true` if the key was found. - Does not update the last access time. 
- */ - template - bool - exists(KeyComparable const& key) const - { - std::lock_guard lock(m_mutex); - typename map_type::const_iterator const iter(m_map.find(key)); - if (iter != m_map.end()) - { - ++m_stats.hits; - return true; - } - ++m_stats.misses; - return false; - } - - /** Insert the specified key. - The last access time is refreshed in all cases. - @return `true` If the key was newly inserted. - */ - bool - insert(Key const& key) - { - std::lock_guard lock(m_mutex); - clock_type::time_point const now(m_clock.now()); - auto [it, inserted] = m_map.emplace( - std::piecewise_construct, - std::forward_as_tuple(key), - std::forward_as_tuple(now)); - if (!inserted) - { - it->second.last_access = now; - return false; - } - return true; - } - - /** Refresh the last access time on a key if present. - @return `true` If the key was found. - */ - template - bool - touch_if_exists(KeyComparable const& key) - { - std::lock_guard lock(m_mutex); - iterator const iter(m_map.find(key)); - if (iter == m_map.end()) - { - ++m_stats.misses; - return false; - } - iter->second.last_access = m_clock.now(); - ++m_stats.hits; - return true; - } - - /** Remove the specified cache entry. - @param key The key to remove. - @return `false` If the key was not found. - */ - bool - erase(key_type const& key) - { - std::lock_guard lock(m_mutex); - if (m_map.erase(key) > 0) - { - ++m_stats.hits; - return true; - } - ++m_stats.misses; - return false; - } - - /** Remove stale entries from the cache. 
*/ - void - sweep() - { - clock_type::time_point const now(m_clock.now()); - clock_type::time_point when_expire; - - std::lock_guard lock(m_mutex); - - if (m_target_size == 0 || (m_map.size() <= m_target_size)) - { - when_expire = now - m_target_age; - } - else - { - when_expire = now - m_target_age * m_target_size / m_map.size(); - - clock_type::duration const minimumAge(std::chrono::seconds(1)); - if (when_expire > (now - minimumAge)) - when_expire = now - minimumAge; - } - - iterator it = m_map.begin(); - - while (it != m_map.end()) - { - if (it->second.last_access > now) - { - it->second.last_access = now; - ++it; - } - else if (it->second.last_access <= when_expire) - { - it = m_map.erase(it); - } - else - { - ++it; - } - } - } - -private: - void - collect_metrics() - { - m_stats.size.set(size()); - - { - beast::insight::Gauge::value_type hit_rate(0); - { - std::lock_guard lock(m_mutex); - auto const total(m_stats.hits + m_stats.misses); - if (total != 0) - hit_rate = (m_stats.hits * 100) / total; - } - m_stats.hit_rate.set(hit_rate); - } - } -}; +using KeyCache = TaggedCache; } // namespace ripple -#endif +#endif // RIPPLE_BASICS_KEYCACHE_H diff --git a/src/ripple/basics/SHAMapHash.h b/src/ripple/basics/SHAMapHash.h new file mode 100644 index 000000000..796510ba1 --- /dev/null +++ b/src/ripple/basics/SHAMapHash.h @@ -0,0 +1,113 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED +#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED + +#include + +#include + +namespace ripple { + +// A SHAMapHash is the hash of a node in a SHAMap, and also the +// type of the hash of the entire SHAMap. + +class SHAMapHash +{ + uint256 hash_; + +public: + SHAMapHash() = default; + explicit SHAMapHash(uint256 const& hash) : hash_(hash) + { + } + + uint256 const& + as_uint256() const + { + return hash_; + } + uint256& + as_uint256() + { + return hash_; + } + bool + isZero() const + { + return hash_.isZero(); + } + bool + isNonZero() const + { + return hash_.isNonZero(); + } + int + signum() const + { + return hash_.signum(); + } + void + zero() + { + hash_.zero(); + } + + friend bool + operator==(SHAMapHash const& x, SHAMapHash const& y) + { + return x.hash_ == y.hash_; + } + + friend bool + operator<(SHAMapHash const& x, SHAMapHash const& y) + { + return x.hash_ < y.hash_; + } + + friend std::ostream& + operator<<(std::ostream& os, SHAMapHash const& x) + { + return os << x.hash_; + } + + friend std::string + to_string(SHAMapHash const& x) + { + return to_string(x.hash_); + } + + template + friend void + hash_append(H& h, SHAMapHash const& x) + { + hash_append(h, x.hash_); + } +}; + +inline bool +operator!=(SHAMapHash const& x, SHAMapHash const& y) +{ + return !(x == y); +} + +} // namespace ripple + +#endif // RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED diff --git a/src/ripple/basics/TaggedCache.h b/src/ripple/basics/TaggedCache.h index 157c7c8c6..45f069bd9 100644 --- a/src/ripple/basics/TaggedCache.h +++ 
b/src/ripple/basics/TaggedCache.h @@ -25,8 +25,11 @@ #include #include #include +#include #include #include +#include +#include #include namespace ripple { @@ -46,6 +49,7 @@ namespace ripple { template < class Key, class T, + bool IsKeyCache = false, class Hash = hardened_hash<>, class KeyEqual = std::equal_to, class Mutex = std::recursive_mutex> @@ -89,11 +93,12 @@ public: return m_clock; } - int - getTargetSize() const + /** Returns the number of items in the container. */ + std::size_t + size() const { std::lock_guard lock(m_mutex); - return m_target_size; + return m_cache.size(); } void @@ -103,8 +108,15 @@ public: m_target_size = s; if (s > 0) - m_cache.rehash(static_cast( - (s + (s >> 2)) / m_cache.max_load_factor() + 1)); + { + for (auto& partition : m_cache.map()) + { + partition.rehash(static_cast( + (s + (s >> 2)) / + (partition.max_load_factor() * m_cache.partitions()) + + 1)); + } + } JLOG(m_journal.debug()) << m_name << " target size set to " << s; } @@ -165,22 +177,39 @@ public: m_misses = 0; } + /** Refresh the last access time on a key if present. + @return `true` If the key was found. + */ + template + bool + touch_if_exists(KeyComparable const& key) + { + std::lock_guard lock(m_mutex); + auto const iter(m_cache.find(key)); + if (iter == m_cache.end()) + { + ++m_stats.misses; + return false; + } + iter->second.touch(m_clock.now()); + ++m_stats.hits; + return true; + } + void sweep() { - int cacheRemovals = 0; - int mapRemovals = 0; - int cc = 0; - // Keep references to all the stuff we sweep - // so that we can destroy them outside the lock. - // - std::vector> stuffToSweep; + // For performance, each worker thread should exit before the swept data + // is destroyed but still within the main cache lock. 
+ std::vector>> allStuffToSweep( + m_cache.partitions()); + clock_type::time_point const now(m_clock.now()); + clock_type::time_point when_expire; + + auto const start = std::chrono::steady_clock::now(); { - clock_type::time_point const now(m_clock.now()); - clock_type::time_point when_expire; - std::lock_guard lock(m_mutex); if (m_target_size == 0 || @@ -204,61 +233,33 @@ public: << m_target_age.count(); } - stuffToSweep.reserve(m_cache.size()); + std::vector workers; + workers.reserve(m_cache.partitions()); + std::atomic allRemovals = 0; - auto cit = m_cache.begin(); - - while (cit != m_cache.end()) + for (std::size_t p = 0; p < m_cache.partitions(); ++p) { - if (cit->second.isWeak()) - { - // weak - if (cit->second.isExpired()) - { - ++mapRemovals; - cit = m_cache.erase(cit); - } - else - { - ++cit; - } - } - else if (cit->second.last_access <= when_expire) - { - // strong, expired - --m_cache_count; - ++cacheRemovals; - if (cit->second.ptr.unique()) - { - stuffToSweep.push_back(cit->second.ptr); - ++mapRemovals; - cit = m_cache.erase(cit); - } - else - { - // remains weakly cached - cit->second.ptr.reset(); - ++cit; - } - } - else - { - // strong, not expired - ++cc; - ++cit; - } + workers.push_back(sweepHelper( + when_expire, + now, + m_cache.map()[p], + allStuffToSweep[p], + allRemovals, + lock)); } - } + for (std::thread& worker : workers) + worker.join(); - if (mapRemovals || cacheRemovals) - { - JLOG(m_journal.trace()) - << m_name << ": cache = " << m_cache.size() << "-" - << cacheRemovals << ", map-=" << mapRemovals; + m_cache_count -= allRemovals; } - - // At this point stuffToSweep will go out of scope outside the lock + // At this point allStuffToSweep will go out of scope outside the lock // and decrement the reference count on each strong pointer. 
+ JLOG(m_journal.debug()) + << m_name << " TaggedCache sweep lock duration " + << std::chrono::duration_cast( + std::chrono::steady_clock::now() - start) + .count() + << "ms"; } bool @@ -391,51 +392,41 @@ public: std::shared_ptr fetch(const key_type& key) { - // fetch us a shared pointer to the stored data object - std::lock_guard lock(m_mutex); - - auto cit = m_cache.find(key); - - if (cit == m_cache.end()) - { + std::lock_guard l(m_mutex); + auto ret = initialFetch(key, l); + if (!ret) ++m_misses; - return {}; - } - - Entry& entry = cit->second; - entry.touch(m_clock.now()); - - if (entry.isCached()) - { - ++m_hits; - return entry.ptr; - } - - entry.ptr = entry.lock(); - - if (entry.isCached()) - { - // independent of cache size, so not counted as a hit - ++m_cache_count; - return entry.ptr; - } - - m_cache.erase(cit); - ++m_misses; - return {}; + return ret; } /** Insert the element into the container. If the key already exists, nothing happens. @return `true` If the element was inserted */ - bool + template + auto insert(key_type const& key, T const& value) + -> std::enable_if_t { auto p = std::make_shared(std::cref(value)); return canonicalize_replace_client(key, p); } + template + auto + insert(key_type const& key) -> std::enable_if_t + { + std::lock_guard lock(m_mutex); + clock_type::time_point const now(m_clock.now()); + auto [it, inserted] = m_cache.emplace( + std::piecewise_construct, + std::forward_as_tuple(key), + std::forward_as_tuple(now)); + if (!inserted) + it->second.last_access = now; + return inserted; + } + // VFALCO NOTE It looks like this returns a copy of the data in // the output parameter 'data'. This could be expensive. // Perhaps it should work like standard containers, which @@ -454,53 +445,6 @@ public: return true; } - /** Refresh the expiration time on a key. - - @param key The key to refresh. - @return `true` if the key was found and the object is cached. 
- */ - bool - refreshIfPresent(const key_type& key) - { - bool found = false; - - // If present, make current in cache - std::lock_guard lock(m_mutex); - - if (auto cit = m_cache.find(key); cit != m_cache.end()) - { - Entry& entry = cit->second; - - if (!entry.isCached()) - { - // Convert weak to strong. - entry.ptr = entry.lock(); - - if (entry.isCached()) - { - // We just put the object back in cache - ++m_cache_count; - entry.touch(m_clock.now()); - found = true; - } - else - { - // Couldn't get strong pointer, - // object fell out of the cache so remove the entry. - m_cache.erase(cit); - } - } - else - { - // It's cached so update the timer - entry.touch(m_clock.now()); - found = true; - } - } - - return found; - } - mutex_type& peekMutex() { @@ -522,7 +466,75 @@ public: return v; } + // CachedSLEs functions. + /** Returns the fraction of cache hits. */ + double + rate() const + { + std::lock_guard lock(m_mutex); + auto const tot = m_hits + m_misses; + if (tot == 0) + return 0; + return double(m_hits) / tot; + } + + /** Fetch an item from the cache. + If the digest was not found, Handler + will be called with this signature: + std::shared_ptr(void) + */ + template + std::shared_ptr + fetch(key_type const& digest, Handler const& h) + { + { + std::lock_guard l(m_mutex); + if (auto ret = initialFetch(digest, l)) + return ret; + } + + auto sle = h(); + if (!sle) + return {}; + + std::lock_guard l(m_mutex); + ++m_misses; + auto const [it, inserted] = + m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle))); + if (!inserted) + it->second.touch(m_clock.now()); + return it->second.ptr; + } + // End CachedSLEs functions. 
+ private: + std::shared_ptr + initialFetch(key_type const& key, std::lock_guard const& l) + { + auto cit = m_cache.find(key); + if (cit == m_cache.end()) + return {}; + + Entry& entry = cit->second; + if (entry.isCached()) + { + ++m_hits; + entry.touch(m_clock.now()); + return entry.ptr; + } + entry.ptr = entry.lock(); + if (entry.isCached()) + { + // independent of cache size, so not counted as a hit + ++m_cache_count; + entry.touch(m_clock.now()); + return entry.ptr; + } + + m_cache.erase(cit); + return {}; + } + void collect_metrics() { @@ -551,22 +563,44 @@ private: : hook(collector->make_hook(handler)) , size(collector->make_gauge(prefix, "size")) , hit_rate(collector->make_gauge(prefix, "hit_rate")) + , hits(0) + , misses(0) { } beast::insight::Hook hook; beast::insight::Gauge size; beast::insight::Gauge hit_rate; + + std::size_t hits; + std::size_t misses; }; - class Entry + class KeyOnlyEntry + { + public: + clock_type::time_point last_access; + + explicit KeyOnlyEntry(clock_type::time_point const& last_access_) + : last_access(last_access_) + { + } + + void + touch(clock_type::time_point const& now) + { + last_access = now; + } + }; + + class ValueEntry { public: std::shared_ptr ptr; std::weak_ptr weak_ptr; clock_type::time_point last_access; - Entry( + ValueEntry( clock_type::time_point const& last_access_, std::shared_ptr const& ptr_) : ptr(ptr_), weak_ptr(ptr_), last_access(last_access_) @@ -600,7 +634,136 @@ private: } }; - using cache_type = hardened_hash_map; + typedef + typename std::conditional::type + Entry; + + using KeyOnlyCacheType = + hardened_partitioned_hash_map; + + using KeyValueCacheType = + hardened_partitioned_hash_map; + + using cache_type = + hardened_partitioned_hash_map; + + [[nodiscard]] std::thread + sweepHelper( + clock_type::time_point const& when_expire, + [[maybe_unused]] clock_type::time_point const& now, + typename KeyValueCacheType::map_type& partition, + std::vector>& stuffToSweep, + std::atomic& allRemovals, + 
std::lock_guard const& lock) + { + return std::thread([&, this]() { + int cacheRemovals = 0; + int mapRemovals = 0; + + // Keep references to all the stuff we sweep + // so that we can destroy them outside the lock. + stuffToSweep.reserve(partition.size()); + { + auto cit = partition.begin(); + while (cit != partition.end()) + { + if (cit->second.isWeak()) + { + // weak + if (cit->second.isExpired()) + { + ++mapRemovals; + cit = partition.erase(cit); + } + else + { + ++cit; + } + } + else if (cit->second.last_access <= when_expire) + { + // strong, expired + ++cacheRemovals; + if (cit->second.ptr.unique()) + { + stuffToSweep.push_back(cit->second.ptr); + ++mapRemovals; + cit = partition.erase(cit); + } + else + { + // remains weakly cached + cit->second.ptr.reset(); + ++cit; + } + } + else + { + // strong, not expired + ++cit; + } + } + } + + if (mapRemovals || cacheRemovals) + { + JLOG(m_journal.debug()) + << "TaggedCache partition sweep " << m_name + << ": cache = " << partition.size() << "-" << cacheRemovals + << ", map-=" << mapRemovals; + } + + allRemovals += cacheRemovals; + }); + } + + [[nodiscard]] std::thread + sweepHelper( + clock_type::time_point const& when_expire, + clock_type::time_point const& now, + typename KeyOnlyCacheType::map_type& partition, + std::vector>& stuffToSweep, + std::atomic& allRemovals, + std::lock_guard const& lock) + { + return std::thread([&, this]() { + int cacheRemovals = 0; + int mapRemovals = 0; + + // Keep references to all the stuff we sweep + // so that we can destroy them outside the lock. 
+ stuffToSweep.reserve(partition.size()); + { + auto cit = partition.begin(); + while (cit != partition.end()) + { + if (cit->second.last_access > now) + { + cit->second.last_access = now; + ++cit; + } + else if (cit->second.last_access <= when_expire) + { + cit = partition.erase(cit); + } + else + { + ++cit; + } + } + } + + if (mapRemovals || cacheRemovals) + { + JLOG(m_journal.debug()) + << "TaggedCache partition sweep " << m_name + << ": cache = " << partition.size() << "-" << cacheRemovals + << ", map-=" << mapRemovals; + } + + allRemovals += cacheRemovals; + }); + }; beast::Journal m_journal; clock_type& m_clock; diff --git a/src/ripple/basics/UnorderedContainers.h b/src/ripple/basics/UnorderedContainers.h index 2758551b9..e929ebec8 100644 --- a/src/ripple/basics/UnorderedContainers.h +++ b/src/ripple/basics/UnorderedContainers.h @@ -21,6 +21,7 @@ #define RIPPLE_BASICS_UNORDEREDCONTAINERS_H_INCLUDED #include +#include #include #include #include @@ -86,6 +87,15 @@ template < class Allocator = std::allocator>> using hardened_hash_map = std::unordered_map; +template < + class Key, + class Value, + class Hash = hardened_hash, + class Pred = std::equal_to, + class Allocator = std::allocator>> +using hardened_partitioned_hash_map = + partitioned_unordered_map; + template < class Key, class Value, diff --git a/src/ripple/basics/impl/partitioned_unordered_map.cpp b/src/ripple/basics/impl/partitioned_unordered_map.cpp new file mode 100644 index 000000000..6fb2cbec1 --- /dev/null +++ b/src/ripple/basics/impl/partitioned_unordered_map.cpp @@ -0,0 +1,78 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include +#include +#include +#include +#include + +namespace ripple { + +static std::size_t +extract(uint256 const& key) +{ + return *reinterpret_cast(key.data()); +} + +static std::size_t +extract(SHAMapHash const& key) +{ + return *reinterpret_cast(key.as_uint256().data()); +} + +static std::size_t +extract(LedgerIndex key) +{ + return static_cast(key); +} + +static std::size_t +extract(std::string const& key) +{ + return ::beast::uhash<>{}(key); +} + +template +std::size_t +partitioner(Key const& key, std::size_t const numPartitions) +{ + return extract(key) % numPartitions; +} + +template std::size_t +partitioner( + LedgerIndex const& key, + std::size_t const numPartitions); + +template std::size_t +partitioner(uint256 const& key, std::size_t const numPartitions); + +template std::size_t +partitioner(SHAMapHash const& key, std::size_t const numPartitions); + +template std::size_t +partitioner( + std::string const& key, + std::size_t const numPartitions); + +} // namespace ripple diff --git a/src/ripple/basics/partitioned_unordered_map.h b/src/ripple/basics/partitioned_unordered_map.h new file mode 100644 index 000000000..08f4cba9d --- /dev/null +++ b/src/ripple/basics/partitioned_unordered_map.h @@ -0,0 +1,409 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: 
https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_PARTITIONED_UNORDERED_MAP_H +#define RIPPLE_BASICS_PARTITIONED_UNORDERED_MAP_H + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +template +std::size_t +partitioner(Key const& key, std::size_t const numPartitions); + +template < + typename Key, + typename Value, + typename Hash, + typename Pred = std::equal_to, + typename Alloc = std::allocator>> +class partitioned_unordered_map +{ + std::size_t partitions_; + +public: + using key_type = Key; + using mapped_type = Value; + using value_type = std::pair; + using size_type = std::size_t; + using difference_type = std::size_t; + using hasher = Hash; + using key_equal = Pred; + using allocator_type = Alloc; + using reference = value_type&; + using const_reference = value_type const&; + using pointer = value_type*; + using const_pointer = value_type const*; + using map_type = std:: + unordered_map; + using partition_map_type = std::vector; + + struct iterator + { + using iterator_category = std::forward_iterator_tag; + partition_map_type* map_{nullptr}; + typename 
partition_map_type::iterator ait_; + typename map_type::iterator mit_; + + iterator() = default; + + iterator(partition_map_type* map) : map_(map) + { + } + + reference + operator*() const + { + return *mit_; + } + + pointer + operator->() const + { + return &(*mit_); + } + + void + inc() + { + ++mit_; + while (mit_ == ait_->end()) + { + ++ait_; + if (ait_ == map_->end()) + return; + mit_ = ait_->begin(); + } + } + + // ++it + iterator& + operator++() + { + inc(); + return *this; + } + + // it++ + iterator + operator++(int) + { + iterator tmp(*this); + inc(); + return tmp; + } + + friend bool + operator==(iterator const& lhs, iterator const& rhs) + { + return lhs.map_ == rhs.map_ && lhs.ait_ == rhs.ait_ && + lhs.mit_ == rhs.mit_; + } + + friend bool + operator!=(iterator const& lhs, iterator const& rhs) + { + return !(lhs == rhs); + } + }; + + struct const_iterator + { + using iterator_category = std::forward_iterator_tag; + + partition_map_type* map_{nullptr}; + typename partition_map_type::iterator ait_; + typename map_type::iterator mit_; + + const_iterator() = default; + + const_iterator(partition_map_type* map) : map_(map) + { + } + + const_iterator(iterator const& orig) + { + map_ = orig.map_; + ait_ = orig.ait_; + mit_ = orig.mit_; + } + + const_reference + operator*() const + { + return *mit_; + } + + const_pointer + operator->() const + { + return &(*mit_); + } + + void + inc() + { + ++mit_; + while (mit_ == ait_->end()) + { + ++ait_; + if (ait_ == map_->end()) + return; + mit_ = ait_->begin(); + } + } + + // ++it + const_iterator& + operator++() + { + inc(); + return *this; + } + + // it++ + const_iterator + operator++(int) + { + const_iterator tmp(*this); + inc(); + return tmp; + } + + friend bool + operator==(const_iterator const& lhs, const_iterator const& rhs) + { + return lhs.map_ == rhs.map_ && lhs.ait_ == rhs.ait_ && + lhs.mit_ == rhs.mit_; + } + + friend bool + operator!=(const_iterator const& lhs, const_iterator const& rhs) + { + return !(lhs == 
rhs); + } + }; + +private: + std::size_t + partitioner(Key const& key) const + { + return ripple::partitioner(key, partitions_); + } + + template + static void + end(T& it) + { + it.ait_ = it.map_->end(); + it.mit_ = it.map_->back().end(); + } + + template + static void + begin(T& it) + { + for (it.ait_ = it.map_->begin(); it.ait_ != it.map_->end(); ++it.ait_) + { + if (it.ait_->begin() == it.ait_->end()) + continue; + it.mit_ = it.ait_->begin(); + return; + } + end(it); + } + +public: + partitioned_unordered_map( + std::optional partitions = std::nullopt) + { + // Set partitions to the number of hardware threads if the parameter + // is either empty or set to 0. + partitions_ = partitions && *partitions + ? *partitions + : std::thread::hardware_concurrency(); + map_.resize(partitions_); + assert(partitions_); + } + + std::size_t + partitions() const + { + return partitions_; + } + + partition_map_type& + map() + { + return map_; + } + + iterator + begin() + { + iterator it(&map_); + begin(it); + return it; + } + + const_iterator + cbegin() const + { + const_iterator it(&map_); + begin(it); + return it; + } + + const_iterator + begin() const + { + return cbegin(); + } + + iterator + end() + { + iterator it(&map_); + end(it); + return it; + } + + const_iterator + cend() const + { + const_iterator it(&map_); + end(it); + return it; + } + + const_iterator + end() const + { + return cend(); + } + +private: + template + void + find(key_type const& key, T& it) const + { + it.ait_ = it.map_->begin() + partitioner(key); + it.mit_ = it.ait_->find(key); + if (it.mit_ == it.ait_->end()) + end(it); + } + +public: + iterator + find(key_type const& key) + { + iterator it(&map_); + find(key, it); + return it; + } + + const_iterator + find(key_type const& key) const + { + const_iterator it(&map_); + find(key, it); + return it; + } + + template + std::pair + emplace(std::piecewise_construct_t const&, T&& keyTuple, U&& valueTuple) + { + auto const& key = std::get<0>(keyTuple); + 
iterator it(&map_); + it.ait_ = it.map_->begin() + partitioner(key); + auto [eit, inserted] = it.ait_->emplace( + std::piecewise_construct, + std::forward(keyTuple), + std::forward(valueTuple)); + it.mit_ = eit; + return {it, inserted}; + } + + template + std::pair + emplace(T&& key, U&& val) + { + iterator it(&map_); + it.ait_ = it.map_->begin() + partitioner(key); + auto [eit, inserted] = + it.ait_->emplace(std::forward(key), std::forward(val)); + it.mit_ = eit; + return {it, inserted}; + } + + void + clear() + { + for (auto& p : map_) + p.clear(); + } + + iterator + erase(const_iterator position) + { + iterator it(&map_); + it.ait_ = position.ait_; + it.mit_ = position.ait_->erase(position.mit_); + + while (it.mit_ == it.ait_->end()) + { + ++it.ait_; + if (it.ait_ == it.map_->end()) + break; + it.mit_ = it.ait_->begin(); + } + + return it; + } + + std::size_t + size() const + { + std::size_t ret = 0; + for (auto& p : map_) + ret += p.size(); + return ret; + } + + Value& + operator[](Key const& key) + { + return map_[partitioner(key)][key]; + } + +private: + mutable partition_map_type map_{}; +}; + +} // namespace ripple + +#endif // RIPPLE_BASICS_PARTITIONED_UNORDERED_MAP_H diff --git a/src/ripple/beast/clock/basic_seconds_clock.cpp b/src/ripple/beast/clock/basic_seconds_clock.cpp index c1c97cb7d..7c55a3f8b 100644 --- a/src/ripple/beast/clock/basic_seconds_clock.cpp +++ b/src/ripple/beast/clock/basic_seconds_clock.cpp @@ -19,6 +19,7 @@ #include +#include #include #include #include @@ -38,7 +39,7 @@ class seconds_clock_thread std::mutex mut_; std::condition_variable cv_; std::thread thread_; - Clock::time_point tp_; + std::atomic tp_; public: ~seconds_clock_thread(); @@ -52,6 +53,8 @@ private: run(); }; +static_assert(std::atomic::is_always_lock_free); + seconds_clock_thread::~seconds_clock_thread() { assert(thread_.joinable()); @@ -63,7 +66,8 @@ seconds_clock_thread::~seconds_clock_thread() thread_.join(); } -seconds_clock_thread::seconds_clock_thread() : 
stop_{false}, tp_{Clock::now()} +seconds_clock_thread::seconds_clock_thread() + : stop_{false}, tp_{Clock::now().time_since_epoch().count()} { thread_ = std::thread(&seconds_clock_thread::run, this); } @@ -71,8 +75,7 @@ seconds_clock_thread::seconds_clock_thread() : stop_{false}, tp_{Clock::now()} seconds_clock_thread::Clock::time_point seconds_clock_thread::now() { - std::lock_guard lock(mut_); - return tp_; + return Clock::time_point{Clock::duration{tp_.load()}}; } void @@ -83,8 +86,9 @@ seconds_clock_thread::run() { using namespace std::chrono; - tp_ = Clock::now(); - auto const when = floor(tp_) + 1s; + auto now = Clock::now(); + tp_ = now.time_since_epoch().count(); + auto const when = floor(now) + 1s; if (cv_.wait_until(lock, when, [this] { return stop_; })) return; } diff --git a/src/ripple/consensus/Consensus.h b/src/ripple/consensus/Consensus.h index 7a5c49c85..a86144c38 100644 --- a/src/ripple/consensus/Consensus.h +++ b/src/ripple/consensus/Consensus.h @@ -666,6 +666,7 @@ Consensus::startRoundInternal( ConsensusMode mode) { phase_ = ConsensusPhase::open; + JLOG(j_.debug()) << "transitioned to ConsensusPhase::open"; mode_.set(mode, adaptor_); now_ = now; prevLedgerID_ = prevLedgerID; @@ -1290,6 +1291,7 @@ Consensus::phaseEstablish() prevProposers_ = currPeerPositions_.size(); prevRoundTime_ = result_->roundTime.read(); phase_ = ConsensusPhase::accepted; + JLOG(j_.debug()) << "transitioned to ConsensusPhase::accepted"; adaptor_.onAccept( *result_, previousLedger_, @@ -1307,6 +1309,7 @@ Consensus::closeLedger() assert(!result_); phase_ = ConsensusPhase::establish; + JLOG(j_.debug()) << "transitioned to ConsensusPhase::establish"; rawCloseTimes_.self = now_; result_.emplace(adaptor_.onClose(previousLedger_, now_, mode_.get())); diff --git a/src/ripple/consensus/Validations.h b/src/ripple/consensus/Validations.h index 2cbddaa51..9200ac883 100644 --- a/src/ripple/consensus/Validations.h +++ b/src/ripple/consensus/Validations.h @@ -722,46 +722,59 @@ public: 
validationSET_EXPIRES ago and were not asked to keep. */ void - expire() + expire(beast::Journal& j) { - std::lock_guard lock{mutex_}; - if (toKeep_) + auto const start = std::chrono::steady_clock::now(); { - // We only need to refresh the keep range when it's just about to - // expire. Track the next time we need to refresh. - static std::chrono::steady_clock::time_point refreshTime; - if (auto const now = byLedger_.clock().now(); refreshTime <= now) + std::lock_guard lock{mutex_}; + if (toKeep_) { - // The next refresh time is shortly before the expiration - // time from now. - refreshTime = now + parms_.validationSET_EXPIRES - - parms_.validationFRESHNESS; - - for (auto i = byLedger_.begin(); i != byLedger_.end(); ++i) + // We only need to refresh the keep range when it's just about + // to expire. Track the next time we need to refresh. + static std::chrono::steady_clock::time_point refreshTime; + if (auto const now = byLedger_.clock().now(); + refreshTime <= now) { - auto const& validationMap = i->second; - if (!validationMap.empty()) + // The next refresh time is shortly before the expiration + // time from now. 
+ refreshTime = now + parms_.validationSET_EXPIRES - + parms_.validationFRESHNESS; + + for (auto i = byLedger_.begin(); i != byLedger_.end(); ++i) { - auto const seq = validationMap.begin()->second.seq(); - if (toKeep_->low_ <= seq && seq < toKeep_->high_) + auto const& validationMap = i->second; + if (!validationMap.empty()) { - byLedger_.touch(i); + auto const seq = + validationMap.begin()->second.seq(); + if (toKeep_->low_ <= seq && seq < toKeep_->high_) + { + byLedger_.touch(i); + } + } + } + + for (auto i = bySequence_.begin(); i != bySequence_.end(); + ++i) + { + if (toKeep_->low_ <= i->first && + i->first < toKeep_->high_) + { + bySequence_.touch(i); } } } - - for (auto i = bySequence_.begin(); i != bySequence_.end(); ++i) - { - if (toKeep_->low_ <= i->first && i->first < toKeep_->high_) - { - bySequence_.touch(i); - } - } } - } - beast::expire(byLedger_, parms_.validationSET_EXPIRES); - beast::expire(bySequence_, parms_.validationSET_EXPIRES); + beast::expire(byLedger_, parms_.validationSET_EXPIRES); + beast::expire(bySequence_, parms_.validationSET_EXPIRES); + } + JLOG(j.debug()) + << "Validations sets sweep lock duration " + << std::chrono::duration_cast( + std::chrono::steady_clock::now() - start) + .count() + << "ms"; } /** Update trust status of validations diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index 30117ad13..25f160558 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -146,8 +146,10 @@ public: std::size_t NETWORK_QUORUM = 1; // Peer networking parameters - bool RELAY_UNTRUSTED_VALIDATIONS = true; - bool RELAY_UNTRUSTED_PROPOSALS = false; + // 1 = relay, 0 = do not relay (but process), -1 = drop completely (do NOT + // process) + int RELAY_UNTRUSTED_VALIDATIONS = 1; + int RELAY_UNTRUSTED_PROPOSALS = 0; // True to ask peers not to relay current IP. 
bool PEER_PRIVATE = false; @@ -160,7 +162,18 @@ public: std::size_t PEERS_OUT_MAX = 0; std::size_t PEERS_IN_MAX = 0; - // Path searching + // Path searching: these were reasonable default values at some point but + // further research is needed to decide if they still are + // and whether all of them are needed. + // + // The performance and resource consumption of a server can + // be dramatically impacted by changing these configuration + // options; higher values result in exponentially higher + // resource usage. + // + // Servers operating as validators disable path finding by + // default by setting the `PATH_SEARCH_MAX` option to 0 + // unless it is explicitly set in the configuration file. int PATH_SEARCH_OLD = 7; int PATH_SEARCH = 7; int PATH_SEARCH_FAST = 2; @@ -201,8 +214,17 @@ public: // Amendment majority time std::chrono::seconds AMENDMENT_MAJORITY_TIME = defaultAmendmentMajorityTime; - // Thread pool configuration - std::size_t WORKERS = 0; + // Thread pool configuration (0 = choose for me) + int WORKERS = 0; // jobqueue thread count. default: upto 6 + int IO_WORKERS = 0; // io svc thread count. default: 2 + int PREFETCH_WORKERS = 0; // prefetch thread count. default: 4 + + // Can only be set in code, specifically unit tests + bool FORCE_MULTI_THREAD = false; + + // Normally the sweep timer is automatically deduced based on the node + // size, but we allow admins to explicitly set it in the config. + std::optional SWEEP_INTERVAL; // Reduce-relay - these parameters are experimental. 
// Enable reduce-relay features diff --git a/src/ripple/core/ConfigSections.h b/src/ripple/core/ConfigSections.h index 1c06e75d9..ba0f209c0 100644 --- a/src/ripple/core/ConfigSections.h +++ b/src/ripple/core/ConfigSections.h @@ -96,8 +96,11 @@ struct ConfigSection #define SECTION_VALIDATOR_TOKEN "validator_token" #define SECTION_VETO_AMENDMENTS "veto_amendments" #define SECTION_WORKERS "workers" +#define SECTION_IO_WORKERS "io_workers" +#define SECTION_PREFETCH_WORKERS "prefetch_workers" #define SECTION_LEDGER_REPLAY "ledger_replay" #define SECTION_BETA_RPC_API "beta_rpc_api" +#define SECTION_SWEEP_INTERVAL "sweep_interval" } // namespace ripple diff --git a/src/ripple/core/Job.h b/src/ripple/core/Job.h index 15552be70..66aa0d051 100644 --- a/src/ripple/core/Job.h +++ b/src/ripple/core/Job.h @@ -39,34 +39,41 @@ enum JobType { // earlier jobs having lower priority than later jobs. If you wish to // insert a job at a specific priority, simply add it at the right location. - jtPACK, // Make a fetch pack for a peer - jtPUBOLDLEDGER, // An old ledger has been accepted - jtVALIDATION_ut, // A validation from an untrusted source - jtTRANSACTION_l, // A local transaction - jtREPLAY_REQ, // Peer request a ledger delta or a skip list - jtLEDGER_REQ, // Peer request ledger/txnset data - jtPROPOSAL_ut, // A proposal from an untrusted source - jtREPLAY_TASK, // A Ledger replay task/subtask - jtLEDGER_DATA, // Received data for a ledger we're acquiring - jtCLIENT, // A websocket command from the client - jtRPC, // A websocket command from the client - jtUPDATE_PF, // Update pathfinding requests - jtTRANSACTION, // A transaction received from the network - jtMISSING_TXN, // Request missing transactions - jtREQUESTED_TXN, // Reply with requested transactions - jtBATCH, // Apply batched transactions - jtADVANCE, // Advance validated/acquired ledgers - jtPUBLEDGER, // Publish a fully-accepted ledger - jtTXN_DATA, // Fetch a proposed set - jtWAL, // Write-ahead logging - 
jtVALIDATION_t, // A validation from a trusted source - jtWRITE, // Write out hashed objects - jtACCEPT, // Accept a consensus ledger - jtPROPOSAL_t, // A proposal from a trusted source - jtSWEEP, // Sweep for stale structures - jtNETOP_CLUSTER, // NetworkOPs cluster peer report - jtNETOP_TIMER, // NetworkOPs net timer processing - jtADMIN, // An administrative operation + jtPACK, // Make a fetch pack for a peer + jtPUBOLDLEDGER, // An old ledger has been accepted + jtCLIENT, // A placeholder for the priority of all jtCLIENT jobs + jtCLIENT_SUBSCRIBE, // A websocket subscription by a client + jtCLIENT_FEE_CHANGE, // Subscription for fee change by a client + jtCLIENT_CONSENSUS, // Subscription for consensus state change by a client + jtCLIENT_ACCT_HIST, // Subscription for account history by a client + jtCLIENT_SHARD, // Client request for shard archiving + jtCLIENT_RPC, // Client RPC request + jtCLIENT_WEBSOCKET, // Client websocket request + jtRPC, // A websocket command from the client + jtSWEEP, // Sweep for stale structures + jtVALIDATION_ut, // A validation from an untrusted source + jtUPDATE_PF, // Update pathfinding requests + jtTRANSACTION_l, // A local transaction + jtREPLAY_REQ, // Peer request a ledger delta or a skip list + jtLEDGER_REQ, // Peer request ledger/txnset data + jtPROPOSAL_ut, // A proposal from an untrusted source + jtREPLAY_TASK, // A Ledger replay task/subtask + jtLEDGER_DATA, // Received data for a ledger we're acquiring + jtTRANSACTION, // A transaction received from the network + jtMISSING_TXN, // Request missing transactions + jtREQUESTED_TXN, // Reply with requested transactions + jtBATCH, // Apply batched transactions + jtADVANCE, // Advance validated/acquired ledgers + jtPUBLEDGER, // Publish a fully-accepted ledger + jtTXN_DATA, // Fetch a proposed set + jtWAL, // Write-ahead logging + jtVALIDATION_t, // A validation from a trusted source + jtWRITE, // Write out hashed objects + jtACCEPT, // Accept a consensus ledger + 
jtPROPOSAL_t, // A proposal from a trusted source + jtNETOP_CLUSTER, // NetworkOPs cluster peer report + jtNETOP_TIMER, // NetworkOPs net timer processing + jtADMIN, // An administrative operation // Special job types which are not dispatched by the job pool jtPEER, diff --git a/src/ripple/core/JobQueue.h b/src/ripple/core/JobQueue.h index 1d3fd5498..a93d68b85 100644 --- a/src/ripple/core/JobQueue.h +++ b/src/ripple/core/JobQueue.h @@ -141,6 +141,7 @@ public: using JobFunction = std::function; JobQueue( + int threadCount, beast::insight::Collector::ptr const& collector, beast::Journal journal, Logs& logs, @@ -200,11 +201,6 @@ public: int getJobCountGE(JobType t) const; - /** Set the number of thread serving the job queue to precisely this number. - */ - void - setThreadCount(int c, bool const standaloneMode); - /** Return a scoped LoadEvent. */ std::unique_ptr diff --git a/src/ripple/core/JobTypeInfo.h b/src/ripple/core/JobTypeInfo.h index f4bb79b05..7ed7c4697 100644 --- a/src/ripple/core/JobTypeInfo.h +++ b/src/ripple/core/JobTypeInfo.h @@ -31,11 +31,12 @@ private: JobType const m_type; std::string const m_name; - /** The limit on the number of running jobs for this job type. */ - int const m_limit; + /** The limit on the number of running jobs for this job type. - /** Special jobs are not dispatched via the job queue */ - bool const m_special; + A limit of 0 marks this as a "special job" which is not + dispatched via the job queue. + */ + int const m_limit; /** Average and peak latencies for this job type. 
0 is none specified */ std::chrono::milliseconds const m_avgLatency; @@ -49,13 +50,11 @@ public: JobType type, std::string name, int limit, - bool special, std::chrono::milliseconds avgLatency, std::chrono::milliseconds peakLatency) : m_type(type) , m_name(std::move(name)) , m_limit(limit) - , m_special(special) , m_avgLatency(avgLatency) , m_peakLatency(peakLatency) { @@ -82,7 +81,7 @@ public: bool special() const { - return m_special; + return m_limit == 0; } std::chrono::milliseconds diff --git a/src/ripple/core/JobTypes.h b/src/ripple/core/JobTypes.h index 45f69a530..b28bc3d59 100644 --- a/src/ripple/core/JobTypes.h +++ b/src/ripple/core/JobTypes.h @@ -41,63 +41,82 @@ private: jtINVALID, "invalid", 0, - true, std::chrono::milliseconds{0}, std::chrono::milliseconds{0}) { using namespace std::chrono_literals; int maxLimit = std::numeric_limits::max(); - add(jtPACK, "makeFetchPack", 1, false, 0ms, 0ms); - add(jtPUBOLDLEDGER, "publishAcqLedger", 2, false, 10000ms, 15000ms); - add(jtVALIDATION_ut, - "untrustedValidation", - maxLimit, - false, - 2000ms, - 5000ms); - add(jtTRANSACTION_l, "localTransaction", maxLimit, false, 100ms, 500ms); - add(jtREPLAY_REQ, "ledgerReplayRequest", 10, false, 250ms, 1000ms); - add(jtLEDGER_REQ, "ledgerRequest", 2, false, 0ms, 0ms); - add(jtPROPOSAL_ut, "untrustedProposal", maxLimit, false, 500ms, 1250ms); - add(jtREPLAY_TASK, "ledgerReplayTask", maxLimit, false, 0ms, 0ms); - add(jtLEDGER_DATA, "ledgerData", 2, false, 0ms, 0ms); - add(jtCLIENT, "clientCommand", maxLimit, false, 2000ms, 5000ms); - add(jtRPC, "RPC", maxLimit, false, 0ms, 0ms); - add(jtUPDATE_PF, "updatePaths", maxLimit, false, 0ms, 0ms); - add(jtTRANSACTION, "transaction", maxLimit, false, 250ms, 1000ms); - add(jtBATCH, "batch", maxLimit, false, 250ms, 1000ms); - add(jtADVANCE, "advanceLedger", maxLimit, false, 0ms, 0ms); - add(jtPUBLEDGER, "publishNewLedger", maxLimit, false, 3000ms, 4500ms); - add(jtTXN_DATA, "fetchTxnData", 1, false, 0ms, 0ms); - add(jtWAL, 
"writeAhead", maxLimit, false, 1000ms, 2500ms); - add(jtVALIDATION_t, - "trustedValidation", - maxLimit, - false, - 500ms, - 1500ms); - add(jtWRITE, "writeObjects", maxLimit, false, 1750ms, 2500ms); - add(jtACCEPT, "acceptLedger", maxLimit, false, 0ms, 0ms); - add(jtPROPOSAL_t, "trustedProposal", maxLimit, false, 100ms, 500ms); - add(jtSWEEP, "sweep", maxLimit, false, 0ms, 0ms); - add(jtNETOP_CLUSTER, "clusterReport", 1, false, 9999ms, 9999ms); - add(jtNETOP_TIMER, "heartbeat", 1, false, 999ms, 999ms); - add(jtADMIN, "administration", maxLimit, false, 0ms, 0ms); - add(jtMISSING_TXN, "handleHaveTransactions", 1200, false, 0ms, 0ms); - add(jtREQUESTED_TXN, "doTransactions", 1200, false, 0ms, 0ms); + auto add = [this]( + JobType jt, + std::string name, + int limit, + std::chrono::milliseconds avgLatency, + std::chrono::milliseconds peakLatency) { + assert(m_map.find(jt) == m_map.end()); - add(jtPEER, "peerCommand", 0, true, 200ms, 2500ms); - add(jtDISK, "diskAccess", 0, true, 500ms, 1000ms); - add(jtTXN_PROC, "processTransaction", 0, true, 0ms, 0ms); - add(jtOB_SETUP, "orderBookSetup", 0, true, 0ms, 0ms); - add(jtPATH_FIND, "pathFind", 0, true, 0ms, 0ms); - add(jtHO_READ, "nodeRead", 0, true, 0ms, 0ms); - add(jtHO_WRITE, "nodeWrite", 0, true, 0ms, 0ms); - add(jtGENERIC, "generic", 0, true, 0ms, 0ms); - add(jtNS_SYNC_READ, "SyncReadNode", 0, true, 0ms, 0ms); - add(jtNS_ASYNC_READ, "AsyncReadNode", 0, true, 0ms, 0ms); - add(jtNS_WRITE, "WriteNode", 0, true, 0ms, 0ms); + auto const [_, inserted] = m_map.emplace( + std::piecewise_construct, + std::forward_as_tuple(jt), + std::forward_as_tuple( + jt, name, limit, avgLatency, peakLatency)); + + assert(inserted == true); + (void)_; + (void)inserted; + }; + + // clang-format off + // avg peak + // JobType name limit latency latency + add(jtPACK, "makeFetchPack", 1, 0ms, 0ms); + add(jtPUBOLDLEDGER, "publishAcqLedger", 2, 10000ms, 15000ms); + add(jtVALIDATION_ut, "untrustedValidation", maxLimit, 2000ms, 5000ms); + 
add(jtTRANSACTION_l, "localTransaction", maxLimit, 100ms, 500ms); + add(jtREPLAY_REQ, "ledgerReplayRequest", 10, 250ms, 1000ms); + add(jtLEDGER_REQ, "ledgerRequest", 4, 0ms, 0ms); + add(jtPROPOSAL_ut, "untrustedProposal", maxLimit, 500ms, 1250ms); + add(jtREPLAY_TASK, "ledgerReplayTask", maxLimit, 0ms, 0ms); + add(jtLEDGER_DATA, "ledgerData", 4, 0ms, 0ms); + add(jtCLIENT, "clientCommand", maxLimit, 2000ms, 5000ms); + add(jtCLIENT_SUBSCRIBE, "clientSubscribe", maxLimit, 2000ms, 5000ms); + add(jtCLIENT_FEE_CHANGE, "clientFeeChange", maxLimit, 2000ms, 5000ms); + add(jtCLIENT_CONSENSUS, "clientConsensus", maxLimit, 2000ms, 5000ms); + add(jtCLIENT_ACCT_HIST, "clientAccountHistory", maxLimit, 2000ms, 5000ms); + add(jtCLIENT_SHARD, "clientShardArchive", maxLimit, 2000ms, 5000ms); + add(jtCLIENT_RPC, "clientRPC", maxLimit, 2000ms, 5000ms); + add(jtCLIENT_WEBSOCKET, "clientWebsocket", maxLimit, 2000ms, 5000ms); + add(jtRPC, "RPC", maxLimit, 0ms, 0ms); + add(jtUPDATE_PF, "updatePaths", 1, 0ms, 0ms); + add(jtTRANSACTION, "transaction", maxLimit, 250ms, 1000ms); + add(jtBATCH, "batch", maxLimit, 250ms, 1000ms); + add(jtADVANCE, "advanceLedger", maxLimit, 0ms, 0ms); + add(jtPUBLEDGER, "publishNewLedger", maxLimit, 3000ms, 4500ms); + add(jtTXN_DATA, "fetchTxnData", 5, 0ms, 0ms); + add(jtWAL, "writeAhead", maxLimit, 1000ms, 2500ms); + add(jtVALIDATION_t, "trustedValidation", maxLimit, 500ms, 1500ms); + add(jtWRITE, "writeObjects", maxLimit, 1750ms, 2500ms); + add(jtACCEPT, "acceptLedger", maxLimit, 0ms, 0ms); + add(jtPROPOSAL_t, "trustedProposal", maxLimit, 100ms, 500ms); + add(jtSWEEP, "sweep", 1, 0ms, 0ms); + add(jtNETOP_CLUSTER, "clusterReport", 1, 9999ms, 9999ms); + add(jtNETOP_TIMER, "heartbeat", 1, 999ms, 999ms); + add(jtADMIN, "administration", maxLimit, 0ms, 0ms); + add(jtMISSING_TXN, "handleHaveTransactions", 1200, 0ms, 0ms); + add(jtREQUESTED_TXN, "doTransactions", 1200, 0ms, 0ms); + + add(jtPEER, "peerCommand", 0, 200ms, 2500ms); + add(jtDISK, "diskAccess", 0, 500ms, 
1000ms); + add(jtTXN_PROC, "processTransaction", 0, 0ms, 0ms); + add(jtOB_SETUP, "orderBookSetup", 0, 0ms, 0ms); + add(jtPATH_FIND, "pathFind", 0, 0ms, 0ms); + add(jtHO_READ, "nodeRead", 0, 0ms, 0ms); + add(jtHO_WRITE, "nodeWrite", 0, 0ms, 0ms); + add(jtGENERIC, "generic", 0, 0ms, 0ms); + add(jtNS_SYNC_READ, "SyncReadNode", 0, 0ms, 0ms); + add(jtNS_ASYNC_READ, "AsyncReadNode", 0, 0ms, 0ms); + add(jtNS_WRITE, "WriteNode", 0, 0ms, 0ms); + // clang-format on } public: @@ -162,28 +181,6 @@ public: return m_map.cend(); } -private: - void - add(JobType jt, - std::string name, - int limit, - bool special, - std::chrono::milliseconds avgLatency, - std::chrono::milliseconds peakLatency) - { - assert(m_map.find(jt) == m_map.end()); - - auto const [_, inserted] = m_map.emplace( - std::piecewise_construct, - std::forward_as_tuple(jt), - std::forward_as_tuple( - jt, name, limit, special, avgLatency, peakLatency)); - - assert(inserted == true); - (void)_; - (void)inserted; - } - JobTypeInfo m_unknown; Map m_map; }; diff --git a/src/ripple/core/Pg.cpp b/src/ripple/core/Pg.cpp index 2e2121c11..df0f6da5e 100644 --- a/src/ripple/core/Pg.cpp +++ b/src/ripple/core/Pg.cpp @@ -792,11 +792,7 @@ CREATE OR REPLACE FUNCTION tx ( _in_trans_id bytea ) RETURNS jsonb AS $$ DECLARE - _min_ledger bigint := min_ledger(); - _min_seq bigint := (SELECT ledger_seq - FROM ledgers - WHERE ledger_seq = _min_ledger - FOR SHARE); + _min_seq bigint := min_ledger(); _max_seq bigint := max_ledger(); _ledger_seq bigint; _nodestore_hash bytea; diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index 08c30f0da..ae65709a1 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -553,9 +553,11 @@ Config::loadFromString(std::string const& fileContents) if (getSingleSection(secConfig, SECTION_RELAY_VALIDATIONS, strTemp, j_)) { if (boost::iequals(strTemp, "all")) - RELAY_UNTRUSTED_VALIDATIONS = true; + RELAY_UNTRUSTED_VALIDATIONS = 1; else if 
(boost::iequals(strTemp, "trusted")) - RELAY_UNTRUSTED_VALIDATIONS = false; + RELAY_UNTRUSTED_VALIDATIONS = 0; + else if (boost::iequals(strTemp, "drop_untrusted")) + RELAY_UNTRUSTED_VALIDATIONS = -1; else Throw( "Invalid value specified in [" SECTION_RELAY_VALIDATIONS @@ -565,9 +567,11 @@ Config::loadFromString(std::string const& fileContents) if (getSingleSection(secConfig, SECTION_RELAY_PROPOSALS, strTemp, j_)) { if (boost::iequals(strTemp, "all")) - RELAY_UNTRUSTED_PROPOSALS = true; + RELAY_UNTRUSTED_PROPOSALS = 1; else if (boost::iequals(strTemp, "trusted")) - RELAY_UNTRUSTED_PROPOSALS = false; + RELAY_UNTRUSTED_PROPOSALS = 0; + else if (boost::iequals(strTemp, "drop_untrusted")) + RELAY_UNTRUSTED_PROPOSALS = -1; else Throw( "Invalid value specified in [" SECTION_RELAY_PROPOSALS @@ -615,6 +619,11 @@ Config::loadFromString(std::string const& fileContents) FETCH_DEPTH = 10; } + // By default, validators don't have pathfinding enabled, unless it is + // explicitly requested by the server's admin. 
+ if (exists(SECTION_VALIDATION_SEED) || exists(SECTION_VALIDATOR_TOKEN)) + PATH_SEARCH_MAX = 0; + if (getSingleSection(secConfig, SECTION_PATH_SEARCH_OLD, strTemp, j_)) PATH_SEARCH_OLD = beast::lexicalCastThrow(strTemp); if (getSingleSection(secConfig, SECTION_PATH_SEARCH, strTemp, j_)) @@ -627,8 +636,44 @@ Config::loadFromString(std::string const& fileContents) if (getSingleSection(secConfig, SECTION_DEBUG_LOGFILE, strTemp, j_)) DEBUG_LOGFILE = strTemp; + if (getSingleSection(secConfig, SECTION_SWEEP_INTERVAL, strTemp, j_)) + { + SWEEP_INTERVAL = beast::lexicalCastThrow(strTemp); + + if (SWEEP_INTERVAL < 10 || SWEEP_INTERVAL > 600) + Throw("Invalid " SECTION_SWEEP_INTERVAL + ": must be between 10 and 600 inclusive"); + } + if (getSingleSection(secConfig, SECTION_WORKERS, strTemp, j_)) - WORKERS = beast::lexicalCastThrow(strTemp); + { + WORKERS = beast::lexicalCastThrow(strTemp); + + if (WORKERS < 1 || WORKERS > 1024) + Throw( + "Invalid " SECTION_WORKERS + ": must be between 1 and 1024 inclusive."); + } + + if (getSingleSection(secConfig, SECTION_IO_WORKERS, strTemp, j_)) + { + IO_WORKERS = beast::lexicalCastThrow(strTemp); + + if (IO_WORKERS < 1 || IO_WORKERS > 1024) + Throw( + "Invalid " SECTION_IO_WORKERS + ": must be between 1 and 1024 inclusive."); + } + + if (getSingleSection(secConfig, SECTION_PREFETCH_WORKERS, strTemp, j_)) + { + PREFETCH_WORKERS = beast::lexicalCastThrow(strTemp); + + if (PREFETCH_WORKERS < 1 || PREFETCH_WORKERS > 1024) + Throw( + "Invalid " SECTION_PREFETCH_WORKERS + ": must be between 1 and 1024 inclusive."); + } if (getSingleSection(secConfig, SECTION_COMPRESSION, strTemp, j_)) COMPRESSION = beast::lexicalCastThrow(strTemp); diff --git a/src/ripple/core/impl/JobQueue.cpp b/src/ripple/core/impl/JobQueue.cpp index 4fd394994..85dcec1ca 100644 --- a/src/ripple/core/impl/JobQueue.cpp +++ b/src/ripple/core/impl/JobQueue.cpp @@ -25,6 +25,7 @@ namespace ripple { JobQueue::JobQueue( + int threadCount, beast::insight::Collector::ptr const& 
collector, beast::Journal journal, Logs& logs, @@ -33,11 +34,13 @@ JobQueue::JobQueue( , m_lastJob(0) , m_invalidJobData(JobTypes::instance().getInvalid(), collector, logs) , m_processCount(0) - , m_workers(*this, &perfLog, "JobQueue", 0) + , m_workers(*this, &perfLog, "JobQueue", threadCount) , m_cancelCallback(std::bind(&JobQueue::isStopping, this)) , perfLog_(perfLog) , m_collector(collector) { + JLOG(m_journal.info()) << "Using " << threadCount << " threads"; + hook = m_collector->make_hook(std::bind(&JobQueue::collect, this)); job_count = m_collector->make_gauge("job_count"); @@ -91,7 +94,9 @@ JobQueue::addRefCountedJob( // FIXME: Workaround incorrect client shutdown ordering // do not add jobs to a queue with no threads - assert(type == jtCLIENT || m_workers.getNumberOfThreads() > 0); + assert( + (type >= jtCLIENT && type <= jtCLIENT_WEBSOCKET) || + m_workers.getNumberOfThreads() > 0); { std::lock_guard lock(m_mutex); @@ -139,29 +144,6 @@ JobQueue::getJobCountGE(JobType t) const return ret; } -void -JobQueue::setThreadCount(int c, bool const standaloneMode) -{ - if (standaloneMode) - { - c = 1; - } - else if (c == 0) - { - c = static_cast(std::thread::hardware_concurrency()); - c = 2 + std::min(c, 4); // I/O will bottleneck - JLOG(m_journal.info()) << "Auto-tuning to " << c - << " validation/transaction/proposal threads."; - } - else - { - JLOG(m_journal.info()) << "Configured " << c - << " validation/transaction/proposal threads."; - } - - m_workers.setNumberOfThreads(c); -} - std::unique_ptr JobQueue::makeLoadEvent(JobType t, std::string const& name) { @@ -188,15 +170,9 @@ JobQueue::addLoadEvents(JobType t, int count, std::chrono::milliseconds elapsed) bool JobQueue::isOverloaded() { - int count = 0; - - for (auto& x : m_jobData) - { - if (x.second.load().isOver()) - ++count; - } - - return count > 0; + return std::any_of(m_jobData.begin(), m_jobData.end(), [](auto& entry) { + return entry.second.load().isOver(); + }); } Json::Value diff --git 
a/src/ripple/crypto/impl/RFC1751.cpp b/src/ripple/crypto/impl/RFC1751.cpp index 177d43858..6b6b2c31f 100644 --- a/src/ripple/crypto/impl/RFC1751.cpp +++ b/src/ripple/crypto/impl/RFC1751.cpp @@ -438,7 +438,7 @@ RFC1751::etob(std::string& strData, std::vector vsHuman) return 1; } -/** Convert words seperated by spaces into a 128 bit key in big-endian format. +/** Convert words separated by spaces into a 128 bit key in big-endian format. @return 1 if succeeded diff --git a/src/ripple/ledger/ApplyView.h b/src/ripple/ledger/ApplyView.h index fc394844c..cecb0a0e6 100644 --- a/src/ripple/ledger/ApplyView.h +++ b/src/ripple/ledger/ApplyView.h @@ -37,11 +37,6 @@ enum ApplyFlags : std::uint32_t { // Transaction can be retried, soft failures allowed tapRETRY = 0x20, - // Transaction must pay more than both the open ledger - // fee and all transactions in the queue to get into the - // open ledger - tapPREFER_QUEUE = 0x40, - // Transaction came from a privileged source tapUNLIMITED = 0x400 }; @@ -55,10 +50,10 @@ operator|(ApplyFlags const& lhs, ApplyFlags const& rhs) } static_assert( - (tapPREFER_QUEUE | tapRETRY) == safe_cast(0x60u), + (tapFAIL_HARD | tapRETRY) == safe_cast(0x30u), "ApplyFlags operator |"); static_assert( - (tapRETRY | tapPREFER_QUEUE) == safe_cast(0x60u), + (tapRETRY | tapFAIL_HARD) == safe_cast(0x30u), "ApplyFlags operator |"); constexpr ApplyFlags @@ -69,8 +64,8 @@ operator&(ApplyFlags const& lhs, ApplyFlags const& rhs) safe_cast>(rhs)); } -static_assert((tapPREFER_QUEUE & tapRETRY) == tapNONE, "ApplyFlags operator &"); -static_assert((tapRETRY & tapPREFER_QUEUE) == tapNONE, "ApplyFlags operator &"); +static_assert((tapFAIL_HARD & tapRETRY) == tapNONE, "ApplyFlags operator &"); +static_assert((tapRETRY & tapFAIL_HARD) == tapNONE, "ApplyFlags operator &"); constexpr ApplyFlags operator~(ApplyFlags const& flags) diff --git a/src/ripple/ledger/CachedSLEs.h b/src/ripple/ledger/CachedSLEs.h index 190bc3709..d2b04e2cb 100644 --- a/src/ripple/ledger/CachedSLEs.h 
+++ b/src/ripple/ledger/CachedSLEs.h @@ -20,90 +20,12 @@ #ifndef RIPPLE_LEDGER_CACHEDSLES_H_INCLUDED #define RIPPLE_LEDGER_CACHEDSLES_H_INCLUDED -#include -#include +#include +#include #include -#include -#include namespace ripple { +using CachedSLEs = TaggedCache; +} -/** Caches SLEs by their digest. */ -class CachedSLEs -{ -public: - using digest_type = uint256; - - using value_type = std::shared_ptr; - - CachedSLEs(CachedSLEs const&) = delete; - CachedSLEs& - operator=(CachedSLEs const&) = delete; - - template - CachedSLEs( - std::chrono::duration const& timeToLive, - Stopwatch& clock) - : timeToLive_(timeToLive), map_(clock) - { - } - - /** Discard expired entries. - - Needs to be called periodically. - */ - void - expire(); - - /** Fetch an item from the cache. - - If the digest was not found, Handler - will be called with this signature: - - std::shared_ptr(void) - */ - template - value_type - fetch(digest_type const& digest, Handler const& h) - { - { - std::lock_guard lock(mutex_); - auto iter = map_.find(digest); - if (iter != map_.end()) - { - ++hit_; - map_.touch(iter); - return iter->second; - } - } - auto sle = h(); - if (!sle) - return nullptr; - std::lock_guard lock(mutex_); - ++miss_; - auto const [it, inserted] = map_.emplace(digest, std::move(sle)); - if (!inserted) - map_.touch(it); - return it->second; - } - - /** Returns the fraction of cache hits. 
*/ - double - rate() const; - -private: - std::size_t hit_ = 0; - std::size_t miss_ = 0; - std::mutex mutable mutex_; - Stopwatch::duration timeToLive_; - beast::aged_unordered_map< - digest_type, - value_type, - Stopwatch::clock_type, - hardened_hash> - map_; -}; - -} // namespace ripple - -#endif +#endif // RIPPLE_LEDGER_CACHEDSLES_H_INCLUDED diff --git a/src/ripple/ledger/impl/CachedSLEs.cpp b/src/ripple/ledger/impl/CachedSLEs.cpp deleted file mode 100644 index 3d00aeaf1..000000000 --- a/src/ripple/ledger/impl/CachedSLEs.cpp +++ /dev/null @@ -1,57 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include - -namespace ripple { - -void -CachedSLEs::expire() -{ - std::vector> trash; - { - auto const expireTime = map_.clock().now() - timeToLive_; - std::lock_guard lock(mutex_); - for (auto iter = map_.chronological.begin(); - iter != map_.chronological.end(); - ++iter) - { - if (iter.when() > expireTime) - break; - if (iter->second.unique()) - { - trash.emplace_back(std::move(iter->second)); - iter = map_.erase(iter); - } - } - } -} - -double -CachedSLEs::rate() const -{ - std::lock_guard lock(mutex_); - auto const tot = hit_ + miss_; - if (tot == 0) - return 0; - return double(hit_) / tot; -} - -} // namespace ripple diff --git a/src/ripple/net/RPCErr.h b/src/ripple/net/RPCErr.h index f2bce8639..e49e96b3d 100644 --- a/src/ripple/net/RPCErr.h +++ b/src/ripple/net/RPCErr.h @@ -28,7 +28,7 @@ namespace ripple { bool isRpcError(Json::Value jvResult); Json::Value -rpcError(int iError, Json::Value jvResult = Json::Value(Json::objectValue)); +rpcError(int iError); } // namespace ripple diff --git a/src/ripple/net/impl/RPCErr.cpp b/src/ripple/net/impl/RPCErr.cpp index 1c28eda00..8af2a248c 100644 --- a/src/ripple/net/impl/RPCErr.cpp +++ b/src/ripple/net/impl/RPCErr.cpp @@ -26,8 +26,9 @@ struct RPCErr; // VFALCO NOTE Deprecated function Json::Value -rpcError(int iError, Json::Value jvResult) +rpcError(int iError) { + Json::Value jvResult(Json::objectValue); RPC::inject_error(iError, jvResult); return jvResult; } diff --git a/src/ripple/net/impl/RPCSub.cpp b/src/ripple/net/impl/RPCSub.cpp index f9b08e5c0..f65f9a361 100644 --- a/src/ripple/net/impl/RPCSub.cpp +++ b/src/ripple/net/impl/RPCSub.cpp @@ -96,7 +96,9 @@ public: JLOG(j_.info()) << "RPCCall::fromNetwork start"; mSending = m_jobQueue.addJob( - jtCLIENT, "RPCSub::sendThread", [this](Job&) { sendThread(); }); + jtCLIENT_SUBSCRIBE, "RPCSub::sendThread", [this](Job&) { + sendThread(); + }); } } diff --git 
a/src/ripple/nodestore/Database.h b/src/ripple/nodestore/Database.h index 471621bc8..c2c5b5b88 100644 --- a/src/ripple/nodestore/Database.h +++ b/src/ripple/nodestore/Database.h @@ -20,7 +20,6 @@ #ifndef RIPPLE_NODESTORE_DATABASE_H_INCLUDED #define RIPPLE_NODESTORE_DATABASE_H_INCLUDED -#include #include #include #include diff --git a/src/ripple/nodestore/backend/CassandraFactory.cpp b/src/ripple/nodestore/backend/CassandraFactory.cpp index 10282d94b..c8d0c139c 100644 --- a/src/ripple/nodestore/backend/CassandraFactory.cpp +++ b/src/ripple/nodestore/backend/CassandraFactory.cpp @@ -249,18 +249,22 @@ public: cluster, username.c_str(), get(config_, "password").c_str()); } - unsigned int const workers = std::thread::hardware_concurrency(); - rc = cass_cluster_set_num_threads_io(cluster, workers); + unsigned int const ioThreads = get(config_, "io_threads", 4); + maxRequestsOutstanding = + get(config_, "max_requests_outstanding", 10000000); + JLOG(j_.info()) << "Configuring Cassandra driver to use " << ioThreads + << " IO threads. 
Capping maximum pending requests at " + << maxRequestsOutstanding; + rc = cass_cluster_set_num_threads_io(cluster, ioThreads); if (rc != CASS_OK) { std::stringstream ss; - ss << "nodestore: Error setting Cassandra io threads to " << workers - << ", result: " << rc << ", " << cass_error_desc(rc); + ss << "nodestore: Error setting Cassandra io threads to " + << ioThreads << ", result: " << rc << ", " + << cass_error_desc(rc); Throw(ss.str()); } - cass_cluster_set_request_timeout(cluster, 2000); - rc = cass_cluster_set_queue_size_io( cluster, maxRequestsOutstanding); // This number needs to scale w/ the @@ -275,6 +279,7 @@ public: return; ; } + cass_cluster_set_request_timeout(cluster, 2000); std::string certfile = get(config_, "certfile"); if (certfile.size()) @@ -466,12 +471,6 @@ public: work_.emplace(ioContext_); ioThread_ = std::thread{[this]() { ioContext_.run(); }}; open_ = true; - - if (config_.exists("max_requests_outstanding")) - { - maxRequestsOutstanding = - get(config_, "max_requests_outstanding"); - } } // Close the connection to the database diff --git a/src/ripple/nodestore/backend/NuDBFactory.cpp b/src/ripple/nodestore/backend/NuDBFactory.cpp index 405e13acd..2b20b574a 100644 --- a/src/ripple/nodestore/backend/NuDBFactory.cpp +++ b/src/ripple/nodestore/backend/NuDBFactory.cpp @@ -92,7 +92,16 @@ public: ~NuDBBackend() override { - close(); + try + { + // close can throw and we don't want the destructor to throw. + close(); + } + catch (nudb::system_error const&) + { + // Don't allow exceptions to propagate out of destructors. + // close() has already logged the error. + } } std::string @@ -174,10 +183,20 @@ public: nudb::error_code ec; db_.close(ec); if (ec) + { + // Log to make sure the nature of the error gets to the user. 
+ JLOG(j_.fatal()) << "NuBD close() failed: " << ec.message(); Throw(ec); + } + if (deletePath_) { - boost::filesystem::remove_all(name_); + boost::filesystem::remove_all(name_, ec); + if (ec) + { + JLOG(j_.fatal()) << "Filesystem remove_all of " << name_ + << " failed with: " << ec.message(); + } } } } diff --git a/src/ripple/nodestore/backend/RocksDBFactory.cpp b/src/ripple/nodestore/backend/RocksDBFactory.cpp index 18223e77f..e17dc55de 100644 --- a/src/ripple/nodestore/backend/RocksDBFactory.cpp +++ b/src/ripple/nodestore/backend/RocksDBFactory.cpp @@ -111,9 +111,18 @@ public: rocksdb::BlockBasedTableOptions table_options; m_options.env = env; + bool hard_set = + keyValues.exists("hard_set") && get(keyValues, "hard_set"); + if (keyValues.exists("cache_mb")) - table_options.block_cache = rocksdb::NewLRUCache( - get(keyValues, "cache_mb") * megabytes(1)); + { + auto size = get(keyValues, "cache_mb"); + + if (!hard_set && size == 256) + size = 1024; + + table_options.block_cache = rocksdb::NewLRUCache(megabytes(size)); + } if (auto const v = get(keyValues, "filter_bits")) { @@ -124,12 +133,21 @@ public: } if (get_if_exists(keyValues, "open_files", m_options.max_open_files)) - fdRequired_ = m_options.max_open_files; + { + if (!hard_set && m_options.max_open_files == 2000) + m_options.max_open_files = 8000; + + fdRequired_ = m_options.max_open_files + 128; + } if (keyValues.exists("file_size_mb")) { - m_options.target_file_size_base = - megabytes(1) * get(keyValues, "file_size_mb"); + auto file_size_mb = get(keyValues, "file_size_mb"); + + if (!hard_set && file_size_mb == 8) + file_size_mb = 256; + + m_options.target_file_size_base = megabytes(file_size_mb); m_options.max_bytes_for_level_base = 5 * m_options.target_file_size_base; m_options.write_buffer_size = 2 * m_options.target_file_size_base; diff --git a/src/ripple/nodestore/impl/Shard.h b/src/ripple/nodestore/impl/Shard.h index 4fc17a8d4..17001a6b8 100644 --- a/src/ripple/nodestore/impl/Shard.h +++ 
b/src/ripple/nodestore/impl/Shard.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -39,7 +40,7 @@ namespace ripple { namespace NodeStore { using PCache = TaggedCache; -using NCache = KeyCache; +using NCache = KeyCache; class DatabaseShard; /* A range of historical ledgers backed by a node store. diff --git a/src/ripple/overlay/impl/OverlayImpl.cpp b/src/ripple/overlay/impl/OverlayImpl.cpp index 6771db089..47e03a76b 100644 --- a/src/ripple/overlay/impl/OverlayImpl.cpp +++ b/src/ripple/overlay/impl/OverlayImpl.cpp @@ -190,7 +190,7 @@ OverlayImpl::onHandoff( auto consumer = m_resourceManager.newInboundEndpoint( beast::IPAddressConversion::from_asio(remote_endpoint)); - if (consumer.disconnect()) + if (consumer.disconnect(journal)) return handoff; auto const slot = m_peerFinder->new_inbound_slot( @@ -392,7 +392,7 @@ OverlayImpl::connect(beast::IP::Endpoint const& remote_endpoint) assert(work_); auto usage = resourceManager().newOutboundEndpoint(remote_endpoint); - if (usage.disconnect()) + if (usage.disconnect(journal_)) { JLOG(journal_.info()) << "Over resource limit: " << remote_endpoint; return; diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index a0b48d734..417ab91d3 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -342,8 +342,8 @@ PeerImp::removeTxQueue(uint256 const& hash) void PeerImp::charge(Resource::Charge const& fee) { - if ((usage_.charge(fee) == Resource::drop) && usage_.disconnect() && - strand_.running_in_this_thread()) + if ((usage_.charge(fee) == Resource::drop) && + usage_.disconnect(p_journal_) && strand_.running_in_this_thread()) { // Sever the connection overlay_.incPeerDisconnectCharges(); @@ -1859,13 +1859,6 @@ PeerImp::onMessage(std::shared_ptr const& m) if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE) return badData("Invalid ledger info type"); - // Verify ledger nodes - if (m->nodes_size() <= 0 || 
m->nodes_size() > Tuning::maxReplyNodes) - { - return badData( - "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size())); - } - // Verify reply error if (m->has_error() && (m->error() < protocol::reNO_LEDGER || @@ -1874,6 +1867,13 @@ PeerImp::onMessage(std::shared_ptr const& m) return badData("Invalid reply error"); } + // Verify ledger nodes. + if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes) + { + return badData( + "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size())); + } + // If there is a request cookie, attempt to relay the message if (m->has_requestcookie()) { @@ -1935,10 +1935,21 @@ PeerImp::onMessage(std::shared_ptr const& m) return; } + // RH TODO: when isTrusted = false we should probably also cache a key + // suppression for 30 seconds to avoid doing a relatively expensive lookup + // every time a spam packet is received + PublicKey const publicKey{makeSlice(set.nodepubkey())}; + auto const isTrusted = app_.validators().trusted(publicKey); + + // If the operator has specified that untrusted proposals be dropped then + // this happens here I.e. 
before further wasting CPU verifying the signature + // of an untrusted key + if (!isTrusted && app_.config().RELAY_UNTRUSTED_PROPOSALS == -1) + return; + uint256 const proposeHash{set.currenttxhash()}; uint256 const prevLedger{set.previousledger()}; - PublicKey const publicKey{makeSlice(set.nodepubkey())}; NetClock::time_point const closeTime{NetClock::duration{set.closetime()}}; uint256 const suppression = proposalUniqueId( @@ -1963,8 +1974,6 @@ PeerImp::onMessage(std::shared_ptr const& m) return; } - auto const isTrusted = app_.validators().trusted(publicKey); - if (!isTrusted) { if (tracking_.load() == Tracking::diverged) @@ -2549,6 +2558,18 @@ PeerImp::onMessage(std::shared_ptr const& m) return; } + // RH TODO: when isTrusted = false we should probably also cache a key + // suppression for 30 seconds to avoid doing a relatively expensive + // lookup every time a spam packet is received + auto const isTrusted = + app_.validators().trusted(val->getSignerPublic()); + + // If the operator has specified that untrusted validations be dropped + // then this happens here I.e. 
before further wasting CPU verifying the + // signature of an untrusted key + if (!isTrusted && app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1) + return; + auto key = sha512Half(makeSlice(m->validation())); if (auto [added, relayed] = app_.getHashRouter().addSuppressionPeerWithStatus(key, id_); @@ -2566,9 +2587,6 @@ PeerImp::onMessage(std::shared_ptr const& m) return; } - auto const isTrusted = - app_.validators().trusted(val->getSignerPublic()); - if (!isTrusted && (tracking_.load() == Tracking::diverged)) { JLOG(p_journal_.debug()) @@ -3112,7 +3130,7 @@ PeerImp::checkPropose( if (isTrusted) relay = app_.getOPs().processTrustedProposal(peerPos); else - relay = app_.config().RELAY_UNTRUSTED_PROPOSALS || cluster(); + relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster(); if (relay) { @@ -3509,7 +3527,7 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) std::vector rawNodes; for (int i = 0; i < m->nodeids_size() && - ledgerData.nodes_size() < Tuning::maxReplyNodes; + ledgerData.nodes_size() < Tuning::softMaxReplyNodes; ++i) { auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))}; diff --git a/src/ripple/overlay/impl/Tuning.h b/src/ripple/overlay/impl/Tuning.h index a23d482f2..706c2459b 100644 --- a/src/ripple/overlay/impl/Tuning.h +++ b/src/ripple/overlay/impl/Tuning.h @@ -35,9 +35,11 @@ enum { consider it diverged */ divergedLedgerLimit = 128, - /** The maximum number of ledger entries in a single - reply */ - maxReplyNodes = 8192, + /** The soft cap on the number of ledger entries in a single reply. */ + softMaxReplyNodes = 8192, + + /** The hard cap on the number of ledger entries in a single reply. 
*/ + hardMaxReplyNodes = 12288, /** How many timer intervals a sendq has to stay large before we disconnect */ diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 246ff61bb..cbfe2c8a0 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,10 +33,14 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.8.0-b7-hooks" +char const* const versionString = "1.8.2" // clang-format on +"-hooks" #if defined(DEBUG) || defined(SANITIZER) +#ifdef GIT_COMMIT_HASH + "-" GIT_COMMIT_HASH +#endif "+" #ifdef DEBUG "DEBUG" diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 50df60a8e..4cca64acf 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -286,6 +286,7 @@ JSS(hotwallet); // in: GatewayBalances JSS(id); // websocket. JSS(ident); // in: AccountCurrencies, AccountInfo, // OwnerInfo +JSS(ignore_default); // in: AccountLines JSS(inLedger); // out: tx/Transaction JSS(inbound); // out: PeerImp JSS(index); // in: LedgerEntry, DownloadShard diff --git a/src/ripple/resource/Consumer.h b/src/ripple/resource/Consumer.h index 4ef916a50..34fb02ee6 100644 --- a/src/ripple/resource/Consumer.h +++ b/src/ripple/resource/Consumer.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_RESOURCE_CONSUMER_H_INCLUDED #define RIPPLE_RESOURCE_CONSUMER_H_INCLUDED +#include #include #include @@ -76,7 +77,7 @@ public: /** Returns `true` if the consumer should be disconnected. */ bool - disconnect(); + disconnect(beast::Journal const& j); /** Returns the credit balance representing consumption. 
*/ int diff --git a/src/ripple/resource/impl/Consumer.cpp b/src/ripple/resource/impl/Consumer.cpp index fc4e35535..34edbbfcc 100644 --- a/src/ripple/resource/impl/Consumer.cpp +++ b/src/ripple/resource/impl/Consumer.cpp @@ -114,10 +114,15 @@ Consumer::warn() } bool -Consumer::disconnect() +Consumer::disconnect(beast::Journal const& j) { assert(m_entry != nullptr); - return m_logic->disconnect(*m_entry); + bool const d = m_logic->disconnect(*m_entry); + if (d) + { + JLOG(j.debug()) << "disconnecting " << m_entry->to_string(); + } + return d; } int diff --git a/src/ripple/rpc/handlers/AccountChannels.cpp b/src/ripple/rpc/handlers/AccountChannels.cpp index d222e4c72..cc79173e5 100644 --- a/src/ripple/rpc/handlers/AccountChannels.cpp +++ b/src/ripple/rpc/handlers/AccountChannels.cpp @@ -101,6 +101,9 @@ doAccountChannels(RPC::JsonContext& context) if (auto err = readLimitField(limit, RPC::Tuning::accountChannels, context)) return *err; + if (limit == 0u) + return rpcError(rpcINVALID_PARAMS); + Json::Value jsonChannels{Json::arrayValue}; struct VisitData { @@ -110,71 +113,93 @@ doAccountChannels(RPC::JsonContext& context) AccountID const& raDstAccount; }; VisitData visitData = {{}, accountID, hasDst, raDstAccount}; - visitData.items.reserve(limit + 1); - uint256 startAfter; - std::uint64_t startHint; + visitData.items.reserve(limit); + uint256 startAfter = beast::zero; + std::uint64_t startHint = 0; if (params.isMember(jss::marker)) { - Json::Value const& marker(params[jss::marker]); - - if (!marker.isString()) + if (!params[jss::marker].isString()) return RPC::expected_field_error(jss::marker, "string"); - if (!startAfter.parseHex(marker.asString())) + // Marker is composed of a comma separated index and start hint. The + // former will be read as hex, and the latter using boost lexical cast. 
+ std::stringstream marker(params[jss::marker].asString()); + std::string value; + if (!std::getline(marker, value, ',')) + return rpcError(rpcINVALID_PARAMS); + + if (!startAfter.parseHex(value)) + return rpcError(rpcINVALID_PARAMS); + + if (!std::getline(marker, value, ',')) + return rpcError(rpcINVALID_PARAMS); + + try + { + startHint = boost::lexical_cast(value); + } + catch (boost::bad_lexical_cast&) { return rpcError(rpcINVALID_PARAMS); } - auto const sleChannel = ledger->read({ltPAYCHAN, startAfter}); + // We then must check if the object pointed to by the marker is actually + // owned by the account in the request. + auto const sle = ledger->read({ltANY, startAfter}); - if (!sleChannel) + if (!sle) return rpcError(rpcINVALID_PARAMS); - if (!visitData.hasDst || - visitData.raDstAccount == (*sleChannel)[sfDestination]) - { - visitData.items.emplace_back(sleChannel); - startHint = sleChannel->getFieldU64(sfOwnerNode); - } - else - { + if (!RPC::isOwnedByAccount(*ledger, sle, accountID)) return rpcError(rpcINVALID_PARAMS); - } - } - else - { - startHint = 0; } + auto count = 0; + std::optional marker = {}; + std::uint64_t nextHint = 0; if (!forEachItemAfter( *ledger, accountID, startAfter, startHint, - limit - visitData.items.size() + 1, - [&visitData, &accountID](std::shared_ptr const& sleCur) { - if (sleCur && sleCur->getType() == ltPAYCHAN && + limit + 1, + [&visitData, &accountID, &count, &limit, &marker, &nextHint]( + std::shared_ptr const& sleCur) { + if (!sleCur) + { + assert(false); + return false; + } + + if (++count == limit) + { + marker = sleCur->key(); + nextHint = RPC::getStartHint(sleCur, visitData.accountID); + } + + if (count <= limit && sleCur->getType() == ltPAYCHAN && (*sleCur)[sfAccount] == accountID && (!visitData.hasDst || visitData.raDstAccount == (*sleCur)[sfDestination])) { visitData.items.emplace_back(sleCur); - return true; } - return false; + return true; })) { return rpcError(rpcINVALID_PARAMS); } - if (visitData.items.size() == 
limit + 1) + // Both conditions need to be checked because marker is set on the limit-th + // item, but if there is no item on the limit + 1 iteration, then there is + // no need to return a marker. + if (count == limit + 1 && marker) { result[jss::limit] = limit; - - result[jss::marker] = to_string(visitData.items.back()->key()); - visitData.items.pop_back(); + result[jss::marker] = + to_string(*marker) + "," + std::to_string(nextHint); } result[jss::account] = context.app.accountIDCache().toBase58(accountID); diff --git a/src/ripple/rpc/handlers/AccountLines.cpp b/src/ripple/rpc/handlers/AccountLines.cpp index 2e1dac1d7..1044dcc72 100644 --- a/src/ripple/rpc/handlers/AccountLines.cpp +++ b/src/ripple/rpc/handlers/AccountLines.cpp @@ -36,6 +36,10 @@ struct VisitData AccountID const& accountID; bool hasPeer; AccountID const& raPeerAccount; + + bool ignoreDefault; + uint32_t foundCount; + RippleState::pointer lastFound; }; void @@ -78,6 +82,8 @@ addLine(Json::Value& jsonLines, RippleState const& line) // ledger_index : // limit: integer // optional // marker: opaque // optional, resume previous query +// ignore_default: bool // do not return lines in default state (on +// this account's side) // } Json::Value doAccountLines(RPC::JsonContext& context) @@ -124,83 +130,124 @@ doAccountLines(RPC::JsonContext& context) if (auto err = readLimitField(limit, RPC::Tuning::accountLines, context)) return *err; + if (limit == 0) + return rpcError(rpcINVALID_PARAMS); + + // this flag allows the requester to ask incoming trustlines in default + // state be omitted + bool ignoreDefault = params.isMember(jss::ignore_default) && + params[jss::ignore_default].asBool(); + Json::Value& jsonLines(result[jss::lines] = Json::arrayValue); - VisitData visitData = {{}, accountID, hasPeer, raPeerAccount}; - unsigned int reserve(limit); - uint256 startAfter; - std::uint64_t startHint; + VisitData visitData = { + {}, accountID, hasPeer, raPeerAccount, ignoreDefault, 0, nullptr}; + uint256 
startAfter = beast::zero; + std::uint64_t startHint = 0; if (params.isMember(jss::marker)) { - // We have a start point. Use limit - 1 from the result and use the - // very last one for the resume. - Json::Value const& marker(params[jss::marker]); - - if (!marker.isString()) + if (!params[jss::marker].isString()) return RPC::expected_field_error(jss::marker, "string"); - if (!startAfter.parseHex(marker.asString())) + // Marker is composed of a comma separated index and start hint. The + // former will be read as hex, and the latter using boost lexical cast. + std::stringstream marker(params[jss::marker].asString()); + std::string value; + if (!std::getline(marker, value, ',')) return rpcError(rpcINVALID_PARAMS); - auto const sleLine = ledger->read({ltRIPPLE_STATE, startAfter}); - - if (!sleLine) + if (!startAfter.parseHex(value)) return rpcError(rpcINVALID_PARAMS); - if (sleLine->getFieldAmount(sfLowLimit).getIssuer() == accountID) - startHint = sleLine->getFieldU64(sfLowNode); - else if (sleLine->getFieldAmount(sfHighLimit).getIssuer() == accountID) - startHint = sleLine->getFieldU64(sfHighNode); - else + if (!std::getline(marker, value, ',')) return rpcError(rpcINVALID_PARAMS); - // Caller provided the first line (startAfter), add it as first result - auto const line = RippleState::makeItem(accountID, sleLine); - if (line == nullptr) + try + { + startHint = boost::lexical_cast(value); + } + catch (boost::bad_lexical_cast&) + { + return rpcError(rpcINVALID_PARAMS); + } + + // We then must check if the object pointed to by the marker is actually + // owned by the account in the request. + auto const sle = ledger->read({ltANY, startAfter}); + + if (!sle) return rpcError(rpcINVALID_PARAMS); - addLine(jsonLines, *line); - visitData.items.reserve(reserve); - } - else - { - startHint = 0; - // We have no start point, limit should be one higher than requested. 
- visitData.items.reserve(++reserve); + if (!RPC::isOwnedByAccount(*ledger, sle, accountID)) + return rpcError(rpcINVALID_PARAMS); } + auto count = 0; + std::optional marker = {}; + std::uint64_t nextHint = 0; { if (!forEachItemAfter( *ledger, accountID, startAfter, startHint, - reserve, - [&visitData](std::shared_ptr const& sleCur) { - auto const line = - RippleState::makeItem(visitData.accountID, sleCur); - if (line != nullptr && - (!visitData.hasPeer || - visitData.raPeerAccount == line->getAccountIDPeer())) + limit + 1, + [&visitData, &count, &marker, &limit, &nextHint]( + std::shared_ptr const& sleCur) { + bool ignore = false; + if (visitData.ignoreDefault) { - visitData.items.emplace_back(line); - return true; + if (sleCur->getFieldAmount(sfLowLimit).getIssuer() == + visitData.accountID) + ignore = + !(sleCur->getFieldU32(sfFlags) & lsfLowReserve); + else + ignore = !( + sleCur->getFieldU32(sfFlags) & lsfHighReserve); } - return false; + if (!sleCur) + { + assert(false); + return false; + } + + if (++count == limit) + { + marker = sleCur->key(); + nextHint = + RPC::getStartHint(sleCur, visitData.accountID); + } + + if (!ignore && count <= limit) + { + auto const line = + RippleState::makeItem(visitData.accountID, sleCur); + + if (line != nullptr && + (!visitData.hasPeer || + visitData.raPeerAccount == + line->getAccountIDPeer())) + { + visitData.items.emplace_back(line); + } + } + + return true; })) { return rpcError(rpcINVALID_PARAMS); } } - if (visitData.items.size() == reserve) + // Both conditions need to be checked because marker is set on the limit-th + // item, but if there is no item on the limit + 1 iteration, then there is + // no need to return a marker. 
+ if (count == limit + 1 && marker) { result[jss::limit] = limit; - - RippleState::pointer line(visitData.items.back()); - result[jss::marker] = to_string(line->key()); - visitData.items.pop_back(); + result[jss::marker] = + to_string(*marker) + "," + std::to_string(nextHint); } result[jss::account] = context.app.accountIDCache().toBase58(accountID); diff --git a/src/ripple/rpc/handlers/AccountOffers.cpp b/src/ripple/rpc/handlers/AccountOffers.cpp index 063d08743..d31787563 100644 --- a/src/ripple/rpc/handlers/AccountOffers.cpp +++ b/src/ripple/rpc/handlers/AccountOffers.cpp @@ -86,68 +86,94 @@ doAccountOffers(RPC::JsonContext& context) if (auto err = readLimitField(limit, RPC::Tuning::accountOffers, context)) return *err; + if (limit == 0) + return rpcError(rpcINVALID_PARAMS); + Json::Value& jsonOffers(result[jss::offers] = Json::arrayValue); std::vector> offers; - unsigned int reserve(limit); - uint256 startAfter; - std::uint64_t startHint; + uint256 startAfter = beast::zero; + std::uint64_t startHint = 0; if (params.isMember(jss::marker)) { - // We have a start point. Use limit - 1 from the result and use the - // very last one for the resume. - Json::Value const& marker(params[jss::marker]); - - if (!marker.isString()) + if (!params[jss::marker].isString()) return RPC::expected_field_error(jss::marker, "string"); - if (!startAfter.parseHex(marker.asString())) + // Marker is composed of a comma separated index and start hint. The + // former will be read as hex, and the latter using boost lexical cast. 
+ std::stringstream marker(params[jss::marker].asString()); + std::string value; + if (!std::getline(marker, value, ',')) return rpcError(rpcINVALID_PARAMS); - auto const sleOffer = ledger->read({ltOFFER, startAfter}); + if (!startAfter.parseHex(value)) + return rpcError(rpcINVALID_PARAMS); - if (!sleOffer || accountID != sleOffer->getAccountID(sfAccount)) + if (!std::getline(marker, value, ',')) + return rpcError(rpcINVALID_PARAMS); + + try + { + startHint = boost::lexical_cast(value); + } + catch (boost::bad_lexical_cast&) { return rpcError(rpcINVALID_PARAMS); } - startHint = sleOffer->getFieldU64(sfOwnerNode); - // Caller provided the first offer (startAfter), add it as first result - appendOfferJson(sleOffer, jsonOffers); - offers.reserve(reserve); - } - else - { - startHint = 0; - // We have no start point, limit should be one higher than requested. - offers.reserve(++reserve); + // We then must check if the object pointed to by the marker is actually + // owned by the account in the request. 
+ auto const sle = ledger->read({ltANY, startAfter}); + + if (!sle) + return rpcError(rpcINVALID_PARAMS); + + if (!RPC::isOwnedByAccount(*ledger, sle, accountID)) + return rpcError(rpcINVALID_PARAMS); } + auto count = 0; + std::optional marker = {}; + std::uint64_t nextHint = 0; if (!forEachItemAfter( *ledger, accountID, startAfter, startHint, - reserve, - [&offers](std::shared_ptr const& offer) { - if (offer->getType() == ltOFFER) + limit + 1, + [&offers, &count, &marker, &limit, &nextHint, &accountID]( + std::shared_ptr const& sle) { + if (!sle) { - offers.emplace_back(offer); - return true; + assert(false); + return false; } - return false; + if (++count == limit) + { + marker = sle->key(); + nextHint = RPC::getStartHint(sle, accountID); + } + + if (count <= limit && sle->getType() == ltOFFER) + { + offers.emplace_back(sle); + } + + return true; })) { return rpcError(rpcINVALID_PARAMS); } - if (offers.size() == reserve) + // Both conditions need to be checked because marker is set on the limit-th + // item, but if there is no item on the limit + 1 iteration, then there is + // no need to return a marker. 
+ if (count == limit + 1 && marker) { result[jss::limit] = limit; - - result[jss::marker] = to_string(offers.back()->key()); - offers.pop_back(); + result[jss::marker] = + to_string(*marker) + "," + std::to_string(nextHint); } for (auto const& offer : offers) diff --git a/src/ripple/rpc/handlers/NodeToShard.cpp b/src/ripple/rpc/handlers/NodeToShard.cpp index ac250db18..fd48797cb 100644 --- a/src/ripple/rpc/handlers/NodeToShard.cpp +++ b/src/ripple/rpc/handlers/NodeToShard.cpp @@ -38,7 +38,7 @@ doNodeToShard(RPC::JsonContext& context) // Shard store must be enabled auto const shardStore = context.app.getShardStore(); if (!shardStore) - return rpcError(rpcINTERNAL, "No shard store"); + return RPC::make_error(rpcNOT_ENABLED); if (!context.params.isMember(jss::action)) return RPC::missing_field_error(jss::action); diff --git a/src/ripple/rpc/impl/RPCHandler.cpp b/src/ripple/rpc/impl/RPCHandler.cpp index 5430f3a6a..b04a6f0ed 100644 --- a/src/ripple/rpc/impl/RPCHandler.cpp +++ b/src/ripple/rpc/impl/RPCHandler.cpp @@ -129,12 +129,11 @@ fillHandler(JsonContext& context, Handler const*& result) { if (!isUnlimited(context.role)) { - // VFALCO NOTE Should we also add up the jtRPC jobs? - // - int jc = context.app.getJobQueue().getJobCountGE(jtCLIENT); - if (jc > Tuning::maxJobQueueClients) + // Count all jobs at jtCLIENT priority or higher. 
+ int const jobCount = context.app.getJobQueue().getJobCountGE(jtCLIENT); + if (jobCount > Tuning::maxJobQueueClients) { - JLOG(context.j.debug()) << "Too busy for command: " << jc; + JLOG(context.j.debug()) << "Too busy for command: " << jobCount; return rpcTOO_BUSY; } } diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index be1d005a3..5c42aae96 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -89,6 +90,47 @@ accountFromString(AccountID& result, std::string const& strIdent, bool bStrict) return Json::objectValue; } +std::uint64_t +getStartHint(std::shared_ptr const& sle, AccountID const& accountID) +{ + if (sle->getType() == ltRIPPLE_STATE) + { + if (sle->getFieldAmount(sfLowLimit).getIssuer() == accountID) + return sle->getFieldU64(sfLowNode); + else if (sle->getFieldAmount(sfHighLimit).getIssuer() == accountID) + return sle->getFieldU64(sfHighNode); + } + + if (!sle->isFieldPresent(sfOwnerNode)) + return 0; + + return sle->getFieldU64(sfOwnerNode); +} + +bool +isOwnedByAccount( + ReadView const& ledger, + std::shared_ptr const& sle, + AccountID const& accountID) +{ + if (sle->getType() == ltRIPPLE_STATE) + { + return (sle->getFieldAmount(sfLowLimit).getIssuer() == accountID) || + (sle->getFieldAmount(sfHighLimit).getIssuer() == accountID); + } + else if (sle->isFieldPresent(sfAccount)) + { + return sle->getAccountID(sfAccount) == accountID; + } + else if (sle->getType() == ltSIGNER_LIST) + { + Keylet const accountSignerList = keylet::signers(accountID); + return sle->key() == accountSignerList.key; + } + + return false; +} + bool getAccountObjects( ReadView const& ledger, @@ -144,19 +186,19 @@ getAccountObjects( typeMatchesFilter(typeFilter.value(), sleNode->getType())) { jvObjects.append(sleNode->getJson(JsonOptions::none)); + } - if (++i == limit) + if (++i == limit) + { + if (++iter != entries.end()) 
{ - if (++iter != entries.end()) - { - jvResult[jss::limit] = limit; - jvResult[jss::marker] = - to_string(dirIndex) + ',' + to_string(*iter); - return true; - } - - break; + jvResult[jss::limit] = limit; + jvResult[jss::marker] = + to_string(dirIndex) + ',' + to_string(*iter); + return true; } + + break; } } diff --git a/src/ripple/rpc/impl/RPCHelpers.h b/src/ripple/rpc/impl/RPCHelpers.h index 0ecdebe24..e3d44c2e7 100644 --- a/src/ripple/rpc/impl/RPCHelpers.h +++ b/src/ripple/rpc/impl/RPCHelpers.h @@ -71,6 +71,25 @@ accountFromStringWithCode( std::string const& strIdent, bool bStrict = false); +/** Gets the start hint for traversing account objects + * @param sle - Ledger entry defined by the marker passed into the RPC. + * @param accountID - The ID of the account whose objects you are traversing. + */ +std::uint64_t +getStartHint(std::shared_ptr const& sle, AccountID const& accountID); + +/** + * Tests if a SLE is owned by accountID. + * @param ledger - The ledger used to search for the sle. + * @param sle - The SLE to test for ownership. + * @param account - The account being tested for SLE ownership. + */ +bool +isOwnedByAccount( + ReadView const& ledger, + std::shared_ptr const& sle, + AccountID const& accountID); + /** Gathers all objects for an account in a ledger. @param ledger Ledger to search account objects. @param account AccountID to find objects for. diff --git a/src/ripple/rpc/impl/Role.cpp b/src/ripple/rpc/impl/Role.cpp index f3da98c3a..3bfc7d56a 100644 --- a/src/ripple/rpc/impl/Role.cpp +++ b/src/ripple/rpc/impl/Role.cpp @@ -122,18 +122,125 @@ requestInboundEndpoint( remoteAddress, role == Role::PROXY, forwardedFor); } +static boost::string_view +extractIpAddrFromField(boost::string_view field) +{ + // Lambda to trim leading and trailing spaces on the field. + auto trim = [](boost::string_view str) -> boost::string_view { + boost::string_view ret = str; + + // Only do the work if there's at least one leading space. 
+ if (!ret.empty() && ret.front() == ' ')
+ {
+ std::size_t const firstNonSpace = ret.find_first_not_of(' ');
+ if (firstNonSpace == boost::string_view::npos)
+ // We know there's at least one leading space. So if we got
+ // npos, then it must be all spaces. Return empty string_view.
+ return {};
+
+ ret = ret.substr(firstNonSpace);
+ }
+ // Trim trailing spaces.
+ if (!ret.empty())
+ {
+ // Only do the work if there's at least one trailing space.
+ if (unsigned char const c = ret.back();
+ c == ' ' || c == '\r' || c == '\n')
+ {
+ std::size_t const lastNonSpace = ret.find_last_not_of(" \r\n");
+ if (lastNonSpace == boost::string_view::npos)
+ // We know there's at least one trailing space. So if we
+ // got npos, then it must be all spaces.
+ return {};
+
+ ret = ret.substr(0, lastNonSpace + 1);
+ }
+ }
+ return ret;
+ };
+
+ boost::string_view ret = trim(field);
+ if (ret.empty())
+ return {};
+
+ // If there are surrounding quotes, strip them.
+ if (ret.front() == '"')
+ {
+ ret.remove_prefix(1);
+ if (ret.empty() || ret.back() != '"')
+ return {}; // Unbalanced double quotes.
+
+ ret.remove_suffix(1);
+
+ // Strip leading and trailing spaces that were inside the quotes.
+ ret = trim(ret);
+ }
+ if (ret.empty())
+ return {};
+
+ // If we have an IPv6 or IPv6 (dual) address wrapped in square brackets,
+ // then we need to remove the square brackets.
+ if (ret.front() == '[')
+ {
+ // Remove leading '['.
+ ret.remove_prefix(1);
+
+ // We may have an IPv6 address in square brackets. Scan up to the
+ // closing square bracket.
+ auto const closeBracket =
+ std::find_if_not(ret.begin(), ret.end(), [](unsigned char c) {
+ return std::isxdigit(c) || c == ':' || c == '.' || c == ' ';
+ });
+
+ // If the string does not close with a ']', then it's not valid IPv6
+ // or IPv6 (dual). 
+ if (closeBracket == ret.end() || (*closeBracket) != ']') + return {}; + + // Remove trailing ']' + ret = ret.substr(0, closeBracket - ret.begin()); + ret = trim(ret); + } + if (ret.empty()) + return {}; + + // If this is an IPv6 address (after unwrapping from square brackets), + // then there cannot be an appended port. In that case we're done. + { + // Skip any leading hex digits. + auto const colon = + std::find_if_not(ret.begin(), ret.end(), [](unsigned char c) { + return std::isxdigit(c) || c == ' '; + }); + + // If the string starts with optional hex digits followed by a colon + // it's an IVv6 address. We're done. + if (colon == ret.end() || (*colon) == ':') + return ret; + } + + // If there's a port appended to the IP address, strip that by + // terminating at the colon. + if (std::size_t colon = ret.find(':'); colon != boost::string_view::npos) + ret = ret.substr(0, colon); + + return ret; +} + boost::string_view forwardedFor(http_request_type const& request) { - auto it = request.find(boost::beast::http::field::forwarded); - if (it != request.end()) + // Look for the Forwarded field in the request. + if (auto it = request.find(boost::beast::http::field::forwarded); + it != request.end()) { auto ascii_tolower = [](char c) -> char { return ((static_cast(c) - 65U) < 26) ? c + 'a' - 'A' : c; }; + // Look for the first (case insensitive) "for=" static std::string const forStr{"for="}; - auto found = std::search( + char const* found = std::search( it->value().begin(), it->value().end(), forStr.begin(), @@ -146,22 +253,29 @@ forwardedFor(http_request_type const& request) return {}; found += forStr.size(); - std::size_t const pos([&]() { - std::size_t const pos{ - boost::string_view(found, it->value().end() - found).find(';')}; - if (pos == boost::string_view::npos) - return it->value().size() - forStr.size(); - return pos; - }()); - return *boost::beast::http::token_list(boost::string_view(found, pos)) - .begin(); + // We found a "for=". 
Scan for the end of the IP address. + std::size_t const pos = [&found, &it]() { + std::size_t pos = + boost::string_view(found, it->value().end() - found) + .find_first_of(",;"); + if (pos != boost::string_view::npos) + return pos; + + return it->value().size() - forStr.size(); + }(); + + return extractIpAddrFromField({found, pos}); } - it = request.find("X-Forwarded-For"); - if (it != request.end()) + // Look for the X-Forwarded-For field in the request. + if (auto it = request.find("X-Forwarded-For"); it != request.end()) { - return *boost::beast::http::token_list(it->value()).begin(); + // The first X-Forwarded-For entry may be terminated by a comma. + std::size_t found = it->value().find(','); + if (found == boost::string_view::npos) + found = it->value().length(); + return extractIpAddrFromField(it->value().substr(0, found)); } return {}; diff --git a/src/ripple/rpc/impl/ServerHandlerImp.cpp b/src/ripple/rpc/impl/ServerHandlerImp.cpp index b2f52e9a5..3aed45382 100644 --- a/src/ripple/rpc/impl/ServerHandlerImp.cpp +++ b/src/ripple/rpc/impl/ServerHandlerImp.cpp @@ -292,7 +292,7 @@ ServerHandlerImp::onRequest(Session& session) std::shared_ptr detachedSession = session.detach(); auto const postResult = m_jobQueue.postCoro( - jtCLIENT, + jtCLIENT_RPC, "RPC-Client", [this, detachedSession](std::shared_ptr coro) { processSession(detachedSession, coro); @@ -339,7 +339,7 @@ ServerHandlerImp::onWSMessage( JLOG(m_journal.trace()) << "Websocket received '" << jv << "'"; auto const postResult = m_jobQueue.postCoro( - jtCLIENT, + jtCLIENT_WEBSOCKET, "WS-Client", [this, session, jv = std::move(jv)]( std::shared_ptr const& coro) { @@ -377,6 +377,25 @@ ServerHandlerImp::onStopped(Server&) //------------------------------------------------------------------------------ +template +void +logDuration( + Json::Value const& request, + T const& duration, + beast::Journal& journal) +{ + using namespace std::chrono_literals; + auto const level = (duration >= 10s) + ? 
journal.error() + : (duration >= 1s) ? journal.warn() : journal.debug(); + + JLOG(level) << "RPC request processing duration = " + << std::chrono::duration_cast( + duration) + .count() + << " microseconds. request = " << request; +} + Json::Value ServerHandlerImp::processSession( std::shared_ptr const& session, @@ -384,7 +403,7 @@ ServerHandlerImp::processSession( Json::Value const& jv) { auto is = std::static_pointer_cast(session->appDefined); - if (is->getConsumer().disconnect()) + if (is->getConsumer().disconnect(m_journal)) { session->close( {boost::beast::websocket::policy_error, "threshold exceeded"}); @@ -458,7 +477,10 @@ ServerHandlerImp::processSession( jv, {is->user(), is->forwarded_for()}}; + auto start = std::chrono::system_clock::now(); RPC::doCommand(context, jr[jss::result]); + auto end = std::chrono::system_clock::now(); + logDuration(jv, end - start, m_journal); } } catch (std::exception const& ex) @@ -687,7 +709,7 @@ ServerHandlerImp::processRequest( { usage = m_resourceManager.newInboundEndpoint( remoteIPAddress, role == Role::PROXY, forwardedFor); - if (usage.disconnect()) + if (usage.disconnect(m_journal)) { if (!batch) { @@ -851,7 +873,11 @@ ServerHandlerImp::processRequest( params, {user, forwardedFor}}; Json::Value result; + auto start = std::chrono::system_clock::now(); RPC::doCommand(context, result); + auto end = std::chrono::system_clock::now(); + logDuration(params, end - start, m_journal); + usage.charge(loadType); if (usage.warn()) result[jss::warning] = jss::load; diff --git a/src/ripple/rpc/impl/ShardArchiveHandler.cpp b/src/ripple/rpc/impl/ShardArchiveHandler.cpp index 7ad8bcbba..c52c1b501 100644 --- a/src/ripple/rpc/impl/ShardArchiveHandler.cpp +++ b/src/ripple/rpc/impl/ShardArchiveHandler.cpp @@ -382,7 +382,7 @@ ShardArchiveHandler::next(std::lock_guard const& l) return onClosureFailed( "failed to wrap closure for starting download", l); - app_.getJobQueue().addJob(jtCLIENT, "ShardArchiveHandler", *wrapper); + 
app_.getJobQueue().addJob(jtCLIENT_SHARD, "ShardArchiveHandler", *wrapper); return true; } @@ -465,7 +465,7 @@ ShardArchiveHandler::complete(path dstPath) } // Process in another thread to not hold up the IO service - app_.getJobQueue().addJob(jtCLIENT, "ShardArchiveHandler", *wrapper); + app_.getJobQueue().addJob(jtCLIENT_SHARD, "ShardArchiveHandler", *wrapper); } void diff --git a/src/ripple/rpc/impl/Tuning.h b/src/ripple/rpc/impl/Tuning.h index f52c60f09..233e73794 100644 --- a/src/ripple/rpc/impl/Tuning.h +++ b/src/ripple/rpc/impl/Tuning.h @@ -46,7 +46,7 @@ static LimitRange constexpr accountObjects = {10, 200, 400}; static LimitRange constexpr accountOffers = {10, 200, 400}; /** Limits for the book_offers command. */ -static LimitRange constexpr bookOffers = {0, 300, 400}; +static LimitRange constexpr bookOffers = {0, 60, 100}; /** Limits for the no_ripple_check command. */ static LimitRange constexpr noRippleCheck = {10, 300, 400}; diff --git a/src/ripple/shamap/FullBelowCache.h b/src/ripple/shamap/FullBelowCache.h index ad051b1eb..6d809d3b9 100644 --- a/src/ripple/shamap/FullBelowCache.h +++ b/src/ripple/shamap/FullBelowCache.h @@ -21,8 +21,10 @@ #define RIPPLE_SHAMAP_FULLBELOWCACHE_H_INCLUDED #include +#include #include #include +#include #include #include @@ -33,17 +35,15 @@ namespace detail { /** Remembers which tree keys have all descendants resident. This optimizes the process of acquiring a complete tree. */ -template class BasicFullBelowCache { private: - using CacheType = KeyCache; + using CacheType = KeyCache; public: enum { defaultCacheTargetSize = 0 }; - using key_type = Key; - using size_type = typename CacheType::size_type; + using key_type = uint256; using clock_type = typename CacheType::clock_type; /** Construct the cache. 
@@ -56,11 +56,12 @@ public: BasicFullBelowCache( std::string const& name, clock_type& clock, + beast::Journal j, beast::insight::Collector::ptr const& collector = beast::insight::NullCollector::New(), std::size_t target_size = defaultCacheTargetSize, std::chrono::seconds expiration = std::chrono::minutes{2}) - : m_cache(name, clock, collector, target_size, expiration), m_gen(1) + : m_cache(name, target_size, expiration, clock, j, collector), m_gen(1) { } @@ -75,7 +76,7 @@ public: Thread safety: Safe to call from any thread. */ - size_type + std::size_t size() const { return m_cache.size(); @@ -138,13 +139,13 @@ public: } private: - KeyCache m_cache; + CacheType m_cache; std::atomic m_gen; }; } // namespace detail -using FullBelowCache = detail::BasicFullBelowCache; +using FullBelowCache = detail::BasicFullBelowCache; } // namespace ripple diff --git a/src/ripple/shamap/SHAMapTreeNode.h b/src/ripple/shamap/SHAMapTreeNode.h index bc3a0b0d8..8e351cce9 100644 --- a/src/ripple/shamap/SHAMapTreeNode.h +++ b/src/ripple/shamap/SHAMapTreeNode.h @@ -21,6 +21,7 @@ #define RIPPLE_SHAMAP_SHAMAPTREENODE_H_INCLUDED #include +#include #include #include #include @@ -42,88 +43,6 @@ static constexpr unsigned char const wireTypeInner = 2; static constexpr unsigned char const wireTypeCompressedInner = 3; static constexpr unsigned char const wireTypeTransactionWithMeta = 4; -// A SHAMapHash is the hash of a node in a SHAMap, and also the -// type of the hash of the entire SHAMap. 
- -class SHAMapHash -{ - uint256 hash_; - -public: - SHAMapHash() = default; - explicit SHAMapHash(uint256 const& hash) : hash_(hash) - { - } - - uint256 const& - as_uint256() const - { - return hash_; - } - uint256& - as_uint256() - { - return hash_; - } - bool - isZero() const - { - return hash_.isZero(); - } - bool - isNonZero() const - { - return hash_.isNonZero(); - } - int - signum() const - { - return hash_.signum(); - } - void - zero() - { - hash_.zero(); - } - - friend bool - operator==(SHAMapHash const& x, SHAMapHash const& y) - { - return x.hash_ == y.hash_; - } - - friend bool - operator<(SHAMapHash const& x, SHAMapHash const& y) - { - return x.hash_ < y.hash_; - } - - friend std::ostream& - operator<<(std::ostream& os, SHAMapHash const& x) - { - return os << x.hash_; - } - - friend std::string - to_string(SHAMapHash const& x) - { - return to_string(x.hash_); - } - - template - friend void - hash_append(H& h, SHAMapHash const& x) - { - hash_append(h, x.hash_); - } -}; - -inline bool -operator!=(SHAMapHash const& x, SHAMapHash const& y) -{ - return !(x == y); -} - enum class SHAMapNodeType { tnINNER = 1, tnTRANSACTION_NM = 2, // transaction, no metadata diff --git a/src/ripple/shamap/impl/NodeFamily.cpp b/src/ripple/shamap/impl/NodeFamily.cpp index f81702037..f9c6dedb2 100644 --- a/src/ripple/shamap/impl/NodeFamily.cpp +++ b/src/ripple/shamap/impl/NodeFamily.cpp @@ -31,6 +31,7 @@ NodeFamily::NodeFamily(Application& app, CollectorManager& cm) , fbCache_(std::make_shared( "Node family full below cache", stopwatch(), + app.journal("NodeFamilyFulLBelowCache"), cm.collector(), fullBelowTargetSize, fullBelowExpiration)) diff --git a/src/ripple/shamap/impl/SHAMapInnerNode.cpp b/src/ripple/shamap/impl/SHAMapInnerNode.cpp index 0ef0dd455..6a2a4504f 100644 --- a/src/ripple/shamap/impl/SHAMapInnerNode.cpp +++ b/src/ripple/shamap/impl/SHAMapInnerNode.cpp @@ -132,19 +132,21 @@ SHAMapInnerNode::makeFullInner( SHAMapHash const& hash, bool hashValid) { - if (data.size() 
!= 512) + // A full inner node is serialized as 16 256-bit hashes, back to back: + if (data.size() != branchFactor * uint256::bytes) Throw("Invalid FI node"); auto ret = std::make_shared(0, branchFactor); - Serializer s(data.data(), data.size()); + SerialIter si(data); + + auto hashes = ret->hashesAndChildren_.getHashes(); - auto retHashes = ret->hashesAndChildren_.getHashes(); for (int i = 0; i < branchFactor; ++i) { - s.getBitString(retHashes[i].as_uint256(), i * 32); + hashes[i].as_uint256() = si.getBitString<256>(); - if (retHashes[i].isNonZero()) + if (hashes[i].isNonZero()) ret->isBranch_ |= (1 << i); } @@ -154,39 +156,43 @@ SHAMapInnerNode::makeFullInner( ret->hash_ = hash; else ret->updateHash(); + return ret; } std::shared_ptr SHAMapInnerNode::makeCompressedInner(Slice data) { - Serializer s(data.data(), data.size()); + // A compressed inner node is serialized as a series of 33 byte chunks, + // representing a one byte "position" and a 256-bit hash: + constexpr std::size_t chunkSize = uint256::bytes + 1; - int len = s.getLength(); + if (auto const s = data.size(); + (s % chunkSize != 0) || (s > chunkSize * branchFactor)) + Throw("Invalid CI node"); + + SerialIter si(data); auto ret = std::make_shared(0, branchFactor); - auto retHashes = ret->hashesAndChildren_.getHashes(); - for (int i = 0; i < (len / 33); ++i) + auto hashes = ret->hashesAndChildren_.getHashes(); + + while (!si.empty()) { - int pos; + auto const hash = si.getBitString<256>(); + auto const pos = si.get8(); - if (!s.get8(pos, 32 + (i * 33))) - Throw("short CI node"); - - if ((pos < 0) || (pos >= branchFactor)) + if (pos >= branchFactor) Throw("invalid CI node"); - s.getBitString(retHashes[pos].as_uint256(), i * 33); + hashes[pos].as_uint256() = hash; - if (retHashes[pos].isNonZero()) + if (hashes[pos].isNonZero()) ret->isBranch_ |= (1 << pos); } ret->resizeChildArrays(ret->getBranchCount()); - ret->updateHash(); - return ret; } diff --git a/src/ripple/shamap/impl/ShardFamily.cpp 
b/src/ripple/shamap/impl/ShardFamily.cpp index ee4a7c83c..eadfc42aa 100644 --- a/src/ripple/shamap/impl/ShardFamily.cpp +++ b/src/ripple/shamap/impl/ShardFamily.cpp @@ -55,6 +55,7 @@ ShardFamily::getFullBelowCache(std::uint32_t ledgerSeq) auto fbCache{std::make_shared( "Shard family full below cache shard " + std::to_string(shardIndex), stopwatch(), + j_, cm_.collector(), fullBelowTargetSize, fullBelowExpiration)}; diff --git a/src/test/app/HashRouter_test.cpp b/src/test/app/HashRouter_test.cpp index 9162fb9b1..96d14e824 100644 --- a/src/test/app/HashRouter_test.cpp +++ b/src/test/app/HashRouter_test.cpp @@ -31,7 +31,7 @@ class HashRouter_test : public beast::unit_test::suite { using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 2s, 2); + HashRouter router(stopwatch, 2s); uint256 const key1(1); uint256 const key2(2); @@ -68,7 +68,7 @@ class HashRouter_test : public beast::unit_test::suite { using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 2s, 2); + HashRouter router(stopwatch, 2s); uint256 const key1(1); uint256 const key2(2); @@ -146,7 +146,7 @@ class HashRouter_test : public beast::unit_test::suite // Normal HashRouter using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 2s, 2); + HashRouter router(stopwatch, 2s); uint256 const key1(1); uint256 const key2(2); @@ -174,7 +174,7 @@ class HashRouter_test : public beast::unit_test::suite { using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 2s, 2); + HashRouter router(stopwatch, 2s); uint256 const key1(1); BEAST_EXPECT(router.setFlags(key1, 10)); @@ -187,7 +187,7 @@ class HashRouter_test : public beast::unit_test::suite { using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 1s, 2); + HashRouter router(stopwatch, 1s); uint256 const key1(1); @@ -225,47 +225,12 @@ class HashRouter_test : public 
beast::unit_test::suite BEAST_EXPECT(peers && peers->size() == 0); } - void - testRecover() - { - using namespace std::chrono_literals; - TestStopwatch stopwatch; - HashRouter router(stopwatch, 1s, 5); - - uint256 const key1(1); - - BEAST_EXPECT(router.shouldRecover(key1)); - BEAST_EXPECT(router.shouldRecover(key1)); - BEAST_EXPECT(router.shouldRecover(key1)); - BEAST_EXPECT(router.shouldRecover(key1)); - BEAST_EXPECT(router.shouldRecover(key1)); - BEAST_EXPECT(!router.shouldRecover(key1)); - // Expire, but since the next search will - // be for this entry, it will get refreshed - // instead. - ++stopwatch; - BEAST_EXPECT(router.shouldRecover(key1)); - // Expire, but since the next search will - // be for this entry, it will get refreshed - // instead. - ++stopwatch; - // Recover again. Recovery is independent of - // time as long as the entry doesn't expire. - BEAST_EXPECT(router.shouldRecover(key1)); - BEAST_EXPECT(router.shouldRecover(key1)); - BEAST_EXPECT(router.shouldRecover(key1)); - // Expire again - ++stopwatch; - BEAST_EXPECT(router.shouldRecover(key1)); - BEAST_EXPECT(!router.shouldRecover(key1)); - } - void testProcess() { using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 5s, 5); + HashRouter router(stopwatch, 5s); uint256 const key(1); HashRouter::PeerShortID peer = 1; int flags; @@ -286,7 +251,6 @@ public: testSuppression(); testSetFlags(); testRelay(); - testRecover(); testProcess(); } }; diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index 0e38de517..cf600a9fc 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -1033,8 +1033,7 @@ struct PayChan_test : public beast::unit_test::suite { // degenerate case auto const r = testLimit(env, alice, 0); - BEAST_EXPECT(r.isMember(jss::marker)); - BEAST_EXPECT(r[jss::channels].size() == 0); + BEAST_EXPECT(r.isMember(jss::error_message)); } } diff --git a/src/test/app/Transaction_ordering_test.cpp 
b/src/test/app/Transaction_ordering_test.cpp index d292c8906..0353df906 100644 --- a/src/test/app/Transaction_ordering_test.cpp +++ b/src/test/app/Transaction_ordering_test.cpp @@ -28,6 +28,7 @@ struct Transaction_ordering_test : public beast::unit_test::suite testCorrectOrder() { using namespace jtx; + testcase("Correct Order"); Env env(*this); auto const alice = Account("alice"); @@ -69,8 +70,13 @@ struct Transaction_ordering_test : public beast::unit_test::suite { using namespace jtx; - Env env(*this); - env.app().getJobQueue().setThreadCount(0, false); + testcase("Incorrect order"); + + Env env(*this, envconfig([](std::unique_ptr cfg) { + cfg->FORCE_MULTI_THREAD = false; + return cfg; + })); + auto const alice = Account("alice"); env.fund(XRP(1000), noripple(alice)); @@ -109,8 +115,13 @@ struct Transaction_ordering_test : public beast::unit_test::suite { using namespace jtx; - Env env(*this); - env.app().getJobQueue().setThreadCount(0, false); + testcase("Incorrect order multiple intermediaries"); + + Env env(*this, envconfig([](std::unique_ptr cfg) { + cfg->FORCE_MULTI_THREAD = true; + return cfg; + })); + auto const alice = Account("alice"); env.fund(XRP(1000), noripple(alice)); diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index 3fdf00a50..2ade9e8e3 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -802,7 +802,7 @@ public: env(noop(charlie), fee(7000), queued); env(noop(daria), fee(7000), queued); env(noop(edgar), fee(7000), queued); - env(noop(felicia), fee(7000), queued); + env(noop(felicia), fee(6999), queued); checkMetrics(env, 6, 6, 4, 3, 257); env.close(); @@ -816,8 +816,8 @@ public: env(noop(daria), fee(8000), queued); env(noop(daria), fee(8000), seq(env.seq(daria) + 1), queued); env(noop(edgar), fee(8000), queued); - env(noop(felicia), fee(8000), queued); - env(noop(felicia), fee(8000), seq(env.seq(felicia) + 1), queued); + env(noop(felicia), fee(7999), queued); + env(noop(felicia), fee(7999), 
seq(env.seq(felicia) + 1), queued); checkMetrics(env, 8, 8, 5, 4, 257, 700 * 256); env.close(); @@ -1039,7 +1039,7 @@ public: checkMetrics(env, 0, initQueueMax, 4, 3, 256); // Alice - price starts exploding: held - env(noop(alice), queued); + env(noop(alice), fee(11), queued); checkMetrics(env, 1, initQueueMax, 4, 3, 256); auto aliceSeq = env.seq(alice); @@ -1088,7 +1088,7 @@ public: BEAST_EXPECT(env.seq(charlie) == charlieSeq + 1); // Alice - fill up the queue - std::int64_t aliceFee = 20; + std::int64_t aliceFee = 27; aliceSeq = env.seq(alice); auto lastLedgerSeq = env.current()->info().seq + 2; for (auto i = 0; i < 7; i++) @@ -1096,7 +1096,7 @@ public: env(noop(alice), seq(aliceSeq), json(jss::LastLedgerSequence, lastLedgerSeq + i), - fee(aliceFee), + fee(--aliceFee), queued); ++aliceSeq; } @@ -1104,17 +1104,18 @@ public: { auto& txQ = env.app().getTxQ(); auto aliceStat = txQ.getAccountTxs(alice.id(), *env.current()); - constexpr XRPAmount fee{20}; + aliceFee = 27; auto const& baseFee = env.current()->fees().base; auto seq = env.seq(alice); BEAST_EXPECT(aliceStat.size() == 7); for (auto const& tx : aliceStat) { BEAST_EXPECT(tx.seqProxy.isSeq() && tx.seqProxy.value() == seq); - BEAST_EXPECT(tx.feeLevel == toFeeLevel(fee, baseFee)); + BEAST_EXPECT( + tx.feeLevel == toFeeLevel(XRPAmount(--aliceFee), baseFee)); BEAST_EXPECT(tx.lastValid); BEAST_EXPECT( - (tx.consequences.fee() == drops(fee) && + (tx.consequences.fee() == drops(aliceFee) && tx.consequences.potentialSpend() == drops(0) && !tx.consequences.isBlocker()) || tx.seqProxy.value() == env.seq(alice) + 6); @@ -1141,13 +1142,13 @@ public: // Charlie - add another item to the queue, which // causes Alice's last txn to drop env(noop(charlie), fee(30), queued); - checkMetrics(env, 8, 8, 5, 4, 513); + checkMetrics(env, 8, 8, 5, 4, 538); // Alice - now attempt to add one more to the queue, // which fails because the last tx was dropped, so // there is no complete chain. 
env(noop(alice), seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE)); - checkMetrics(env, 8, 8, 5, 4, 513); + checkMetrics(env, 8, 8, 5, 4, 538); // Alice wants this tx more than the dropped tx, // so resubmits with higher fee, but the queue @@ -1156,22 +1157,22 @@ public: seq(aliceSeq - 1), fee(aliceFee), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(env, 8, 8, 5, 4, 513); + checkMetrics(env, 8, 8, 5, 4, 538); // Try to replace a middle item in the queue // without enough fee. aliceSeq = env.seq(alice) + 2; - aliceFee = 25; + aliceFee = 29; env(noop(alice), seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(env, 8, 8, 5, 4, 513); + checkMetrics(env, 8, 8, 5, 4, 538); // Replace a middle item from the queue successfully ++aliceFee; env(noop(alice), seq(aliceSeq), fee(aliceFee), queued); - checkMetrics(env, 8, 8, 5, 4, 513); + checkMetrics(env, 8, 8, 5, 4, 538); env.close(); // Alice's transactions processed, along with @@ -1186,7 +1187,7 @@ public: // more than the minimum reserve in flight before the // last queued transaction aliceFee = - env.le(alice)->getFieldAmount(sfBalance).xrp().drops() - (59); + env.le(alice)->getFieldAmount(sfBalance).xrp().drops() - (62); env(noop(alice), seq(aliceSeq), fee(aliceFee), @@ -1334,6 +1335,9 @@ public: auto hankSeq = env.seq(hank); // This time, use identical fees. + + // This one gets into the queue, but gets dropped when the + // higher fee one is added later. env(noop(alice), fee(15), queued); env(noop(bob), fee(15), queued); env(noop(charlie), fee(15), queued); @@ -1341,8 +1345,6 @@ public: env(noop(elmo), fee(15), queued); env(noop(fred), fee(15), queued); env(noop(gwen), fee(15), queued); - // This one gets into the queue, but gets dropped when the - // higher fee one is added later. env(noop(hank), fee(15), queued); // Queue is full now. Minimum fee now reflects the @@ -1362,9 +1364,9 @@ public: // Queue is still full. 
checkMetrics(env, 8, 8, 5, 4, 385); - // alice, bob, charlie, daria, and elmo's txs + // bob, charlie, daria, elmo, and fred's txs // are processed out of the queue into the ledger, - // leaving fred and gwen's txs. hank's tx is + // leaving fred and hank's txs. alice's tx is // retried from localTxs, and put back into the // queue. env.close(); @@ -1372,45 +1374,46 @@ public: BEAST_EXPECT(aliceSeq + 1 == env.seq(alice)); BEAST_EXPECT(bobSeq + 1 == env.seq(bob)); - BEAST_EXPECT(charlieSeq + 2 == env.seq(charlie)); + BEAST_EXPECT(charlieSeq == env.seq(charlie)); BEAST_EXPECT(dariaSeq + 1 == env.seq(daria)); - BEAST_EXPECT(elmoSeq + 1 == env.seq(elmo)); - BEAST_EXPECT(fredSeq == env.seq(fred)); - BEAST_EXPECT(gwenSeq == env.seq(gwen)); - BEAST_EXPECT(hankSeq == env.seq(hank)); + BEAST_EXPECT(elmoSeq == env.seq(elmo)); + BEAST_EXPECT(fredSeq + 1 == env.seq(fred)); + BEAST_EXPECT(gwenSeq + 1 == env.seq(gwen)); + BEAST_EXPECT(hankSeq + 1 == env.seq(hank)); aliceSeq = env.seq(alice); bobSeq = env.seq(bob); charlieSeq = env.seq(charlie); dariaSeq = env.seq(daria); elmoSeq = env.seq(elmo); + fredSeq = env.seq(fred); // Fill up the queue again - env(noop(alice), fee(15), queued); - env(noop(alice), seq(aliceSeq + 1), fee(15), queued); - env(noop(alice), seq(aliceSeq + 2), fee(15), queued); + env(noop(fred), fee(15), queued); + env(noop(fred), seq(fredSeq + 1), fee(15), queued); + env(noop(fred), seq(fredSeq + 2), fee(15), queued); env(noop(bob), fee(15), queued); - env(noop(charlie), fee(15), queued); + env(noop(charlie), seq(charlieSeq + 2), fee(15), queued); env(noop(daria), fee(15), queued); // This one gets into the queue, but gets dropped when the // higher fee one is added later. 
- env(noop(elmo), fee(15), queued); + env(noop(elmo), seq(elmoSeq + 1), fee(15), queued); checkMetrics(env, 10, 10, 6, 5, 385); // Add another transaction, with a higher fee, // Not high enough to get into the ledger, but high // enough to get into the queue (and kick somebody out) - env(noop(alice), fee(100), seq(aliceSeq + 3), queued); + env(noop(fred), fee(100), seq(fredSeq + 3), queued); env.close(); checkMetrics(env, 4, 12, 7, 6, 256); - BEAST_EXPECT(fredSeq + 1 == env.seq(fred)); + BEAST_EXPECT(fredSeq + 4 == env.seq(fred)); BEAST_EXPECT(gwenSeq + 1 == env.seq(gwen)); BEAST_EXPECT(hankSeq + 1 == env.seq(hank)); - BEAST_EXPECT(aliceSeq + 4 == env.seq(alice)); - BEAST_EXPECT(bobSeq == env.seq(bob)); - BEAST_EXPECT(charlieSeq == env.seq(charlie)); + BEAST_EXPECT(aliceSeq == env.seq(alice)); + BEAST_EXPECT(bobSeq + 1 == env.seq(bob)); + BEAST_EXPECT(charlieSeq + 2 == env.seq(charlie)); BEAST_EXPECT(dariaSeq == env.seq(daria)); BEAST_EXPECT(elmoSeq == env.seq(elmo)); } @@ -1466,6 +1469,7 @@ public: *this, makeConfig( {{"minimum_txn_in_ledger_standalone", "2"}, + {"minimum_txn_in_ledger", "5"}, {"target_txn_in_ledger", "4"}, {"maximum_txn_in_ledger", "5"}})); @@ -2767,7 +2771,7 @@ public: // we only see a reduction by 5. env.close(); checkMetrics(env, 9, 50, 6, 5, 256); - BEAST_EXPECT(env.seq(alice) == aliceSeq + 16); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 15); // Close ledger 7. That should remove 7 more of alice's transactions. env.close(); @@ -2775,7 +2779,7 @@ public: BEAST_EXPECT(env.seq(alice) == aliceSeq + 19); // Close one last ledger to see all of alice's transactions moved - // into the ledger. 
+ // into the ledger, including the tickets env.close(); checkMetrics(env, 0, 70, 2, 7, 256); BEAST_EXPECT(env.seq(alice) == aliceSeq + 21); @@ -4130,7 +4134,7 @@ public: {{"minimum_txn_in_ledger_standalone", "1"}, {"ledgers_in_queue", "5"}, {"maximum_txn_per_account", "10"}}, - {{"account_reserve", "200"}, {"owner_reserve", "50"}}); + {{"account_reserve", "1000"}, {"owner_reserve", "50"}}); Env env(*this, std::move(cfg)); @@ -4184,14 +4188,16 @@ public: auto seqDaria = env.seq(daria); auto seqEllie = env.seq(ellie); auto seqFiona = env.seq(fiona); + // Use fees to guarantee order + int txFee{90}; for (int i = 0; i < 10; ++i) { - env(noop(alice), seq(seqAlice++), ter(terQUEUED)); - env(noop(bob), seq(seqBob++), ter(terQUEUED)); - env(noop(carol), seq(seqCarol++), ter(terQUEUED)); - env(noop(daria), seq(seqDaria++), ter(terQUEUED)); - env(noop(ellie), seq(seqEllie++), ter(terQUEUED)); - env(noop(fiona), seq(seqFiona++), ter(terQUEUED)); + env(noop(alice), seq(seqAlice++), fee(--txFee), ter(terQUEUED)); + env(noop(bob), seq(seqBob++), fee(--txFee), ter(terQUEUED)); + env(noop(carol), seq(seqCarol++), fee(--txFee), ter(terQUEUED)); + env(noop(daria), seq(seqDaria++), fee(--txFee), ter(terQUEUED)); + env(noop(ellie), seq(seqEllie++), fee(--txFee), ter(terQUEUED)); + env(noop(fiona), seq(seqFiona++), fee(--txFee), ter(terQUEUED)); } std::size_t expectedInQueue = 60; checkMetrics( @@ -4283,8 +4289,8 @@ public: // We'll be using fees to control which entries leave the queue in // which order. There's no "lowFee" -- that's the default fee from // the unit test. - auto const medFee = drops(15); - auto const hiFee = drops(1000); + int const medFee = 100; + int const hiFee = 1000; auto cfg = makeConfig( {{"minimum_txn_in_ledger_standalone", "5"}, @@ -4314,12 +4320,14 @@ public: // will expire out soon. 
auto seqAlice = env.seq(alice); auto const seqSaveAlice = seqAlice; + int feeDrops = 40; env(noop(alice), seq(seqAlice++), + fee(--feeDrops), json(R"({"LastLedgerSequence": 7})"), ter(terQUEUED)); - env(noop(alice), seq(seqAlice++), ter(terQUEUED)); - env(noop(alice), seq(seqAlice++), ter(terQUEUED)); + env(noop(alice), seq(seqAlice++), fee(--feeDrops), ter(terQUEUED)); + env(noop(alice), seq(seqAlice++), fee(--feeDrops), ter(terQUEUED)); BEAST_EXPECT(env.seq(alice) == seqSaveAlice); // Similarly for bob, but bob uses tickets in his transactions. @@ -4328,8 +4336,14 @@ public: ticket::use(bobTicketSeq + 0), json(R"({"LastLedgerSequence": 7})"), ter(terQUEUED)); - env(noop(bob), ticket::use(bobTicketSeq + 1), ter(terQUEUED)); - env(noop(bob), ticket::use(bobTicketSeq + 2), ter(terQUEUED)); + env(noop(bob), + ticket::use(bobTicketSeq + 1), + fee(--feeDrops), + ter(terQUEUED)); + env(noop(bob), + ticket::use(bobTicketSeq + 2), + fee(--feeDrops), + ter(terQUEUED)); // Fill the queue with higher fee transactions so alice's and // bob's transactions are stuck in the queue. @@ -4337,12 +4351,13 @@ public: auto seqDaria = env.seq(daria); auto seqEllie = env.seq(ellie); auto seqFiona = env.seq(fiona); + feeDrops = medFee; for (int i = 0; i < 7; ++i) { - env(noop(carol), seq(seqCarol++), fee(medFee), ter(terQUEUED)); - env(noop(daria), seq(seqDaria++), fee(medFee), ter(terQUEUED)); - env(noop(ellie), seq(seqEllie++), fee(medFee), ter(terQUEUED)); - env(noop(fiona), seq(seqFiona++), fee(medFee), ter(terQUEUED)); + env(noop(carol), seq(seqCarol++), fee(--feeDrops), ter(terQUEUED)); + env(noop(daria), seq(seqDaria++), fee(--feeDrops), ter(terQUEUED)); + env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); + env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } checkMetrics(env, 34, 50, 7, 6, 256); @@ -4350,24 +4365,26 @@ public: checkMetrics(env, 26, 50, 8, 7, 256); // Re-fill the queue so alice and bob stay stuck. 
+ feeDrops = medFee; for (int i = 0; i < 3; ++i) { - env(noop(carol), seq(seqCarol++), fee(medFee), ter(terQUEUED)); - env(noop(daria), seq(seqDaria++), fee(medFee), ter(terQUEUED)); - env(noop(ellie), seq(seqEllie++), fee(medFee), ter(terQUEUED)); - env(noop(fiona), seq(seqFiona++), fee(medFee), ter(terQUEUED)); + env(noop(carol), seq(seqCarol++), fee(--feeDrops), ter(terQUEUED)); + env(noop(daria), seq(seqDaria++), fee(--feeDrops), ter(terQUEUED)); + env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); + env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } checkMetrics(env, 38, 50, 8, 7, 256); env.close(); checkMetrics(env, 29, 50, 9, 8, 256); // One more time... + feeDrops = medFee; for (int i = 0; i < 3; ++i) { - env(noop(carol), seq(seqCarol++), fee(medFee), ter(terQUEUED)); - env(noop(daria), seq(seqDaria++), fee(medFee), ter(terQUEUED)); - env(noop(ellie), seq(seqEllie++), fee(medFee), ter(terQUEUED)); - env(noop(fiona), seq(seqFiona++), fee(medFee), ter(terQUEUED)); + env(noop(carol), seq(seqCarol++), fee(--feeDrops), ter(terQUEUED)); + env(noop(daria), seq(seqDaria++), fee(--feeDrops), ter(terQUEUED)); + env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); + env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } checkMetrics(env, 41, 50, 9, 8, 256); env.close(); @@ -4382,16 +4399,17 @@ public: env(noop(alice), seq(seqAlice), fee(hiFee), ter(telCAN_NOT_QUEUE)); // Once again, fill the queue almost to the brim. 
+ feeDrops = medFee; for (int i = 0; i < 4; ++i) { - env(noop(carol), seq(seqCarol++), ter(terQUEUED)); - env(noop(daria), seq(seqDaria++), ter(terQUEUED)); - env(noop(ellie), seq(seqEllie++), ter(terQUEUED)); - env(noop(fiona), seq(seqFiona++), ter(terQUEUED)); + env(noop(carol), seq(seqCarol++), fee(--feeDrops), ter(terQUEUED)); + env(noop(daria), seq(seqDaria++), fee(--feeDrops), ter(terQUEUED)); + env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); + env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } - env(noop(carol), seq(seqCarol++), ter(terQUEUED)); - env(noop(daria), seq(seqDaria++), ter(terQUEUED)); - env(noop(ellie), seq(seqEllie++), ter(terQUEUED)); + env(noop(carol), seq(seqCarol++), fee(--feeDrops), ter(terQUEUED)); + env(noop(daria), seq(seqDaria++), fee(--feeDrops), ter(terQUEUED)); + env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); checkMetrics(env, 48, 50, 10, 9, 256); // Now induce a fee jump which should cause all the transactions @@ -4401,7 +4419,7 @@ public: // asynchronously lowered by LoadManager. Here we're just // pushing the local fee up really high and then hoping that we // outrace LoadManager undoing our work. - for (int i = 0; i < 10; ++i) + for (int i = 0; i < 30; ++i) env.app().getFeeTrack().raiseLocalFee(); // Now close the ledger, which will attempt to process alice's @@ -4442,7 +4460,7 @@ public: // Verify that there's a gap at the front of alice's queue by // queuing another low fee transaction into that spot. - env(noop(alice), seq(seqAlice++), ter(terQUEUED)); + env(noop(alice), seq(seqAlice++), fee(11), ter(terQUEUED)); // Verify that the first entry in alice's queue is still there // by trying to replace it and having that fail. @@ -4468,11 +4486,11 @@ public: // Verify that bob's first transaction was removed from the queue // by queueing another low fee transaction into that spot. 
- env(noop(bob), ticket::use(bobTicketSeq + 0), ter(terQUEUED)); + env(noop(bob), ticket::use(bobTicketSeq + 0), fee(12), ter(terQUEUED)); // Verify that bob's second transaction was removed from the queue // by queueing another low fee transaction into that spot. - env(noop(bob), ticket::use(bobTicketSeq + 1), ter(terQUEUED)); + env(noop(bob), ticket::use(bobTicketSeq + 1), fee(11), ter(terQUEUED)); // Verify that the last entry in bob's queue is still there // by trying to replace it and having that fail. diff --git a/src/test/basics/KeyCache_test.cpp b/src/test/basics/KeyCache_test.cpp index c3ee03595..7f3f13e27 100644 --- a/src/test/basics/KeyCache_test.cpp +++ b/src/test/basics/KeyCache_test.cpp @@ -17,10 +17,13 @@ */ //============================================================================== -#include +#include #include #include #include +#include +#include +#include namespace ripple { @@ -35,32 +38,31 @@ public: clock.set(0); using Key = std::string; - using Cache = KeyCache; + using Cache = TaggedCache; + + test::SuiteJournal j("KeyCacheTest", *this); // Insert an item, retrieve it, and age it so it gets purged. 
{ - Cache c("test", clock, 1, 2s); + Cache c("test", LedgerIndex(1), 2s, clock, j); BEAST_EXPECT(c.size() == 0); BEAST_EXPECT(c.insert("one")); BEAST_EXPECT(!c.insert("one")); BEAST_EXPECT(c.size() == 1); - BEAST_EXPECT(c.exists("one")); BEAST_EXPECT(c.touch_if_exists("one")); ++clock; c.sweep(); BEAST_EXPECT(c.size() == 1); - BEAST_EXPECT(c.exists("one")); ++clock; c.sweep(); BEAST_EXPECT(c.size() == 0); - BEAST_EXPECT(!c.exists("one")); BEAST_EXPECT(!c.touch_if_exists("one")); } // Insert two items, have one expire { - Cache c("test", clock, 2, 2s); + Cache c("test", LedgerIndex(2), 2s, clock, j); BEAST_EXPECT(c.insert("one")); BEAST_EXPECT(c.size() == 1); @@ -73,12 +75,11 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.size() == 1); - BEAST_EXPECT(c.exists("two")); } // Insert three items (1 over limit), sweep { - Cache c("test", clock, 2, 3s); + Cache c("test", LedgerIndex(2), 3s, clock, j); BEAST_EXPECT(c.insert("one")); ++clock; diff --git a/src/test/basics/TaggedCache_test.cpp b/src/test/basics/TaggedCache_test.cpp index 9eb2d3cb5..6a5b44299 100644 --- a/src/test/basics/TaggedCache_test.cpp +++ b/src/test/basics/TaggedCache_test.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include namespace ripple { @@ -48,7 +49,7 @@ public: TestStopwatch clock; clock.set(0); - using Key = int; + using Key = LedgerIndex; using Value = std::string; using Cache = TaggedCache; diff --git a/src/test/consensus/Validations_test.cpp b/src/test/consensus/Validations_test.cpp index beb43421c..79de1fc80 100644 --- a/src/test/consensus/Validations_test.cpp +++ b/src/test/consensus/Validations_test.cpp @@ -21,9 +21,9 @@ #include #include #include -#include - #include +#include +#include #include #include #include @@ -703,6 +703,7 @@ class Validations_test : public beast::unit_test::suite { // Verify expiring clears out validations stored by ledger testcase("Expire validations"); + SuiteJournal j("Validations_test", *this); LedgerHistoryHelper h; TestHarness 
harness(h.oracle); Node const a = harness.makeNode(); @@ -713,10 +714,10 @@ class Validations_test : public beast::unit_test::suite Ledger const ledgerA = h["a"]; BEAST_EXPECT(ValStatus::current == harness.add(a.validate(ledgerA))); BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerA.id()) == 1); - harness.vals().expire(); + harness.vals().expire(j); BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerA.id()) == 1); harness.clock().advance(harness.parms().validationSET_EXPIRES); - harness.vals().expire(); + harness.vals().expire(j); BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerA.id()) == 0); // use setSeqToKeep to keep the validation from expire @@ -725,7 +726,7 @@ class Validations_test : public beast::unit_test::suite BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerB.id()) == 1); harness.vals().setSeqToKeep(ledgerB.seq(), ledgerB.seq() + one); harness.clock().advance(harness.parms().validationSET_EXPIRES); - harness.vals().expire(); + harness.vals().expire(j); BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerB.id()) == 1); // change toKeep harness.vals().setSeqToKeep(ledgerB.seq() + one, ledgerB.seq() + two); @@ -736,7 +737,7 @@ class Validations_test : public beast::unit_test::suite for (int i = 0; i < loops; ++i) { harness.clock().advance(harness.parms().validationFRESHNESS); - harness.vals().expire(); + harness.vals().expire(j); } BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerB.id()) == 0); @@ -746,7 +747,7 @@ class Validations_test : public beast::unit_test::suite BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerC.id()) == 1); harness.vals().setSeqToKeep(ledgerC.seq() - one, ledgerC.seq()); harness.clock().advance(harness.parms().validationSET_EXPIRES); - harness.vals().expire(); + harness.vals().expire(j); BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerC.id()) == 0); } diff --git a/src/test/core/Coroutine_test.cpp b/src/test/core/Coroutine_test.cpp index 2abac6574..944602784 100644 --- 
a/src/test/core/Coroutine_test.cpp +++ b/src/test/core/Coroutine_test.cpp @@ -63,17 +63,23 @@ public: { using namespace std::chrono_literals; using namespace jtx; - Env env(*this); - auto& jq = env.app().getJobQueue(); - jq.setThreadCount(0, false); + + testcase("correct order"); + + Env env(*this, envconfig([](std::unique_ptr cfg) { + cfg->FORCE_MULTI_THREAD = true; + return cfg; + })); + gate g1, g2; std::shared_ptr c; - jq.postCoro(jtCLIENT, "Coroutine-Test", [&](auto const& cr) { - c = cr; - g1.signal(); - c->yield(); - g2.signal(); - }); + env.app().getJobQueue().postCoro( + jtCLIENT, "Coroutine-Test", [&](auto const& cr) { + c = cr; + g1.signal(); + c->yield(); + g2.signal(); + }); BEAST_EXPECT(g1.wait_for(5s)); c->join(); c->post(); @@ -85,15 +91,21 @@ public: { using namespace std::chrono_literals; using namespace jtx; - Env env(*this); - auto& jq = env.app().getJobQueue(); - jq.setThreadCount(0, false); + + testcase("incorrect order"); + + Env env(*this, envconfig([](std::unique_ptr cfg) { + cfg->FORCE_MULTI_THREAD = true; + return cfg; + })); + gate g; - jq.postCoro(jtCLIENT, "Coroutine-Test", [&](auto const& c) { - c->post(); - c->yield(); - g.signal(); - }); + env.app().getJobQueue().postCoro( + jtCLIENT, "Coroutine-Test", [&](auto const& c) { + c->post(); + c->yield(); + g.signal(); + }); BEAST_EXPECT(g.wait_for(5s)); } @@ -102,9 +114,12 @@ public: { using namespace std::chrono_literals; using namespace jtx; + + testcase("thread specific storage"); Env env(*this); + auto& jq = env.app().getJobQueue(); - jq.setThreadCount(0, true); + static int const N = 4; std::array, N> a; diff --git a/src/test/csf/Peer.h b/src/test/csf/Peer.h index 2f3ce1931..3a61b853c 100644 --- a/src/test/csf/Peer.h +++ b/src/test/csf/Peer.h @@ -919,7 +919,7 @@ struct Peer start() { // TODO: Expire validations less frequently? 
- validations.expire(); + validations.expire(j); scheduler.in(parms().ledgerGRANULARITY, [&]() { timerEntry(); }); startRound(); } diff --git a/src/test/resource/Logic_test.cpp b/src/test/resource/Logic_test.cpp index 719cce620..25379370f 100644 --- a/src/test/resource/Logic_test.cpp +++ b/src/test/resource/Logic_test.cpp @@ -146,7 +146,7 @@ public: if (c.charge(fee) == drop) { // Disconnect abusive Consumer - BEAST_EXPECT(c.disconnect() == limited); + BEAST_EXPECT(c.disconnect(j) == limited); break; } ++logic.clock(); diff --git a/src/test/rpc/AccountLinesRPC_test.cpp b/src/test/rpc/AccountLinesRPC_test.cpp index e7dbe5593..bdd376b3a 100644 --- a/src/test/rpc/AccountLinesRPC_test.cpp +++ b/src/test/rpc/AccountLinesRPC_test.cpp @@ -320,7 +320,7 @@ public: "account_lines", R"({"account": ")" + alice.human() + R"(", )" - R"("limit": 1, )" + R"("limit": 10, )" R"("peer": ")" + gw2.human() + R"("})"); auto const& line = lines[jss::result][jss::lines][0u]; @@ -363,6 +363,91 @@ public: } } + void + testAccountLinesMarker() + { + testcase("Entry pointed to by marker is not owned by account"); + using namespace test::jtx; + Env env(*this); + + // The goal of this test is observe account_lines RPC calls return an + // error message when the SLE pointed to by the marker is not owned by + // the Account being traversed. + // + // To start, we'll create an environment with some trust lines, offers + // and a signers list. + Account const alice{"alice"}; + Account const becky{"becky"}; + Account const gw1{"gw1"}; + env.fund(XRP(10000), alice, becky, gw1); + env.close(); + + // Give alice a SignerList. + Account const bogie{"bogie"}; + env(signers(alice, 2, {{bogie, 3}})); + env.close(); + + auto const EUR = gw1["EUR"]; + env(trust(alice, EUR(200))); + env(trust(becky, EUR(200))); + env.close(); + + // Get all account objects for alice and verify that her + // signerlist is first. This is only a (reliable) coincidence of + // object naming. 
So if any of alice's objects are renamed this + // may fail. + Json::Value const aliceObjects = env.rpc( + "json", + "account_objects", + R"({"account": ")" + alice.human() + + R"(", )" + R"("limit": 10})"); + Json::Value const& aliceSignerList = + aliceObjects[jss::result][jss::account_objects][0u]; + if (!(aliceSignerList[sfLedgerEntryType.jsonName] == jss::SignerList)) + { + fail( + "alice's account objects are misordered. " + "Please reorder the objects so the SignerList is first.", + __FILE__, + __LINE__); + return; + } + + // Get account_lines for alice. Limit at 1, so we get a marker + // pointing to her SignerList. + auto const aliceLines1 = env.rpc( + "json", + "account_lines", + R"({"account": ")" + alice.human() + R"(", "limit": 1})"); + BEAST_EXPECT(aliceLines1[jss::result].isMember(jss::marker)); + + // Verify that the marker points at the signer list. + std::string const aliceMarker = + aliceLines1[jss::result][jss::marker].asString(); + std::string const markerIndex = + aliceMarker.substr(0, aliceMarker.find(',')); + BEAST_EXPECT(markerIndex == aliceSignerList[jss::index].asString()); + + // When we fetch Alice's remaining lines we should find one and no more. + auto const aliceLines2 = env.rpc( + "json", + "account_lines", + R"({"account": ")" + alice.human() + R"(", "marker": ")" + + aliceMarker + R"("})"); + BEAST_EXPECT(aliceLines2[jss::result][jss::lines].size() == 1); + BEAST_EXPECT(!aliceLines2[jss::result].isMember(jss::marker)); + + // Get account lines for beckys account, using alices SignerList as a + // marker. This should cause an error. 
+ auto const beckyLines = env.rpc( + "json", + "account_lines", + R"({"account": ")" + becky.human() + R"(", "marker": ")" + + aliceMarker + R"("})"); + BEAST_EXPECT(beckyLines[jss::result].isMember(jss::error_message)); + } + void testAccountLineDelete() { @@ -390,8 +475,10 @@ public: env.close(); auto const USD = gw1["USD"]; + auto const AUD = gw1["AUD"]; auto const EUR = gw2["EUR"]; env(trust(alice, USD(200))); + env(trust(alice, AUD(200))); env(trust(becky, EUR(200))); env(trust(cheri, EUR(200))); env.close(); @@ -414,7 +501,7 @@ public: "account_lines", R"({"account": ")" + alice.human() + R"(", )" - R"("limit": 1})"); + R"("limit": 2})"); BEAST_EXPECT( linesBeg[jss::result][jss::lines][0u][jss::currency] == "USD"); BEAST_EXPECT(linesBeg[jss::result].isMember(jss::marker)); @@ -966,7 +1053,7 @@ public: R"({"account": ")" + alice.human() + R"(", )" - R"("limit": 1, )" + R"("limit": 10, )" R"("peer": ")" + gw2.human() + R"("}})"); auto const& line = lines[jss::result][jss::lines][0u]; @@ -1068,8 +1155,10 @@ public: env.close(); auto const USD = gw1["USD"]; + auto const AUD = gw1["AUD"]; auto const EUR = gw2["EUR"]; env(trust(alice, USD(200))); + env(trust(alice, AUD(200))); env(trust(becky, EUR(200))); env(trust(cheri, EUR(200))); env.close(); @@ -1098,7 +1187,7 @@ public: R"({"account": ")" + alice.human() + R"(", )" - R"("limit": 1}})"); + R"("limit": 2}})"); BEAST_EXPECT( linesBeg[jss::result][jss::lines][0u][jss::currency] == "USD"); BEAST_EXPECT(linesBeg[jss::result].isMember(jss::marker)); @@ -1143,6 +1232,7 @@ public: run() override { testAccountLines(); + testAccountLinesMarker(); testAccountLineDelete(); testAccountLines2(); testAccountLineDelete2(); diff --git a/src/test/rpc/AccountOffers_test.cpp b/src/test/rpc/AccountOffers_test.cpp index 8871315aa..f4ad0a725 100644 --- a/src/test/rpc/AccountOffers_test.cpp +++ b/src/test/rpc/AccountOffers_test.cpp @@ -81,7 +81,8 @@ public: "json", "account_offers", jvParams.toStyledString())[jss::result]; auto 
const& jro_l = jrr_l[jss::offers]; BEAST_EXPECT(checkMarker(jrr_l)); - BEAST_EXPECT(checkArraySize(jro_l, 10u)); + // 9u is the expected size, since one account object is a trustline + BEAST_EXPECT(checkArraySize(jro_l, 9u)); } void @@ -173,6 +174,7 @@ public: // last item...with previous marker passed jvParams[jss::marker] = jrr_l_2[jss::marker]; + jvParams[jss::limit] = 10u; auto const jrr_l_3 = env.rpc( "json", "account_offers", @@ -203,9 +205,17 @@ public: "account_offers", jvParams.toStyledString())[jss::result]; auto const& jro = jrr[jss::offers]; - BEAST_EXPECT(checkArraySize(jro, asAdmin ? 0u : 3u)); - BEAST_EXPECT( - asAdmin ? checkMarker(jrr) : (!jrr.isMember(jss::marker))); + if (asAdmin) + { + // limit == 0 is invalid + BEAST_EXPECT(jrr.isMember(jss::error_message)); + } + else + { + // Call should enforce min limit of 10 + BEAST_EXPECT(checkArraySize(jro, 3u)); + BEAST_EXPECT(!jrr.isMember(jss::marker)); + } } } diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 13a2f9824..df8bebfac 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -1538,21 +1538,37 @@ class LedgerRPC_test : public beast::unit_test::suite env.close(); jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; - std::string txid1; - std::string txid2; - if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) - { - auto const& txj = jrr[jss::queue_data][0u]; - BEAST_EXPECT(txj[jss::account] == alice.human()); - BEAST_EXPECT(txj[jss::fee_level] == "256"); - BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); - BEAST_EXPECT(txj["retries_remaining"] == 10); - BEAST_EXPECT(txj.isMember(jss::tx)); - auto const& tx = txj[jss::tx]; - BEAST_EXPECT(tx[jss::Account] == alice.human()); - BEAST_EXPECT(tx[jss::TransactionType] == jss::OfferCreate); - txid1 = tx[jss::hash].asString(); - } + const std::string txid1 = [&]() { + if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) + { + const std::string txid0 = [&]() { + auto const& 
txj = jrr[jss::queue_data][0u]; + BEAST_EXPECT(txj[jss::account] == alice.human()); + BEAST_EXPECT(txj[jss::fee_level] == "256"); + BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); + BEAST_EXPECT(txj["retries_remaining"] == 10); + BEAST_EXPECT(txj.isMember(jss::tx)); + auto const& tx = txj[jss::tx]; + BEAST_EXPECT(tx[jss::Account] == alice.human()); + BEAST_EXPECT(tx[jss::TransactionType] == jss::AccountSet); + return tx[jss::hash].asString(); + }(); + + auto const& txj = jrr[jss::queue_data][1u]; + BEAST_EXPECT(txj[jss::account] == alice.human()); + BEAST_EXPECT(txj[jss::fee_level] == "256"); + BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); + BEAST_EXPECT(txj["retries_remaining"] == 10); + BEAST_EXPECT(txj.isMember(jss::tx)); + auto const& tx = txj[jss::tx]; + BEAST_EXPECT(tx[jss::Account] == alice.human()); + BEAST_EXPECT(tx[jss::TransactionType] == jss::OfferCreate); + const auto txid1 = tx[jss::hash].asString(); + BEAST_EXPECT(txid0 < txid1); + return txid1; + } + return std::string{}; + }(); env.close(); @@ -1561,7 +1577,15 @@ class LedgerRPC_test : public beast::unit_test::suite jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) { - auto const& txj = jrr[jss::queue_data][0u]; + auto const txid0 = [&]() { + auto const& txj = jrr[jss::queue_data][0u]; + BEAST_EXPECT(txj[jss::account] == alice.human()); + BEAST_EXPECT(txj[jss::fee_level] == "256"); + BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); + BEAST_EXPECT(txj.isMember(jss::tx)); + return txj[jss::tx].asString(); + }(); + auto const& txj = jrr[jss::queue_data][1u]; BEAST_EXPECT(txj[jss::account] == alice.human()); BEAST_EXPECT(txj[jss::fee_level] == "256"); BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); @@ -1569,6 +1593,7 @@ class LedgerRPC_test : public beast::unit_test::suite BEAST_EXPECT(txj["last_result"] == "terPRE_SEQ"); BEAST_EXPECT(txj.isMember(jss::tx)); BEAST_EXPECT(txj[jss::tx] == txid1); + 
BEAST_EXPECT(txid0 < txid1); } env.close(); @@ -1579,7 +1604,7 @@ class LedgerRPC_test : public beast::unit_test::suite jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) { - auto const& txj = jrr[jss::queue_data][0u]; + auto const& txj = jrr[jss::queue_data][1u]; BEAST_EXPECT(txj[jss::account] == alice.human()); BEAST_EXPECT(txj[jss::fee_level] == "256"); BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); @@ -1588,7 +1613,7 @@ class LedgerRPC_test : public beast::unit_test::suite BEAST_EXPECT(txj.isMember(jss::tx)); BEAST_EXPECT(txj[jss::tx].isMember(jss::tx_blob)); - auto const& txj2 = jrr[jss::queue_data][1u]; + auto const& txj2 = jrr[jss::queue_data][0u]; BEAST_EXPECT(txj2[jss::account] == alice.human()); BEAST_EXPECT(txj2[jss::fee_level] == "256"); BEAST_EXPECT(txj2["preflight_result"] == "tesSUCCESS"); @@ -1607,18 +1632,21 @@ class LedgerRPC_test : public beast::unit_test::suite jv[jss::binary] = false; jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; - if (BEAST_EXPECT(jrr[jss::queue_data].size() == 1)) - { - auto const& txj = jrr[jss::queue_data][0u]; - BEAST_EXPECT(txj[jss::account] == alice.human()); - BEAST_EXPECT(txj[jss::fee_level] == "256"); - BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); - BEAST_EXPECT(txj["retries_remaining"] == 1); - BEAST_EXPECT(txj["last_result"] == "terPRE_SEQ"); - BEAST_EXPECT(txj.isMember(jss::tx)); - BEAST_EXPECT(txj[jss::tx] != txid1); - txid2 = txj[jss::tx].asString(); - } + const std::string txid2 = [&]() { + if (BEAST_EXPECT(jrr[jss::queue_data].size() == 1)) + { + auto const& txj = jrr[jss::queue_data][0u]; + BEAST_EXPECT(txj[jss::account] == alice.human()); + BEAST_EXPECT(txj[jss::fee_level] == "256"); + BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); + BEAST_EXPECT(txj["retries_remaining"] == 1); + BEAST_EXPECT(txj["last_result"] == "terPRE_SEQ"); + BEAST_EXPECT(txj.isMember(jss::tx)); + BEAST_EXPECT(txj[jss::tx] != txid1); + 
return txj[jss::tx].asString(); + } + return std::string{}; + }(); jv[jss::full] = true; diff --git a/src/test/rpc/NodeToShardRPC_test.cpp b/src/test/rpc/NodeToShardRPC_test.cpp index 64e089b0b..edfaf6c20 100644 --- a/src/test/rpc/NodeToShardRPC_test.cpp +++ b/src/test/rpc/NodeToShardRPC_test.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -52,6 +53,76 @@ class NodeToShardRPC_test : public beast::unit_test::suite } public: + void + testDisabled() + { + testcase("Disabled"); + + beast::temp_dir tempDir; + + jtx::Env env = [&] { + auto c = jtx::envconfig(); + auto& sectionNode = c->section(ConfigSection::nodeDatabase()); + sectionNode.set("earliest_seq", "257"); + sectionNode.set("ledgers_per_shard", "256"); + c->setupControl(true, true, true); + + return jtx::Env(*this, std::move(c)); + }(); + + std::uint8_t const numberOfShards = 10; + + // Create some ledgers so that we can initiate a + // shard store database import. + for (int i = 0; i < 256 * (numberOfShards + 1); ++i) + { + env.close(); + } + + { + auto shardStore = env.app().getShardStore(); + if (!BEAST_EXPECT(!shardStore)) + return; + } + + { + // Try the node_to_shard status RPC command. Should fail. + + Json::Value jvParams; + jvParams[jss::action] = "status"; + + auto const result = env.rpc( + "json", "node_to_shard", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(result[jss::error_code] == rpcNOT_ENABLED); + } + + { + // Try to start a shard store import via the RPC + // interface. Should fail. + + Json::Value jvParams; + jvParams[jss::action] = "start"; + + auto const result = env.rpc( + "json", "node_to_shard", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(result[jss::error_code] == rpcNOT_ENABLED); + } + + { + // Try the node_to_shard status RPC command. Should fail. 
+ + Json::Value jvParams; + jvParams[jss::action] = "status"; + + auto const result = env.rpc( + "json", "node_to_shard", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(result[jss::error_code] == rpcNOT_ENABLED); + } + } + void testStart() { @@ -321,6 +392,7 @@ public: void run() override { + testDisabled(); testStart(); testStop(); } diff --git a/src/test/rpc/Roles_test.cpp b/src/test/rpc/Roles_test.cpp index 2f2465ef0..a56120740 100644 --- a/src/test/rpc/Roles_test.cpp +++ b/src/test/rpc/Roles_test.cpp @@ -20,9 +20,12 @@ #include #include #include -#include #include #include + +#include + +#include #include namespace ripple { @@ -31,6 +34,14 @@ namespace test { class Roles_test : public beast::unit_test::suite { + bool + isValidIpAddress(std::string const& addr) + { + boost::system::error_code ec; + boost::asio::ip::make_address(addr, ec); + return !ec.failed(); + } + void testRoles() { @@ -63,31 +74,65 @@ class Roles_test : public beast::unit_test::suite !wsRes.isMember("unlimited") || !wsRes["unlimited"].asBool()); std::unordered_map headers; + Json::Value rpcRes; + + // IPv4 tests. 
headers["X-Forwarded-For"] = "12.34.56.78"; - auto rpcRes = env.rpc(headers, "ping")["result"]; + rpcRes = env.rpc(headers, "ping")["result"]; BEAST_EXPECT(rpcRes["role"] == "proxied"); BEAST_EXPECT(rpcRes["ip"] == "12.34.56.78"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); headers["X-Forwarded-For"] = "87.65.43.21, 44.33.22.11"; rpcRes = env.rpc(headers, "ping")["result"]; BEAST_EXPECT(rpcRes["ip"] == "87.65.43.21"); - headers.erase("X-Forwarded-For"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + headers["X-Forwarded-For"] = "87.65.43.21:47011, 44.33.22.11"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["ip"] == "87.65.43.21"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers = {}; headers["Forwarded"] = "for=88.77.66.55"; rpcRes = env.rpc(headers, "ping")["result"]; BEAST_EXPECT(rpcRes["ip"] == "88.77.66.55"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); headers["Forwarded"] = "what=where;for=55.66.77.88;for=nobody;" "who=3"; rpcRes = env.rpc(headers, "ping")["result"]; BEAST_EXPECT(rpcRes["ip"] == "55.66.77.88"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); headers["Forwarded"] = - "what=where;for=55.66.77.88, 99.00.11.22;" + "what=where; for=55.66.77.88, for=99.00.11.22;" "who=3"; rpcRes = env.rpc(headers, "ping")["result"]; BEAST_EXPECT(rpcRes["ip"] == "55.66.77.88"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["Forwarded"] = + "what=where; For=99.88.77.66, for=55.66.77.88;" + "who=3"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["ip"] == "99.88.77.66"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["Forwarded"] = + "what=where; for=\"55.66.77.88:47011\";" + "who=3"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["ip"] == "55.66.77.88"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["Forwarded"] = + "what=where; For= \" 99.88.77.66 \" 
,for=11.22.33.44;" + "who=3"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["ip"] == "99.88.77.66"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); wsRes = makeWSClient(env.app().config(), true, 2, headers) ->invoke("ping")["result"]; @@ -99,10 +144,218 @@ class Roles_test : public beast::unit_test::suite rpcRes = env.rpc(headers, "ping")["result"]; BEAST_EXPECT(rpcRes["role"] == "identified"); BEAST_EXPECT(rpcRes["username"] == name); - BEAST_EXPECT(rpcRes["ip"] == "55.66.77.88"); + BEAST_EXPECT(rpcRes["ip"] == "99.88.77.66"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); wsRes = makeWSClient(env.app().config(), true, 2, headers) ->invoke("ping")["result"]; BEAST_EXPECT(wsRes["unlimited"].asBool()); + + // IPv6 tests. + headers = {}; + headers["X-Forwarded-For"] = + "2001:db8:3333:4444:5555:6666:7777:8888"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:7777:8888"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["X-Forwarded-For"] = + "2001:db8:3333:4444:5555:6666:7777:9999, a:b:c:d:e:f, " + "g:h:i:j:k:l"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:7777:9999"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["X-Forwarded-For"] = + "[2001:db8:3333:4444:5555:6666:7777:8888]"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:7777:8888"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["X-Forwarded-For"] = + "[2001:db8:3333:4444:5555:6666:7777:9999], [a:b:c:d:e:f], " + "[g:h:i:j:k:l]"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == 
"2001:db8:3333:4444:5555:6666:7777:9999"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers = {}; + headers["Forwarded"] = + "for=\"[2001:db8:3333:4444:5555:6666:7777:aaaa]\""; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:7777:aaaa"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["Forwarded"] = + "For=\"[2001:db8:bb:cc:dd:ee:ff::]:2345\", for=99.00.11.22"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT(rpcRes["ip"] == "2001:db8:bb:cc:dd:ee:ff::"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["Forwarded"] = + "proto=http;FOR=\"[2001:db8:11:22:33:44:55:66]\"" + ";by=203.0.113.43"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT(rpcRes["ip"] == "2001:db8:11:22:33:44:55:66"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + // IPv6 (dual) tests. 
+ headers = {}; + headers["X-Forwarded-For"] = "2001:db8:3333:4444:5555:6666:1.2.3.4"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:1.2.3.4"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["X-Forwarded-For"] = + "2001:db8:3333:4444:5555:6666:5.6.7.8, a:b:c:d:e:f, " + "g:h:i:j:k:l"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:5.6.7.8"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["X-Forwarded-For"] = + "[2001:db8:3333:4444:5555:6666:9.10.11.12]"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:9.10.11.12"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["X-Forwarded-For"] = + "[2001:db8:3333:4444:5555:6666:13.14.15.16], [a:b:c:d:e:f], " + "[g:h:i:j:k:l]"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:13.14.15.16"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers = {}; + headers["Forwarded"] = + "for=\"[2001:db8:3333:4444:5555:6666:20.19.18.17]\""; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT( + rpcRes["ip"] == "2001:db8:3333:4444:5555:6666:20.19.18.17"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["Forwarded"] = + "For=\"[2001:db8:bb:cc::24.23.22.21]\", for=99.00.11.22"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT(rpcRes["ip"] == "2001:db8:bb:cc::24.23.22.21"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + + headers["Forwarded"] = + 
"proto=http;FOR=\"[::11:22:33:44:45.55.65.75]:234\"" + ";by=203.0.113.43"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "proxied"); + BEAST_EXPECT(rpcRes["ip"] == "::11:22:33:44:45.55.65.75"); + BEAST_EXPECT(isValidIpAddress(rpcRes["ip"].asString())); + } + } + + void + testInvalidIpAddresses() + { + using namespace test::jtx; + + { + Env env(*this); + + std::unordered_map headers; + Json::Value rpcRes; + + // No "for=" in Forwarded. + headers["Forwarded"] = "for 88.77.66.55"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + headers["Forwarded"] = "by=88.77.66.55"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + // Empty field. + headers = {}; + headers["Forwarded"] = "for="; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + headers = {}; + headers["X-Forwarded-For"] = " "; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + // Empty quotes. + headers = {}; + headers["Forwarded"] = "for= \" \" "; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + headers = {}; + headers["X-Forwarded-For"] = "\"\""; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + // Unbalanced outer quotes. 
+ headers = {}; + headers["X-Forwarded-For"] = "\"12.34.56.78 "; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + headers["X-Forwarded-For"] = "12.34.56.78\""; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + // Unbalanced square brackets for IPv6. + headers = {}; + headers["Forwarded"] = "FOR=[2001:db8:bb:cc::"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + headers = {}; + headers["X-Forwarded-For"] = "2001:db8:bb:cc::24.23.22.21]"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + // Empty square brackets. + headers = {}; + headers["Forwarded"] = "FOR=[]"; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); + + headers = {}; + headers["X-Forwarded-For"] = "\" [ ] \""; + rpcRes = env.rpc(headers, "ping")["result"]; + BEAST_EXPECT(rpcRes["role"] == "admin"); + BEAST_EXPECT(!rpcRes.isMember("ip")); } } @@ -111,6 +364,7 @@ public: run() override { testRoles(); + testInvalidIpAddresses(); } }; diff --git a/src/test/shamap/common.h b/src/test/shamap/common.h index 1f6b924e0..c4238b2a6 100644 --- a/src/test/shamap/common.h +++ b/src/test/shamap/common.h @@ -46,7 +46,8 @@ public: TestNodeFamily(beast::Journal j) : fbCache_(std::make_shared( "App family full below cache", - clock_)) + clock_, + j)) , tnCache_(std::make_shared( "App family tree node cache", 65536,