Compare commits

...

12 Commits

Author SHA1 Message Date
Michael Legleux
e7aa31632e chore: Build voidstar on amd64 only 2026-03-04 21:39:25 -08:00
Michael Legleux
c2ff57c472 fix: Fix docs deployment for pull requests 2026-03-04 21:32:52 -08:00
Michael Legleux
77518394e8 fix: Stop committing generated docs to prevent repo bloat (#6474) 2026-03-04 19:19:57 -08:00
Ayaz Salikhov
c69091bded chore: Add Git information compile-time info to only one file (#6464)
The existing code added the git commit info (`GIT_COMMIT_HASH` and `GIT_BRANCH`) to every file, which was a problem for leveraging `ccache` to cache build objects. This change adds a separate C++ file from where these compile-time variables are propagated to wherever they are needed. A new CMake file is added to set the commit info if the `git` binary is available.
2026-03-04 19:45:28 +00:00
Alex Kremer
595f0dd461 chore: Enable clang-tidy bugprone-sizeof-expression check (#6466) 2026-03-04 19:15:22 +00:00
Alex Kremer
b451d5e412 chore: Enable clang-tidy bugprone-return-const-ref-from-parameter check (#6459) 2026-03-04 18:10:10 +00:00
Alex Kremer
af97df5a63 chore: Enable clang-tidy bugprone-move-forwarding-reference check (#6457) 2026-03-04 17:03:27 +00:00
Peter Chen
e39954d128 fix: Gateway balance with MPT (#6143)
When `gateway_balances` is called on an account that is involved in an `EscrowCreate` transaction (with an MPT being escrowed), the method returns an internal error. This change fixes this case by excluding the MPT type when totaling the escrow amount.
2026-03-04 15:50:51 +00:00
tequ
3cd1e3d94e refactor: Update PermissionedDomainDelete to use keylet for sle access (#6063) 2026-03-04 04:11:58 +01:00
Ayaz Salikhov
fcec31ed20 chore: Update pre-commit hooks (#6460) 2026-03-03 20:23:22 +00:00
dependabot[bot]
0abd762781 ci: [DEPENDABOT] bump actions/upload-artifact from 6.0.0 to 7.0.0 (#6450) 2026-03-03 17:17:08 +00:00
Sergey Kuznetsov
5300e65686 tests: Improve stability of Subscribe tests (#6420)
The `Subscribe` tests were flaky, because each test performs some operations (e.g. sends transactions) and waits for messages to appear in subscription with a 100ms timeout. If tests are slow (e.g. compiled in debug mode or a slow machine) then some of them could fail. This change adds an attempt to synchronize the background Env's thread and the test's thread by ensuring that all the scheduled operations are started before the test's thread starts to wait for a websocket message. This is done by limiting I/O threads of the app inside Env to 1 and adding a synchronization barrier after closing the ledger.
2026-03-03 08:46:55 -05:00
36 changed files with 366 additions and 161 deletions

View File

@@ -24,6 +24,7 @@ Checks: "-*,
bugprone-misplaced-operator-in-strlen-in-alloc,
bugprone-misplaced-pointer-arithmetic-in-alloc,
bugprone-misplaced-widening-cast,
bugprone-move-forwarding-reference,
bugprone-multi-level-implicit-pointer-conversion,
bugprone-multiple-new-in-one-expression,
bugprone-multiple-statement-macro,
@@ -32,10 +33,12 @@ Checks: "-*,
bugprone-parent-virtual-call,
bugprone-posix-return,
bugprone-redundant-branch-condition,
bugprone-return-const-ref-from-parameter,
bugprone-shared-ptr-array-mismatch,
bugprone-signal-handler,
bugprone-signed-char-misuse,
bugprone-sizeof-container,
bugprone-sizeof-expression,
bugprone-spuriously-wake-up-functions,
bugprone-standalone-empty,
bugprone-string-constructor,
@@ -82,16 +85,14 @@ Checks: "-*,
performance-trivially-destructible
"
# ---
# checks that have some issues that need to be resolved:
# more checks that have some issues that need to be resolved:
#
# bugprone-crtp-constructor-accessibility,
# bugprone-inc-dec-in-conditions,
# bugprone-reserved-identifier,
# bugprone-move-forwarding-reference,
# bugprone-unused-local-non-trivial-variable,
# bugprone-return-const-ref-from-parameter,
# bugprone-switch-missing-default-case,
# bugprone-sizeof-expression,
# bugprone-suspicious-stringview-data-usage,
# bugprone-suspicious-missing-comma,
# bugprone-pointer-arithmetic-on-polymorphic-object,

View File

@@ -55,7 +55,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
# fee to 500.
# - Bookworm using GCC 15: Debug on linux/amd64, enable code
# coverage (which will be done below).
# - Bookworm using Clang 16: Debug on linux/arm64, enable voidstar.
# - Bookworm using Clang 16: Debug on linux/amd64, enable voidstar.
# - Bookworm using Clang 17: Release on linux/amd64, set the
# reference fee to 1000.
# - Bookworm using Clang 20: Debug on linux/amd64.
@@ -78,7 +78,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "clang-16"
and build_type == "Debug"
and architecture["platform"] == "linux/arm64"
and architecture["platform"] == "linux/amd64"
):
cmake_args = f"-Dvoidstar=ON {cmake_args}"
skip = False

View File

@@ -40,15 +40,18 @@ env:
NPROC_SUBTRACT: ${{ github.event.repository.private && '1' || '2' }}
jobs:
publish:
build:
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
with:
enable_ccache: false
- name: Get number of processors
uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
id: nproc
@@ -78,9 +81,23 @@ jobs:
cmake -Donly_docs=ON ..
cmake --build . --target docs --parallel ${BUILD_NPROC}
- name: Publish documentation
- name: Create documentation artifact
if: ${{ github.event_name == 'push' }}
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ${{ env.BUILD_DIR }}/docs/html
path: ${{ env.BUILD_DIR }}/docs/html
deploy:
if: ${{ github.event_name == 'push' }}
needs: build
runs-on: ubuntu-latest
permissions:
pages: write
id-token: write
environment:
name: github-pages
url: ${{ steps.deploy.outputs.page_url }}
steps:
- name: Deploy to GitHub Pages
id: deploy
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5

View File

@@ -177,7 +177,7 @@ jobs:
- name: Upload the binary (Linux)
if: ${{ github.repository_owner == 'XRPLF' && runner.os == 'Linux' }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: xrpld-${{ inputs.config_name }}
path: ${{ env.BUILD_DIR }}/xrpld

View File

@@ -84,7 +84,7 @@ jobs:
- name: Upload clang-tidy output
if: steps.run_clang_tidy.outcome != 'success'
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: clang-tidy-results
path: clang-tidy-output.txt

View File

@@ -20,7 +20,7 @@ repos:
args: [--assume-in-merge]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 75ca4ad908dc4a99f57921f29b7e6c1521e10b26 # frozen: v21.1.8
rev: cd481d7b0bfb5c7b3090c21846317f9a8262e891 # frozen: v22.1.0
hooks:
- id: clang-format
args: [--style=file]
@@ -33,17 +33,17 @@ repos:
additional_dependencies: [PyYAML]
- repo: https://github.com/rbubley/mirrors-prettier
rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
rev: c2bc67fe8f8f549cc489e00ba8b45aa18ee713b1 # frozen: v3.8.1
hooks:
- id: prettier
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: v25.12.0
rev: ea488cebbfd88a5f50b8bd95d5c829d0bb76feb8 # frozen: 26.1.0
hooks:
- id: black
- repo: https://github.com/streetsidesoftware/cspell-cli
rev: 1cfa010f078c354f3ffb8413616280cc28f5ba21 # frozen: v9.4.0
rev: a42085ade523f591dca134379a595e7859986445 # frozen: v9.7.0
hooks:
- id: cspell # Spell check changed files
exclude: .config/cspell.config.yaml

View File

@@ -36,26 +36,6 @@ endif ()
# Enable ccache to speed up builds.
include(Ccache)
# make GIT_COMMIT_HASH define available to all sources
find_package(Git)
if (Git_FOUND)
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse
HEAD OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
if (gch)
set(GIT_COMMIT_HASH "${gch}")
message(STATUS gch: ${GIT_COMMIT_HASH})
add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
endif ()
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse
--abbrev-ref HEAD OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gb)
if (gb)
set(GIT_BRANCH "${gb}")
message(STATUS gb: ${GIT_BRANCH})
add_definitions(-DGIT_BRANCH="${GIT_BRANCH}")
endif ()
endif () # git
if (thread_safety_analysis)
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS
-DXRPL_ENABLE_THREAD_SAFETY_ANNOTATIONS)

21
cmake/GitInfo.cmake Normal file
View File

@@ -0,0 +1,21 @@
# Captures the current git branch and commit hash at configure time, exporting
# them as GIT_BUILD_BRANCH and GIT_COMMIT_HASH (consumed via
# target_compile_definitions on the xrpl.libxrpl.git module). Both variables
# default to empty strings and stay empty when git is unavailable.
include_guard()
set(GIT_BUILD_BRANCH "")
set(GIT_COMMIT_HASH "")
find_package(Git)
if (NOT Git_FOUND)
message(WARNING "Git not found. Git branch and commit hash will be empty.")
return()
endif ()
# Point --git-dir at the source root's .git explicitly so the result does not
# depend on the current working directory of the configure step.
set(GIT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/.git)
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${GIT_DIRECTORY} rev-parse --abbrev-ref HEAD
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE GIT_BUILD_BRANCH)
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${GIT_DIRECTORY} rev-parse HEAD
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE GIT_COMMIT_HASH)
message(STATUS "Git branch: ${GIT_BUILD_BRANCH}")
message(STATUS "Git commit hash: ${GIT_COMMIT_HASH}")

View File

@@ -58,6 +58,12 @@ include(target_link_modules)
add_module(xrpl beast)
target_link_libraries(xrpl.libxrpl.beast PUBLIC xrpl.imports.main)
include(GitInfo)
add_module(xrpl git)
target_compile_definitions(xrpl.libxrpl.git PRIVATE GIT_COMMIT_HASH="${GIT_COMMIT_HASH}"
GIT_BUILD_BRANCH="${GIT_BUILD_BRANCH}")
target_link_libraries(xrpl.libxrpl.git PUBLIC xrpl.imports.main)
# Level 02
add_module(xrpl basics)
target_link_libraries(xrpl.libxrpl.basics PUBLIC xrpl.libxrpl.beast)
@@ -71,7 +77,8 @@ target_link_libraries(xrpl.libxrpl.crypto PUBLIC xrpl.libxrpl.basics)
# Level 04
add_module(xrpl protocol)
target_link_libraries(xrpl.libxrpl.protocol PUBLIC xrpl.libxrpl.crypto xrpl.libxrpl.json)
target_link_libraries(xrpl.libxrpl.protocol PUBLIC xrpl.libxrpl.crypto xrpl.libxrpl.git
xrpl.libxrpl.json)
# Level 05
add_module(xrpl core)
@@ -135,6 +142,7 @@ target_link_modules(
conditions
core
crypto
git
json
ledger
net

View File

@@ -23,6 +23,7 @@ install(TARGETS common
xrpl.libxrpl.conditions
xrpl.libxrpl.core
xrpl.libxrpl.crypto
xrpl.libxrpl.git
xrpl.libxrpl.json
xrpl.libxrpl.rdb
xrpl.libxrpl.ledger

View File

@@ -1,6 +1,7 @@
#pragma once
#include <string>
#include <string_view>
#include <vector>
namespace beast {
@@ -26,14 +27,14 @@ public:
SemanticVersion();
SemanticVersion(std::string const& version);
SemanticVersion(std::string_view version);
/** Parse a semantic version string.
The parsing is as strict as possible.
@return `true` if the string was parsed.
*/
bool
parse(std::string const& input);
parse(std::string_view input);
/** Produce a string from semantic version components. */
std::string

13
include/xrpl/git/Git.h Normal file
View File

@@ -0,0 +1,13 @@
#pragma once
#include <string>
namespace xrpl::git {
/** @return the git commit hash captured at build time; may be empty when
    git was unavailable while the build was configured. */
std::string const&
getCommitHash();
/** @return the git branch name captured at build time; may be empty when
    git was unavailable while the build was configured. */
std::string const&
getBuildBranch();
} // namespace xrpl::git

View File

@@ -23,13 +23,13 @@ public:
static constexpr size_t initialBufferSize = kilobytes(256);
RawStateTable()
: monotonic_resource_{std::make_unique<boost::container::pmr::monotonic_buffer_resource>(
initialBufferSize)}
: monotonic_resource_{
std::make_unique<boost::container::pmr::monotonic_buffer_resource>(initialBufferSize)}
, items_{monotonic_resource_.get()} {};
RawStateTable(RawStateTable const& rhs)
: monotonic_resource_{std::make_unique<boost::container::pmr::monotonic_buffer_resource>(
initialBufferSize)}
: monotonic_resource_{
std::make_unique<boost::container::pmr::monotonic_buffer_resource>(initialBufferSize)}
, items_{rhs.items_, monotonic_resource_.get()}
, dropsDestroyed_{rhs.dropsDestroyed_} {};

View File

@@ -49,7 +49,7 @@ getFullVersionString();
@return the encoded version in a 64-bit integer
*/
std::uint64_t
encodeSoftwareVersion(char const* const versionStr);
encodeSoftwareVersion(std::string_view versionStr);
/** Returns this server's version packed in a 64-bit integer. */
std::uint64_t

View File

@@ -138,14 +138,14 @@ SemanticVersion::SemanticVersion() : majorVersion(0), minorVersion(0), patchVers
{
}
SemanticVersion::SemanticVersion(std::string const& version) : SemanticVersion()
SemanticVersion::SemanticVersion(std::string_view version) : SemanticVersion()
{
if (!parse(version))
throw std::invalid_argument("invalid version string");
}
bool
SemanticVersion::parse(std::string const& input)
SemanticVersion::parse(std::string_view input)
{
// May not have leading or trailing whitespace
auto left_iter = std::find_if_not(input.begin(), input.end(), [](std::string::value_type c) {

31
src/libxrpl/git/Git.cpp Normal file
View File

@@ -0,0 +1,31 @@
#include "xrpl/git/Git.h"
#include <string>
// GIT_COMMIT_HASH and GIT_BUILD_BRANCH are supplied as compile definitions by
// the build (target_compile_definitions on xrpl.libxrpl.git); they may expand
// to empty string literals but must always be defined when compiling this file.
#ifndef GIT_COMMIT_HASH
#error "GIT_COMMIT_HASH must be defined"
#endif
#ifndef GIT_BUILD_BRANCH
#error "GIT_BUILD_BRANCH must be defined"
#endif
namespace xrpl::git {
// Compile-time copies of the build-injected macros.
static constexpr char kGIT_COMMIT_HASH[] = GIT_COMMIT_HASH;
static constexpr char kGIT_BUILD_BRANCH[] = GIT_BUILD_BRANCH;
// Returns the commit hash recorded when this translation unit was compiled.
std::string const&
getCommitHash()
{
// Function-local static: constructed once on first use (thread-safe per
// the C++11 magic-statics rule), then returned by reference thereafter.
static std::string const kVALUE = kGIT_COMMIT_HASH;
return kVALUE;
}
// Returns the branch name recorded when this translation unit was compiled.
std::string const&
getBuildBranch()
{
static std::string const kVALUE = kGIT_BUILD_BRANCH;
return kVALUE;
}
} // namespace xrpl::git

View File

@@ -72,8 +72,8 @@ OpenView::OpenView(
ReadView const* base,
Rules const& rules,
std::shared_ptr<void const> hold)
: monotonic_resource_{std::make_unique<boost::container::pmr::monotonic_buffer_resource>(
initialBufferSize)}
: monotonic_resource_{
std::make_unique<boost::container::pmr::monotonic_buffer_resource>(initialBufferSize)}
, txs_{monotonic_resource_.get()}
, rules_(rules)
, header_(base->header())
@@ -88,8 +88,8 @@ OpenView::OpenView(
}
OpenView::OpenView(ReadView const* base, std::shared_ptr<void const> hold)
: monotonic_resource_{std::make_unique<boost::container::pmr::monotonic_buffer_resource>(
initialBufferSize)}
: monotonic_resource_{
std::make_unique<boost::container::pmr::monotonic_buffer_resource>(initialBufferSize)}
, txs_{monotonic_resource_.get()}
, rules_(base->rules())
, header_(base->header())

View File

@@ -1,6 +1,7 @@
#include <xrpl/basics/contract.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/beast/core/SemanticVersion.h>
#include <xrpl/git/Git.h>
#include <xrpl/protocol/BuildInfo.h>
#include <boost/preprocessor/stringize.hpp>
@@ -14,44 +15,60 @@ namespace xrpl {
namespace BuildInfo {
namespace {
//--------------------------------------------------------------------------
// The build version number. You must edit this for each release
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "3.2.0-b0"
// clang-format on
#if defined(DEBUG) || defined(SANITIZERS)
"+"
#ifdef GIT_COMMIT_HASH
GIT_COMMIT_HASH
"."
#endif
#ifdef DEBUG
"DEBUG"
#ifdef SANITIZERS
"."
#endif
#endif
#ifdef SANITIZERS
BOOST_PP_STRINGIZE(SANITIZERS) // cspell: disable-line
#endif
#endif
//--------------------------------------------------------------------------
// clang-format on
;
//
// Don't touch anything below this line
//
std::string
buildVersionString()
{
std::string version = versionString;
#if defined(DEBUG) || defined(SANITIZERS)
std::string metadata;
std::string const& commitHash = xrpl::git::getCommitHash();
if (!commitHash.empty())
metadata += commitHash + ".";
#ifdef DEBUG
metadata += "DEBUG";
#endif
#if defined(DEBUG) && defined(SANITIZERS)
metadata += ".";
#endif
#ifdef SANITIZERS
metadata += BOOST_PP_STRINGIZE(SANITIZERS); // cspell: disable-line
#endif
if (!metadata.empty())
version += "+" + metadata;
#endif
return version;
}
} // namespace
std::string const&
getVersionString()
{
static std::string const value = [] {
std::string const s = versionString;
std::string const s = buildVersionString();
beast::SemanticVersion v;
if (!v.parse(s) || v.print() != s)
LogicError(s + ": Bad server version string");
@@ -71,13 +88,13 @@ static constexpr std::uint64_t implementationVersionIdentifier = 0x183B'0000'000
static constexpr std::uint64_t implementationVersionIdentifierMask = 0xFFFF'0000'0000'0000LLU;
std::uint64_t
encodeSoftwareVersion(char const* const versionStr)
encodeSoftwareVersion(std::string_view versionStr)
{
std::uint64_t c = implementationVersionIdentifier;
beast::SemanticVersion v;
if (v.parse(std::string(versionStr)))
if (v.parse(versionStr))
{
if (v.majorVersion >= 0 && v.majorVersion <= 255)
c |= static_cast<std::uint64_t>(v.majorVersion) << 40;
@@ -137,7 +154,7 @@ encodeSoftwareVersion(char const* const versionStr)
std::uint64_t
getEncodedVersion()
{
static std::uint64_t const cookie = {encodeSoftwareVersion(versionString)};
static std::uint64_t const cookie = {encodeSoftwareVersion(getVersionString())};
return cookie;
}

View File

@@ -133,9 +133,9 @@ STVar::constructST(SerializedTypeID id, int depth, Args&&... args)
{
construct<T>(std::forward<Args>(args)...);
}
else if constexpr (std::is_same_v<
std::tuple<std::remove_cvref_t<Args>...>,
std::tuple<SerialIter, SField>>)
else if constexpr (
std::
is_same_v<std::tuple<std::remove_cvref_t<Args>...>, std::tuple<SerialIter, SField>>)
{
construct<T>(std::forward<Args>(args)..., depth);
}

View File

@@ -180,8 +180,9 @@ ammAccountHolds(ReadView const& view, AccountID const& ammAccountID, Issue const
if (auto const sle = view.read(keylet::account(ammAccountID)))
return (*sle)[sfBalance];
}
else if (auto const sle = view.read(keylet::line(ammAccountID, issue.account, issue.currency));
sle && !isFrozen(view, ammAccountID, issue.currency, issue.account))
else if (
auto const sle = view.read(keylet::line(ammAccountID, issue.account, issue.currency));
sle && !isFrozen(view, ammAccountID, issue.currency, issue.account))
{
auto amount = (*sle)[sfBalance];
if (ammAccountID > issue.account)

View File

@@ -42,8 +42,9 @@ AMMVote::preclaim(PreclaimContext const& ctx)
}
else if (ammSle->getFieldAmount(sfLPTokenBalance) == beast::zero)
return tecAMM_EMPTY;
else if (auto const lpTokensNew = ammLPHolds(ctx.view, *ammSle, ctx.tx[sfAccount], ctx.j);
lpTokensNew == beast::zero)
else if (
auto const lpTokensNew = ammLPHolds(ctx.view, *ammSle, ctx.tx[sfAccount], ctx.j);
lpTokensNew == beast::zero)
{
JLOG(ctx.j.debug()) << "AMM Vote: account is not LP.";
return tecAMM_INVALID_TOKENS;

View File

@@ -84,11 +84,12 @@ LoanSet::preflight(PreflightContext const& ctx)
!validNumericMinimum(paymentInterval, LoanSet::minPaymentInterval))
return temINVALID;
// Grace period is between min default value and payment interval
else if (auto const gracePeriod = tx[~sfGracePeriod]; //
!validNumericRange(
gracePeriod,
paymentInterval.value_or(LoanSet::defaultPaymentInterval),
defaultGracePeriod))
else if (
auto const gracePeriod = tx[~sfGracePeriod]; //
!validNumericRange(
gracePeriod,
paymentInterval.value_or(LoanSet::defaultPaymentInterval),
defaultGracePeriod))
return temINVALID;
// Copied from preflight2

View File

@@ -18,7 +18,7 @@ TER
PermissionedDomainDelete::preclaim(PreclaimContext const& ctx)
{
auto const domain = ctx.tx.getFieldH256(sfDomainID);
auto const sleDomain = ctx.view.read({ltPERMISSIONED_DOMAIN, domain});
auto const sleDomain = ctx.view.read(keylet::permissionedDomain(domain));
if (!sleDomain)
return tecNO_ENTRY;
@@ -40,7 +40,7 @@ PermissionedDomainDelete::doApply()
ctx_.tx.isFieldPresent(sfDomainID),
"xrpl::PermissionedDomainDelete::doApply : required field present");
auto const slePd = view().peek({ltPERMISSIONED_DOMAIN, ctx_.tx.at(sfDomainID)});
auto const slePd = view().peek(keylet::permissionedDomain(ctx_.tx.at(sfDomainID)));
auto const page = (*slePd)[sfOwnerNode];
if (!view().dirRemove(keylet::ownerDir(account_), page, slePd->key(), true))

View File

@@ -227,7 +227,7 @@ public:
static typename Base::Key const&
extract(Value const& value)
{
return value;
return value; // NOLINT(bugprone-return-const-ref-from-parameter)
}
static Values

View File

@@ -31,6 +31,7 @@
#include <xrpl/protocol/STTx.h>
#include <functional>
#include <future>
#include <source_location>
#include <string>
#include <tuple>
@@ -393,6 +394,48 @@ public:
return close(std::chrono::seconds(5));
}
/** Close and advance the ledger, then synchronize with the server's
io_context to ensure all async operations initiated by the close have
been started.
This function performs the same ledger close as close(), but additionally
ensures that all tasks posted to the server's io_context (such as
WebSocket subscription message sends) have been initiated before returning.
What it guarantees:
- All async operations posted before syncClose() have been STARTED
- For WebSocket sends: async_write_some() has been called
- The actual I/O completion may still be pending (async)
What it does NOT guarantee:
- Async operations have COMPLETED
- WebSocket messages have been received by clients
- However, for localhost connections, the remaining latency is typically
microseconds, making tests reliable
Use this instead of close() when:
- Test code immediately checks for subscription messages
- Race conditions between test and worker threads must be avoided
- Deterministic test behavior is required
@param timeout Maximum time to wait for the barrier task to execute
@return true if close succeeded and barrier executed within timeout,
false otherwise
*/
[[nodiscard]] bool
syncClose(std::chrono::steady_clock::duration timeout = std::chrono::seconds{1})
{
// With more than one io_context thread the barrier task posted below could
// run concurrently with (or ahead of) earlier tasks, voiding the guarantee.
XRPL_ASSERT(
app().getNumberOfThreads() == 1,
"syncClose() is only useful on an application with a single thread");
auto const result = close();
// Post a barrier task; once it fires, every task queued before it (by the
// close above) has already been started by the single-threaded io_context.
auto serverBarrier = std::make_shared<std::promise<void>>();
auto future = serverBarrier->get_future();
boost::asio::post(app().getIOContext(), [serverBarrier]() { serverBarrier->set_value(); });
auto const status = future.wait_for(timeout);
// Success requires both the ledger close and the barrier firing in time.
return result && status == std::future_status::ready;
}
/** Turn on JSON tracing.
With no arguments, trace all
*/

View File

@@ -73,6 +73,8 @@ std::unique_ptr<Config> admin_localnet(std::unique_ptr<Config>);
std::unique_ptr<Config> secure_gateway_localnet(std::unique_ptr<Config>);
std::unique_ptr<Config> single_thread_io(std::unique_ptr<Config>);
/// @brief adjust configuration with params needed to be a validator
///
/// this is intended for use with envconfig, as in

View File

@@ -87,6 +87,12 @@ secure_gateway_localnet(std::unique_ptr<Config> cfg)
(*cfg)[PORT_WS].set("secure_gateway", "127.0.0.0/8");
return cfg;
}
/// @brief adjust configuration to run the server with a single I/O worker
/// thread; used by tests that rely on Env::syncClose() to deterministically
/// order asynchronous work (e.g. subscription message delivery)
std::unique_ptr<Config>
single_thread_io(std::unique_ptr<Config> cfg)
{
cfg->IO_WORKERS = 1;
return cfg;
}
auto constexpr defaultseed = "shUwVw52ofnCUX5m7kPTKzJdr4HEH";

View File

@@ -556,7 +556,7 @@ struct MultiApiJson_test : beast::unit_test::suite
static_assert([](auto&& v) {
return !requires {
v.visitor(
std::move(v), // cannot bind rvalue
decltype(v){}, // cannot bind rvalue
1,
[](Json::Value&, auto) {});
};

View File

@@ -223,6 +223,45 @@ public:
expect(jv[jss::result][jss::obligations]["USD"] == maxUSD.getText());
}
// Regression test: gateway_balances used to return an internal error when the
// queried account was involved in an escrow of an MPT; the fix excludes MPT
// amounts when totaling escrows. Here we only require the call to succeed.
void
testGWBWithMPT()
{
testcase("Gateway Balances with MPT Escrow");
using namespace std::chrono_literals;
using namespace jtx;
// Ensure MPT is enabled
FeatureBitset features = testable_amendments() | featureMPTokensV1;
Env env(*this, features);
Account const alice{"alice"};
Account const bob{"bob"};
env.fund(XRP(10000), alice, bob);
env.close();
// Create MPT issuance (Alice) with Escrow capability
MPTTester mpt(env, alice, {.holders = {bob}, .fund = false});
mpt.create({.flags = tfMPTCanEscrow});
// Authorize Bob and fund him
mpt.authorize({.account = bob, .holderCount = 1});
mpt.pay(alice, bob, 1000);
// Bob creates an escrow of MPT to Alice.
auto const MPT = mpt["MPT"];
env(escrow::create(bob, alice, MPT(100)), escrow::finish_time(env.now() + 10s));
env.close();
// Query gateway_balances for Bob.
auto wsc = makeWSClient(env.app().config());
Json::Value qry;
qry[jss::account] = bob.human();
auto jv = wsc->invoke("gateway_balances", qry);
// Prior to the fix this invoke produced an internal error.
expect(jv[jss::status] == "success");
}
void
run() override
{
@@ -233,7 +272,7 @@ public:
testGWB(feature);
testGWBApiVersions(feature);
}
testGWBWithMPT();
testGWBOverflow();
}
};

View File

@@ -26,7 +26,7 @@ public:
{
using namespace std::chrono_literals;
using namespace jtx;
Env env(*this);
Env env{*this, single_thread_io(envconfig())};
auto wsc = makeWSClient(env.app().config());
Json::Value stream;
@@ -92,7 +92,7 @@ public:
{
using namespace std::chrono_literals;
using namespace jtx;
Env env(*this);
Env env{*this, single_thread_io(envconfig())};
auto wsc = makeWSClient(env.app().config());
Json::Value stream;
@@ -114,7 +114,7 @@ public:
{
// Accept a ledger
env.close();
BEAST_EXPECT(env.syncClose());
// Check stream update
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
@@ -125,7 +125,7 @@ public:
{
// Accept another ledger
env.close();
BEAST_EXPECT(env.syncClose());
// Check stream update
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
@@ -150,7 +150,7 @@ public:
{
using namespace std::chrono_literals;
using namespace jtx;
Env env(*this);
Env env(*this, single_thread_io(envconfig()));
auto baseFee = env.current()->fees().base.drops();
auto wsc = makeWSClient(env.app().config());
Json::Value stream;
@@ -171,7 +171,7 @@ public:
{
env.fund(XRP(10000), "alice");
env.close();
BEAST_EXPECT(env.syncClose());
// Check stream update for payment transaction
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
@@ -195,7 +195,7 @@ public:
}));
env.fund(XRP(10000), "bob");
env.close();
BEAST_EXPECT(env.syncClose());
// Check stream update for payment transaction
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
@@ -249,12 +249,12 @@ public:
{
// Transaction that does not affect stream
env.fund(XRP(10000), "carol");
env.close();
BEAST_EXPECT(env.syncClose());
BEAST_EXPECT(!wsc->getMsg(10ms));
// Transactions concerning alice
env.trust(Account("bob")["USD"](100), "alice");
env.close();
BEAST_EXPECT(env.syncClose());
// Check stream updates
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
@@ -288,6 +288,7 @@ public:
using namespace jtx;
Env env(*this, envconfig([](std::unique_ptr<Config> cfg) {
cfg->FEES.reference_fee = 10;
cfg = single_thread_io(std::move(cfg));
return cfg;
}));
auto wsc = makeWSClient(env.app().config());
@@ -310,7 +311,7 @@ public:
{
env.fund(XRP(10000), "alice");
env.close();
BEAST_EXPECT(env.syncClose());
// Check stream update for payment transaction
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
@@ -360,7 +361,7 @@ public:
testManifests()
{
using namespace jtx;
Env env(*this);
Env env(*this, single_thread_io(envconfig()));
auto wsc = makeWSClient(env.app().config());
Json::Value stream;
@@ -394,7 +395,7 @@ public:
{
using namespace jtx;
Env env{*this, envconfig(validator, ""), features};
Env env{*this, single_thread_io(envconfig(validator, "")), features};
auto& cfg = env.app().config();
if (!BEAST_EXPECT(cfg.section(SECTION_VALIDATION_SEED).empty()))
return;
@@ -483,7 +484,7 @@ public:
// at least one flag ledger.
while (env.closed()->header().seq < 300)
{
env.close();
BEAST_EXPECT(env.syncClose());
using namespace std::chrono_literals;
BEAST_EXPECT(wsc->findMsg(5s, validValidationFields));
}
@@ -505,7 +506,7 @@ public:
{
using namespace jtx;
testcase("Subscribe by url");
Env env{*this};
Env env{*this, single_thread_io(envconfig())};
Json::Value jv;
jv[jss::url] = "http://localhost/events";
@@ -536,7 +537,7 @@ public:
auto const method = subscribe ? "subscribe" : "unsubscribe";
testcase << "Error cases for " << method;
Env env{*this};
Env env{*this, single_thread_io(envconfig())};
auto wsc = makeWSClient(env.app().config());
{
@@ -572,7 +573,7 @@ public:
}
{
Env env_nonadmin{*this, no_admin(envconfig())};
Env env_nonadmin{*this, single_thread_io(no_admin(envconfig()))};
Json::Value jv;
jv[jss::url] = "no-url";
auto jr = env_nonadmin.rpc("json", method, to_string(jv))[jss::result];
@@ -834,12 +835,13 @@ public:
* send payments between the two accounts a and b,
* and close ledgersToClose ledgers
*/
auto sendPayments = [](Env& env,
Account const& a,
Account const& b,
int newTxns,
std::uint32_t ledgersToClose,
int numXRP = 10) {
auto sendPayments = [this](
Env& env,
Account const& a,
Account const& b,
int newTxns,
std::uint32_t ledgersToClose,
int numXRP = 10) {
env.memoize(a);
env.memoize(b);
for (int i = 0; i < newTxns; ++i)
@@ -852,7 +854,7 @@ public:
jtx::sig(jtx::autofill));
}
for (int i = 0; i < ledgersToClose; ++i)
env.close();
BEAST_EXPECT(env.syncClose());
return newTxns;
};
@@ -945,7 +947,7 @@ public:
*
* also test subscribe to the account before it is created
*/
Env env(*this);
Env env(*this, single_thread_io(envconfig()));
auto wscTxHistory = makeWSClient(env.app().config());
Json::Value request;
request[jss::account_history_tx_stream] = Json::objectValue;
@@ -988,7 +990,7 @@ public:
* subscribe genesis account tx history without txns
* subscribe to bob's account after it is created
*/
Env env(*this);
Env env(*this, single_thread_io(envconfig()));
auto wscTxHistory = makeWSClient(env.app().config());
Json::Value request;
request[jss::account_history_tx_stream] = Json::objectValue;
@@ -998,6 +1000,7 @@ public:
if (!BEAST_EXPECT(goodSubRPC(jv)))
return;
IdxHashVec genesisFullHistoryVec;
BEAST_EXPECT(env.syncClose());
if (!BEAST_EXPECT(!getTxHash(*wscTxHistory, genesisFullHistoryVec, 1).first))
return;
@@ -1016,6 +1019,7 @@ public:
if (!BEAST_EXPECT(goodSubRPC(jv)))
return;
IdxHashVec bobFullHistoryVec;
BEAST_EXPECT(env.syncClose());
r = getTxHash(*wscTxHistory, bobFullHistoryVec, 1);
if (!BEAST_EXPECT(r.first && r.second))
return;
@@ -1050,6 +1054,7 @@ public:
"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh";
jv = wscTxHistory->invoke("subscribe", request);
genesisFullHistoryVec.clear();
BEAST_EXPECT(env.syncClose());
BEAST_EXPECT(getTxHash(*wscTxHistory, genesisFullHistoryVec, 31).second);
jv = wscTxHistory->invoke("unsubscribe", request);
@@ -1062,13 +1067,13 @@ public:
* subscribe account and subscribe account tx history
* and compare txns streamed
*/
Env env(*this);
Env env(*this, single_thread_io(envconfig()));
auto wscAccount = makeWSClient(env.app().config());
auto wscTxHistory = makeWSClient(env.app().config());
std::array<Account, 2> accounts = {alice, bob};
env.fund(XRP(222222), accounts);
env.close();
BEAST_EXPECT(env.syncClose());
// subscribe account
Json::Value stream = Json::objectValue;
@@ -1131,18 +1136,18 @@ public:
* alice issues USD to carol
* mix USD and XRP payments
*/
Env env(*this);
Env env(*this, single_thread_io(envconfig()));
auto const USD_a = alice["USD"];
std::array<Account, 2> accounts = {alice, carol};
env.fund(XRP(333333), accounts);
env.trust(USD_a(20000), carol);
env.close();
BEAST_EXPECT(env.syncClose());
auto mixedPayments = [&]() -> int {
sendPayments(env, alice, carol, 1, 0);
env(pay(alice, carol, USD_a(100)));
env.close();
BEAST_EXPECT(env.syncClose());
return 2;
};
@@ -1152,6 +1157,7 @@ public:
request[jss::account_history_tx_stream][jss::account] = carol.human();
auto ws = makeWSClient(env.app().config());
auto jv = ws->invoke("subscribe", request);
BEAST_EXPECT(env.syncClose());
{
// take out existing txns from the stream
IdxHashVec tempVec;
@@ -1169,10 +1175,10 @@ public:
/*
* long transaction history
*/
Env env(*this);
Env env(*this, single_thread_io(envconfig()));
std::array<Account, 2> accounts = {alice, carol};
env.fund(XRP(444444), accounts);
env.close();
BEAST_EXPECT(env.syncClose());
// many payments, and close lots of ledgers
auto oneRound = [&](int numPayments) {
@@ -1185,6 +1191,7 @@ public:
request[jss::account_history_tx_stream][jss::account] = carol.human();
auto wscLong = makeWSClient(env.app().config());
auto jv = wscLong->invoke("subscribe", request);
BEAST_EXPECT(env.syncClose());
{
// take out existing txns from the stream
IdxHashVec tempVec;
@@ -1222,7 +1229,7 @@ public:
jtx::testable_amendments() | featurePermissionedDomains | featureCredentials |
featurePermissionedDEX};
Env env(*this, all);
Env env(*this, single_thread_io(envconfig()), all);
PermissionedDEX permDex(env);
auto const alice = permDex.alice;
auto const bob = permDex.bob;
@@ -1241,10 +1248,10 @@ public:
if (!BEAST_EXPECT(jv[jss::status] == "success"))
return;
env(offer(alice, XRP(10), USD(10)), domain(domainID), txflags(tfHybrid));
env.close();
BEAST_EXPECT(env.syncClose());
env(pay(bob, carol, USD(5)), path(~USD), sendmax(XRP(5)), domain(domainID));
env.close();
BEAST_EXPECT(env.syncClose());
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
if (jv[jss::changes].size() != 1)
@@ -1284,9 +1291,9 @@ public:
Account const bob{"bob"};
Account const broker{"broker"};
Env env{*this, features};
Env env{*this, single_thread_io(envconfig()), features};
env.fund(XRP(10000), alice, bob, broker);
env.close();
BEAST_EXPECT(env.syncClose());
auto wsc = test::makeWSClient(env.app().config());
Json::Value stream;
@@ -1350,12 +1357,12 @@ public:
// Verify the NFTokenIDs are correct in the NFTokenMint tx meta
uint256 const nftId1{token::getNextID(env, alice, 0u, tfTransferable)};
env(token::mint(alice, 0u), txflags(tfTransferable));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenID(nftId1);
uint256 const nftId2{token::getNextID(env, alice, 0u, tfTransferable)};
env(token::mint(alice, 0u), txflags(tfTransferable));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenID(nftId2);
// Alice creates one sell offer for each NFT
@@ -1363,32 +1370,32 @@ public:
// meta
uint256 const aliceOfferIndex1 = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftId1, drops(1)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenOfferID(aliceOfferIndex1);
uint256 const aliceOfferIndex2 = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftId2, drops(1)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenOfferID(aliceOfferIndex2);
// Alice cancels two offers she created
// Verify the NFTokenIDs are correct in the NFTokenCancelOffer tx
// meta
env(token::cancelOffer(alice, {aliceOfferIndex1, aliceOfferIndex2}));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenIDsInCancelOffer({nftId1, nftId2});
// Bob creates a buy offer for nftId1
// Verify the offer id is correct in the NFTokenCreateOffer tx meta
auto const bobBuyOfferIndex = keylet::nftoffer(bob, env.seq(bob)).key;
env(token::createOffer(bob, nftId1, drops(1)), token::owner(alice));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenOfferID(bobBuyOfferIndex);
// Alice accepts bob's buy offer
// Verify the NFTokenID is correct in the NFTokenAcceptOffer tx meta
env(token::acceptBuyOffer(alice, bobBuyOfferIndex));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenID(nftId1);
}
@@ -1397,7 +1404,7 @@ public:
// Alice mints a NFT
uint256 const nftId{token::getNextID(env, alice, 0u, tfTransferable)};
env(token::mint(alice, 0u), txflags(tfTransferable));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenID(nftId);
// Alice creates sell offer and set broker as destination
@@ -1405,18 +1412,18 @@ public:
env(token::createOffer(alice, nftId, drops(1)),
token::destination(broker),
txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenOfferID(offerAliceToBroker);
// Bob creates buy offer
uint256 const offerBobToBroker = keylet::nftoffer(bob, env.seq(bob)).key;
env(token::createOffer(bob, nftId, drops(1)), token::owner(alice));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenOfferID(offerBobToBroker);
// Check NFTokenID meta for NFTokenAcceptOffer in brokered mode
env(token::brokerOffers(broker, offerBobToBroker, offerAliceToBroker));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenID(nftId);
}
@@ -1426,24 +1433,24 @@ public:
// Alice mints a NFT
uint256 const nftId{token::getNextID(env, alice, 0u, tfTransferable)};
env(token::mint(alice, 0u), txflags(tfTransferable));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenID(nftId);
// Alice creates 2 sell offers for the same NFT
uint256 const aliceOfferIndex1 = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftId, drops(1)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenOfferID(aliceOfferIndex1);
uint256 const aliceOfferIndex2 = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftId, drops(1)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenOfferID(aliceOfferIndex2);
// Make sure the metadata only has 1 nft id, since both offers are
// for the same nft
env(token::cancelOffer(alice, {aliceOfferIndex1, aliceOfferIndex2}));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenIDsInCancelOffer({nftId});
}
@@ -1451,7 +1458,7 @@ public:
{
uint256 const aliceMintWithOfferIndex1 = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::mint(alice), token::amount(XRP(0)));
env.close();
BEAST_EXPECT(env.syncClose());
verifyNFTokenOfferID(aliceMintWithOfferIndex1);
}
}

View File

@@ -1072,6 +1072,12 @@ public:
return trapTxID_;
}
size_t
// Override of Application::getNumberOfThreads(): reports how many
// io_context worker threads this application instance runs, by
// delegating to the io_context pool's get_number_of_threads().
getNumberOfThreads() const override
{
return get_number_of_threads();
}
private:
// For a newly-started validator, this is the greatest persisted ledger
// and new validations must be greater than this.

View File

@@ -157,6 +157,10 @@ public:
* than the last ledger it persisted. */
virtual LedgerIndex
getMaxDisallowedLedger() = 0;
/** Returns the number of io_context (I/O worker) threads used by the application. */
virtual size_t
getNumberOfThreads() const = 0;
};
std::unique_ptr<Application>

View File

@@ -23,4 +23,10 @@ public:
{
return io_context_;
}
size_t
// Number of worker threads servicing the io_context — simply the size
// of the thread container owned by this pool (threads_).
get_number_of_threads() const
{
return threads_.size();
}
};

View File

@@ -6,6 +6,7 @@
#include <xrpl/basics/Log.h>
#include <xrpl/beast/core/CurrentThreadName.h>
#include <xrpl/git/Git.h>
#include <xrpl/protocol/BuildInfo.h>
#include <xrpl/server/Vacuum.h>
@@ -476,12 +477,8 @@ run(int argc, char** argv)
if (vm.count("version"))
{
std::cout << "rippled version " << BuildInfo::getVersionString() << std::endl;
#ifdef GIT_COMMIT_HASH
std::cout << "Git commit hash: " << GIT_COMMIT_HASH << std::endl;
#endif
#ifdef GIT_BRANCH
std::cout << "Git build branch: " << GIT_BRANCH << std::endl;
#endif
std::cout << "Git commit hash: " << xrpl::git::getCommitHash() << std::endl;
std::cout << "Git build branch: " << xrpl::git::getBuildBranch() << std::endl;
return 0;
}

View File

@@ -40,6 +40,7 @@
#include <xrpl/core/PerfLog.h>
#include <xrpl/crypto/RFC1751.h>
#include <xrpl/crypto/csprng.h>
#include <xrpl/git/Git.h>
#include <xrpl/ledger/AmendmentTable.h>
#include <xrpl/ledger/OrderBookDB.h>
#include <xrpl/protocol/BuildInfo.h>
@@ -2593,17 +2594,14 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
}
}
#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
if (!xrpl::git::getCommitHash().empty() || !xrpl::git::getBuildBranch().empty())
{
auto& x = (info[jss::git] = Json::objectValue);
#ifdef GIT_COMMIT_HASH
x[jss::hash] = GIT_COMMIT_HASH;
#endif
#ifdef GIT_BRANCH
x[jss::branch] = GIT_BRANCH;
#endif
if (!xrpl::git::getCommitHash().empty())
x[jss::hash] = xrpl::git::getCommitHash();
if (!xrpl::git::getBuildBranch().empty())
x[jss::branch] = xrpl::git::getBuildBranch();
}
#endif
}
info[jss::io_latency_ms] = static_cast<Json::UInt>(registry_.app().getIOLatency().count());

View File

@@ -131,6 +131,10 @@ doGatewayBalances(RPC::JsonContext& context)
if (sle->getType() == ltESCROW)
{
auto const& escrow = sle->getFieldAmount(sfAmount);
// Gateway Balance should not include MPTs
if (escrow.holds<MPTIssue>())
return;
auto& bal = locked[escrow.getCurrency()];
if (bal == beast::zero)
{