mirror of
https://github.com/XRPLF/rippled.git
synced 2026-03-27 15:12:28 +00:00
Compare commits
19 Commits
pratik/std
...
bthomee/re
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1490a1401e | ||
|
|
5df14e58d7 | ||
|
|
750f762c01 | ||
|
|
5ef8813891 | ||
|
|
0ef3ca9674 | ||
|
|
d57a9b1ad3 | ||
|
|
509677abfd | ||
|
|
addc1e8e25 | ||
|
|
faf69da4b0 | ||
|
|
76e3b4fb0f | ||
|
|
e8bdbf975a | ||
|
|
2c765f6eb0 | ||
|
|
a9269fa846 | ||
|
|
15fd9feae5 | ||
|
|
b9d07730f3 | ||
|
|
85b65c8e9a | ||
|
|
8f182e825a | ||
|
|
cd78569d94 | ||
|
|
2c7af360c2 |
4
.github/ISSUE_TEMPLATE/feature_request.md
vendored
4
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: Feature Request
|
||||
about: Suggest a new feature for the rippled project
|
||||
title: "[Title with short description] (Version: [rippled version])"
|
||||
about: Suggest a new feature for the xrpld project
|
||||
title: "[Title with short description] (Version: [xrpld version])"
|
||||
labels: Feature Request
|
||||
assignees: ""
|
||||
---
|
||||
|
||||
10
.github/scripts/levelization/README.md
vendored
10
.github/scripts/levelization/README.md
vendored
@@ -1,9 +1,9 @@
|
||||
# Levelization
|
||||
|
||||
Levelization is the term used to describe efforts to prevent rippled from
|
||||
Levelization is the term used to describe efforts to prevent xrpld from
|
||||
having or creating cyclic dependencies.
|
||||
|
||||
rippled code is organized into directories under `src/xrpld`, `src/libxrpl` (and
|
||||
xrpld code is organized into directories under `src/xrpld`, `src/libxrpl` (and
|
||||
`src/test`) representing modules. The modules are intended to be
|
||||
organized into "tiers" or "levels" such that a module from one level can
|
||||
only include code from lower levels. Additionally, a module
|
||||
@@ -22,7 +22,7 @@ levelization violations they find (by moving files or individual
|
||||
classes). At the very least, don't make things worse.
|
||||
|
||||
The table below summarizes the _desired_ division of modules, based on the current
|
||||
state of the rippled code. The levels are numbered from
|
||||
state of the xrpld code. The levels are numbered from
|
||||
the bottom up with the lower level, lower numbered, more independent
|
||||
modules listed first, and the higher level, higher numbered modules with
|
||||
more dependencies listed later.
|
||||
@@ -72,10 +72,10 @@ that `test` code should _never_ be included in `xrpl` or `xrpld` code.)
|
||||
|
||||
The [levelization](generate.py) script takes no parameters,
|
||||
reads no environment variables, and can be run from any directory,
|
||||
as long as it is in the expected location in the rippled repo.
|
||||
as long as it is in the expected location in the xrpld repo.
|
||||
It can be run at any time from within a checked out repo, and will
|
||||
do an analysis of all the `#include`s in
|
||||
the rippled source. The only caveat is that it runs much slower
|
||||
the xrpld source. The only caveat is that it runs much slower
|
||||
under Windows than in Linux. It hasn't yet been tested under MacOS.
|
||||
It generates many files of [results](results):
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ libxrpl.protocol > xrpl.json
|
||||
libxrpl.protocol > xrpl.protocol
|
||||
libxrpl.protocol_autogen > xrpl.protocol_autogen
|
||||
libxrpl.rdb > xrpl.basics
|
||||
libxrpl.rdb > xrpl.core
|
||||
libxrpl.rdb > xrpl.rdb
|
||||
libxrpl.resource > xrpl.basics
|
||||
libxrpl.resource > xrpl.json
|
||||
@@ -90,6 +91,7 @@ test.core > xrpl.server
|
||||
test.csf > xrpl.basics
|
||||
test.csf > xrpld.consensus
|
||||
test.csf > xrpl.json
|
||||
test.csf > xrpl.ledger
|
||||
test.csf > xrpl.protocol
|
||||
test.json > test.jtx
|
||||
test.json > xrpl.json
|
||||
@@ -108,7 +110,6 @@ test.jtx > xrpl.tx
|
||||
test.ledger > test.jtx
|
||||
test.ledger > test.toplevel
|
||||
test.ledger > xrpl.basics
|
||||
test.ledger > xrpld.app
|
||||
test.ledger > xrpld.core
|
||||
test.ledger > xrpl.ledger
|
||||
test.ledger > xrpl.protocol
|
||||
@@ -125,6 +126,7 @@ test.overlay > xrpl.basics
|
||||
test.overlay > xrpld.app
|
||||
test.overlay > xrpld.overlay
|
||||
test.overlay > xrpld.peerfinder
|
||||
test.overlay > xrpl.ledger
|
||||
test.overlay > xrpl.nodestore
|
||||
test.overlay > xrpl.protocol
|
||||
test.overlay > xrpl.shamap
|
||||
@@ -183,7 +185,6 @@ xrpl.conditions > xrpl.basics
|
||||
xrpl.conditions > xrpl.protocol
|
||||
xrpl.core > xrpl.basics
|
||||
xrpl.core > xrpl.json
|
||||
xrpl.core > xrpl.ledger
|
||||
xrpl.core > xrpl.protocol
|
||||
xrpl.json > xrpl.basics
|
||||
xrpl.ledger > xrpl.basics
|
||||
@@ -234,6 +235,7 @@ xrpld.app > xrpl.shamap
|
||||
xrpld.app > xrpl.tx
|
||||
xrpld.consensus > xrpl.basics
|
||||
xrpld.consensus > xrpl.json
|
||||
xrpld.consensus > xrpl.ledger
|
||||
xrpld.consensus > xrpl.protocol
|
||||
xrpld.core > xrpl.basics
|
||||
xrpld.core > xrpl.core
|
||||
|
||||
3
.github/scripts/rename/README.md
vendored
3
.github/scripts/rename/README.md
vendored
@@ -34,6 +34,8 @@ run from the repository root.
|
||||
6. `.github/scripts/rename/config.sh`: This script will rename the config from
|
||||
`rippled.cfg` to `xrpld.cfg`, and updating the code accordingly. The old
|
||||
filename will still be accepted.
|
||||
7. `.github/scripts/rename/docs.sh`: This script will rename any lingering
|
||||
references of `ripple(d)` to `xrpl(d)` in code, comments, and documentation.
|
||||
|
||||
You can run all these scripts from the repository root as follows:
|
||||
|
||||
@@ -44,4 +46,5 @@ You can run all these scripts from the repository root as follows:
|
||||
./.github/scripts/rename/binary.sh .
|
||||
./.github/scripts/rename/namespace.sh .
|
||||
./.github/scripts/rename/config.sh .
|
||||
./.github/scripts/rename/docs.sh .
|
||||
```
|
||||
|
||||
3
.github/scripts/rename/config.sh
vendored
3
.github/scripts/rename/config.sh
vendored
@@ -52,16 +52,15 @@ for DIRECTORY in "${DIRECTORIES[@]}"; do
|
||||
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.cmake" -o -name "*.txt" -o -name "*.cfg" -o -name "*.md" \) | while read -r FILE; do
|
||||
echo "Processing file: ${FILE}"
|
||||
${SED_COMMAND} -i -E 's/rippled(-example)?[ .]cfg/xrpld\1.cfg/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/rippleConfig/xrpldConfig/g' "${FILE}"
|
||||
done
|
||||
done
|
||||
${SED_COMMAND} -i 's/rippled/xrpld/g' cfg/xrpld-example.cfg
|
||||
${SED_COMMAND} -i 's/rippled/xrpld/g' src/test/core/Config_test.cpp
|
||||
${SED_COMMAND} -i 's/ripplevalidators/xrplvalidators/g' src/test/core/Config_test.cpp # cspell: disable-line
|
||||
${SED_COMMAND} -i 's/rippleConfig/xrpldConfig/g' src/test/core/Config_test.cpp
|
||||
${SED_COMMAND} -i 's@ripple/@xrpld/@g' src/test/core/Config_test.cpp
|
||||
${SED_COMMAND} -i 's/Rippled/File/g' src/test/core/Config_test.cpp
|
||||
|
||||
|
||||
# Restore the old config file name in the code that maintains support for now.
|
||||
${SED_COMMAND} -i 's/configLegacyName = "xrpld.cfg"/configLegacyName = "rippled.cfg"/g' src/xrpld/core/detail/Config.cpp
|
||||
|
||||
|
||||
77
.github/scripts/rename/docs.sh
vendored
Executable file
77
.github/scripts/rename/docs.sh
vendored
Executable file
@@ -0,0 +1,77 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Exit the script as soon as an error occurs.
|
||||
set -e
|
||||
|
||||
# On MacOS, ensure that GNU sed is installed and available as `gsed`.
|
||||
SED_COMMAND=sed
|
||||
if [[ "${OSTYPE}" == 'darwin'* ]]; then
|
||||
if ! command -v gsed &> /dev/null; then
|
||||
echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
|
||||
exit 1
|
||||
fi
|
||||
SED_COMMAND=gsed
|
||||
fi
|
||||
|
||||
# This script renames all remaining references to `ripple` and `rippled` to
|
||||
# `xrpl` and `xrpld`, respectively, in code, comments, and documentation.
|
||||
# Usage: .github/scripts/rename/docs.sh <repository directory>
|
||||
|
||||
if [ "$#" -ne 1 ]; then
|
||||
echo "Usage: $0 <repository directory>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DIRECTORY=$1
|
||||
echo "Processing directory: ${DIRECTORY}"
|
||||
if [ ! -d "${DIRECTORY}" ]; then
|
||||
echo "Error: Directory '${DIRECTORY}' does not exist."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.txt" -o -name "*.cfg" -o -name "*.md" -o -name "*.proto" \) -not -path "./.github/scripts/*" | while read -r FILE; do
|
||||
echo "Processing file: ${FILE}"
|
||||
${SED_COMMAND} -i -E 's@([^+-/])rippled@\1xrpld@g' "${FILE}"
|
||||
${SED_COMMAND} -i -E 's@([^+-/])Rippled@\1Xrpld@g' "${FILE}"
|
||||
${SED_COMMAND} -i -E 's/^rippled/xrpld/g' "${FILE}"
|
||||
${SED_COMMAND} -i -E 's/^Rippled/Xrpld/g' "${FILE}"
|
||||
${SED_COMMAND} -i -E 's@(/etc)?/opt/rippled?@/etc/opt/xrpld@g' "${FILE}"
|
||||
# cspell: disable
|
||||
${SED_COMMAND} -i 's/(r|R)ipple (e|E)poch/XRPL epoch/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/Ripple (n|N)etwork/XRPL network/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/Ripple (p|P)ayment/XRPL payment/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/Ripple (p|P)rotocol/XRPL protocol/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/Ripple (r|R)epository/XRPL repository/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/Ripple (s|S)erialization/XRPL serialization/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/Ripple (s|S)specific/XRPL specific/g' "${FILE}"
|
||||
# cspell: enable
|
||||
${SED_COMMAND} -i 's/a xrpl/an xrpl/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/RippleLib/XrplLib/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/ripple-lib/XrplLib/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's@src/ripple/@src/xrpld/@g' "${FILE}"
|
||||
${SED_COMMAND} -i 's@ripple/app/@xrpld/app/@g' "${FILE}"
|
||||
${SED_COMMAND} -i 's@https://github.com/ripple/rippled@https://github.com/XRPLF/rippled@g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/rippleLockEscrowMPT/lockEscrowMPT/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/rippleUnlockEscrowMPT/unlockEscrowMPT/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/rippleCredit/directSend/g' "${FILE}"
|
||||
done
|
||||
${SED_COMMAND} -i 's/ripple_libs/xrpl_libs/' BUILD.md
|
||||
${SED_COMMAND} -i 's/Ripple integrators/XRPL developers/' README.md
|
||||
${SED_COMMAND} -i 's/sanitizer-configuration-for-rippled/sanitizer-configuration-for-xrpld/' docs/build/sanitizers.md
|
||||
${SED_COMMAND} -i 's/rippled/xrpld/' .github/scripts/levelization/README.md
|
||||
${SED_COMMAND} -i 's/rippled/xrpld/' .github/scripts/strategy-matrix/generate.py
|
||||
${SED_COMMAND} -i 's@/rippled@/xrpld@' docs/build/install.md
|
||||
${SED_COMMAND} -i 's/rippled/xrpld/' docs/Doxyfile
|
||||
${SED_COMMAND} -i 's/ripple_basics/basics/' include/xrpl/basics/CountedObject.h
|
||||
${SED_COMMAND} -i 's/<ripple/<xrpl/' include/xrpl/protocol/AccountID.h
|
||||
${SED_COMMAND} -i 's/Ripple:/the XRPL:/g' include/xrpl/protocol/SecretKey.h
|
||||
${SED_COMMAND} -i 's/Ripple:/the XRPL:/g' include/xrpl/protocol/Seed.h
|
||||
${SED_COMMAND} -i 's/ripple/xrpl/' src/test/README.md
|
||||
${SED_COMMAND} -i 's/www.ripple.com/www.xrpl.org/g' src/test/protocol/Seed_test.cpp
|
||||
|
||||
# Restore specific changes.
|
||||
${SED_COMMAND} -i 's@b5efcc/src/xrpld@b5efcc/src/ripple@' include/xrpl/protocol/README.md
|
||||
${SED_COMMAND} -i 's/dbPrefix_ = "xrpldb"/dbPrefix_ = "rippledb"/' src/xrpld/app/misc/SHAMapStoreImp.h # cspell: disable-line
|
||||
${SED_COMMAND} -i 's/configLegacyName = "xrpld.cfg"/configLegacyName = "rippled.cfg"/' src/xrpld/core/detail/Config.cpp
|
||||
|
||||
echo "Renaming complete."
|
||||
3
.github/scripts/rename/namespace.sh
vendored
3
.github/scripts/rename/namespace.sh
vendored
@@ -37,10 +37,11 @@ DIRECTORIES=("include" "src" "tests")
|
||||
for DIRECTORY in "${DIRECTORIES[@]}"; do
|
||||
echo "Processing directory: ${DIRECTORY}"
|
||||
|
||||
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" \) | while read -r FILE; do
|
||||
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.macro" \) | while read -r FILE; do
|
||||
echo "Processing file: ${FILE}"
|
||||
${SED_COMMAND} -i 's/namespace ripple/namespace xrpl/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/ripple::/xrpl::/g' "${FILE}"
|
||||
${SED_COMMAND} -i 's/"ripple:/"xrpl::/g' "${FILE}"
|
||||
${SED_COMMAND} -i -E 's/(BEAST_DEFINE_TESTSUITE.+)ripple(.+)/\1xrpl\2/g' "${FILE}"
|
||||
done
|
||||
done
|
||||
|
||||
2
.github/scripts/strategy-matrix/generate.py
vendored
2
.github/scripts/strategy-matrix/generate.py
vendored
@@ -235,7 +235,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
|
||||
# so that they are easier to identify in the GitHub Actions UI, as long
|
||||
# names get truncated.
|
||||
# Add Address and Thread (both coupled with UB) sanitizers for specific bookworm distros.
|
||||
# GCC-Asan rippled-embedded tests are failing because of https://github.com/google/sanitizers/issues/856
|
||||
# GCC-Asan xrpld-embedded tests are failing because of https://github.com/google/sanitizers/issues/856
|
||||
if (
|
||||
os["distro_version"] == "bookworm"
|
||||
and f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
|
||||
|
||||
25
.github/workflows/conflicting-pr.yml
vendored
Normal file
25
.github/workflows/conflicting-pr.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: Label PRs with merge conflicts
|
||||
|
||||
on:
|
||||
# So that PRs touching the same files as the push are updated.
|
||||
push:
|
||||
# So that the `dirtyLabel` is removed if conflicts are resolved.
|
||||
# We recommend `pull_request_target` so that github secrets are available.
|
||||
# In `pull_request` we wouldn't be able to change labels of fork PRs.
|
||||
pull_request_target:
|
||||
types: [synchronize]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
main:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check if PRs are dirty
|
||||
uses: eps1lon/actions-label-merge-conflict@1df065ebe6e3310545d4f4c4e862e43bdca146f0 # v3.0.3
|
||||
with:
|
||||
dirtyLabel: "PR: has conflicts"
|
||||
repoToken: "${{ secrets.GITHUB_TOKEN }}"
|
||||
commentOnDirty: "This PR has conflicts, please resolve them in order for the PR to be reviewed."
|
||||
commentOnClean: "All conflicts have been resolved. Assigned reviewers can now start or resume their review."
|
||||
1
.github/workflows/publish-docs.yml
vendored
1
.github/workflows/publish-docs.yml
vendored
@@ -6,7 +6,6 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- "develop"
|
||||
- "release*"
|
||||
paths:
|
||||
- ".github/workflows/publish-docs.yml"
|
||||
- "*.md"
|
||||
|
||||
@@ -199,7 +199,7 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Upload the binary (Linux)
|
||||
if: ${{ github.repository == 'XRPLF/rippled' && runner.os == 'Linux' }}
|
||||
if: ${{ github.event.repository.visibility == 'public' && runner.os == 'Linux' }}
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
|
||||
with:
|
||||
name: xrpld-${{ inputs.config_name }}
|
||||
|
||||
2
.github/workflows/reusable-check-rename.yml
vendored
2
.github/workflows/reusable-check-rename.yml
vendored
@@ -33,6 +33,8 @@ jobs:
|
||||
run: .github/scripts/rename/config.sh .
|
||||
- name: Check include guards
|
||||
run: .github/scripts/rename/include.sh .
|
||||
- name: Check documentation
|
||||
run: .github/scripts/rename/docs.sh .
|
||||
- name: Check for differences
|
||||
env:
|
||||
MESSAGE: |
|
||||
|
||||
@@ -83,7 +83,7 @@ jobs:
|
||||
run-clang-tidy -j ${{ steps.nproc.outputs.nproc }} -p "${BUILD_DIR}" ${TARGETS} 2>&1 | tee clang-tidy-output.txt
|
||||
|
||||
- name: Upload clang-tidy output
|
||||
if: steps.run_clang_tidy.outcome != 'success'
|
||||
if: ${{ github.event.repository.visibility == 'public' && steps.run_clang_tidy.outcome != 'success' }}
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
|
||||
with:
|
||||
name: clang-tidy-results
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -71,6 +71,8 @@ DerivedData
|
||||
/.zed/
|
||||
|
||||
# AI tools.
|
||||
/.agent
|
||||
/.agents
|
||||
/.augment
|
||||
/.claude
|
||||
/CLAUDE.md
|
||||
|
||||
@@ -4,23 +4,23 @@ This changelog is intended to list all updates to the [public API methods](https
|
||||
|
||||
For info about how [API versioning](https://xrpl.org/request-formatting.html#api-versioning) works, including examples, please view the [XLS-22d spec](https://github.com/XRPLF/XRPL-Standards/discussions/54). For details about the implementation of API versioning, view the [implementation PR](https://github.com/XRPLF/rippled/pull/3155). API versioning ensures existing integrations and users continue to receive existing behavior, while those that request a higher API version will experience new behavior.
|
||||
|
||||
The API version controls the API behavior you see. This includes what properties you see in responses, what parameters you're permitted to send in requests, and so on. You specify the API version in each of your requests. When a breaking change is introduced to the `rippled` API, a new version is released. To avoid breaking your code, you should set (or increase) your version when you're ready to upgrade.
|
||||
The API version controls the API behavior you see. This includes what properties you see in responses, what parameters you're permitted to send in requests, and so on. You specify the API version in each of your requests. When a breaking change is introduced to the `xrpld` API, a new version is released. To avoid breaking your code, you should set (or increase) your version when you're ready to upgrade.
|
||||
|
||||
The [commandline](https://xrpl.org/docs/references/http-websocket-apis/api-conventions/request-formatting/#commandline-format) always uses the latest API version. The command line is intended for ad-hoc usage by humans, not programs or automated scripts. The command line is not meant for use in production code.
|
||||
|
||||
For a log of breaking changes, see the **API Version [number]** headings. In general, breaking changes are associated with a particular API Version number. For non-breaking changes, scroll to the **XRP Ledger version [x.y.z]** headings. Non-breaking changes are associated with a particular XRP Ledger (`rippled`) release.
|
||||
For a log of breaking changes, see the **API Version [number]** headings. In general, breaking changes are associated with a particular API Version number. For non-breaking changes, scroll to the **XRP Ledger version [x.y.z]** headings. Non-breaking changes are associated with a particular XRP Ledger (`xrpld`) release.
|
||||
|
||||
## API Version 3 (Beta)
|
||||
|
||||
API version 3 is currently a beta API. It requires enabling `[beta_rpc_api]` in the rippled configuration to use. See [API-VERSION-3.md](API-VERSION-3.md) for the full list of changes in API version 3.
|
||||
API version 3 is currently a beta API. It requires enabling `[beta_rpc_api]` in the xrpld configuration to use. See [API-VERSION-3.md](API-VERSION-3.md) for the full list of changes in API version 3.
|
||||
|
||||
## API Version 2
|
||||
|
||||
API version 2 is available in `rippled` version 2.0.0 and later. See [API-VERSION-2.md](API-VERSION-2.md) for the full list of changes in API version 2.
|
||||
API version 2 is available in `xrpld` version 2.0.0 and later. See [API-VERSION-2.md](API-VERSION-2.md) for the full list of changes in API version 2.
|
||||
|
||||
## API Version 1
|
||||
|
||||
This version is supported by all `rippled` versions. For WebSocket and HTTP JSON-RPC requests, it is currently the default API version used when no `api_version` is specified.
|
||||
This version is supported by all `xrpld` versions. For WebSocket and HTTP JSON-RPC requests, it is currently the default API version used when no `api_version` is specified.
|
||||
|
||||
## Unreleased
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# API Version 2
|
||||
|
||||
API version 2 is available in `rippled` version 2.0.0 and later. To use this API, clients specify `"api_version" : 2` in each request.
|
||||
API version 2 is available in `xrpld` version 2.0.0 and later. To use this API, clients specify `"api_version" : 2` in each request.
|
||||
|
||||
For info about how [API versioning](https://xrpl.org/request-formatting.html#api-versioning) works, including examples, please view the [XLS-22d spec](https://github.com/XRPLF/XRPL-Standards/discussions/54). For details about the implementation of API versioning, view the [implementation PR](https://github.com/XRPLF/rippled/pull/3155). API versioning ensures existing integrations and users continue to receive existing behavior, while those that request a higher API version will experience new behavior.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# API Version 3
|
||||
|
||||
API version 3 is currently a **beta API**. It requires enabling `[beta_rpc_api]` in the rippled configuration to use. To use this API, clients specify `"api_version" : 3` in each request.
|
||||
API version 3 is currently a **beta API**. It requires enabling `[beta_rpc_api]` in the xrpld configuration to use. To use this API, clients specify `"api_version" : 3` in each request.
|
||||
|
||||
For info about how [API versioning](https://xrpl.org/request-formatting.html#api-versioning) works, including examples, please view the [XLS-22d spec](https://github.com/XRPLF/XRPL-Standards/discussions/54). For details about the implementation of API versioning, view the [implementation PR](https://github.com/XRPLF/rippled/pull/3155). API versioning ensures existing integrations and users continue to receive existing behavior, while those that request a higher API version will experience new behavior.
|
||||
|
||||
|
||||
4
BUILD.md
4
BUILD.md
@@ -603,8 +603,8 @@ If you want to experiment with a new package, follow these steps:
|
||||
`default_options` property (with syntax `'$package:$option': $value`).
|
||||
3. Modify [`CMakeLists.txt`](./CMakeLists.txt):
|
||||
- Add a call to `find_package($package REQUIRED)`.
|
||||
- Link a library from the package to the target `ripple_libs`
|
||||
(search for the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`).
|
||||
- Link a library from the package to the target `xrpl_libs`
|
||||
(search for the existing call to `target_link_libraries(xrpl_libs INTERFACE ...)`).
|
||||
4. Start coding! Don't forget to include whatever headers you need from the package.
|
||||
|
||||
[1]: https://github.com/conan-io/conan-center-index/issues/13168
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,142 +0,0 @@
|
||||
# Boost.Coroutine to C++20 Migration — Task List
|
||||
|
||||
> Parent document: [BoostToStdCoroutineSwitchPlan.md](BoostToStdCoroutineSwitchPlan.md)
|
||||
|
||||
---
|
||||
|
||||
## Milestone 1: New Coroutine Primitives
|
||||
|
||||
- [ ] **1.1** Design `CoroTask<T>` class with `promise_type`
|
||||
- Define `promise_type` with `initial_suspend`, `final_suspend`, `unhandled_exception`, `return_value`/`return_void`
|
||||
- Implement `FinalAwaiter` for continuation support
|
||||
- Implement move-only RAII handle wrapper
|
||||
- Support both `CoroTask<T>` and `CoroTask<void>`
|
||||
|
||||
- [ ] **1.2** Design and implement `JobQueueAwaiter`
|
||||
- `await_suspend()` calls `jq_.addJob(type, name, [h]{ h.resume(); })`
|
||||
- Handle `addJob()` failure (shutdown) — resume with error flag or throw
|
||||
- Integrate `nSuspend_` counter increment/decrement
|
||||
|
||||
- [ ] **1.3** Implement `LocalValues` swap in new coroutine resume path
|
||||
- Before `handle.resume()`: save thread-local, install coroutine-local
|
||||
- After `handle.resume()` returns: restore thread-local
|
||||
- Ensure this works when coroutine migrates between threads
|
||||
|
||||
- [ ] **1.4** Add `postCoroTask()` template to `JobQueue`
|
||||
- Accept callable returning `CoroTask<void>`
|
||||
- Schedule initial execution on JobQueue (mirror `postCoro()` behavior)
|
||||
- Return a handle/shared_ptr for join/cancel
|
||||
|
||||
- [ ] **1.5** Write unit tests (`src/test/core/CoroTask_test.cpp`)
|
||||
- Test `CoroTask<void>` runs to completion
|
||||
- Test `CoroTask<int>` returns value
|
||||
- Test exception propagation across co_await
|
||||
- Test coroutine destruction before completion
|
||||
- Test `JobQueueAwaiter` schedules on correct thread
|
||||
- Test `LocalValue` isolation across 4+ coroutines
|
||||
- Test shutdown rejection (addJob returns false)
|
||||
- Test `correct_order` equivalent (yield → join → post → complete)
|
||||
- Test `incorrect_order` equivalent (post → yield → complete)
|
||||
- Test multiple sequential co_await points
|
||||
|
||||
- [ ] **1.6** Verify build on GCC 12+, Clang 16+
|
||||
- [ ] **1.7** Run ASAN + TSAN on new tests
|
||||
- [ ] **1.8** Run full `--unittest` suite (no regressions)
|
||||
- [ ] **1.9** Self-review and create PR #1
|
||||
|
||||
---
|
||||
|
||||
## Milestone 2: Entry Point Migration
|
||||
|
||||
- [ ] **2.1** Migrate `ServerHandler::onRequest()` (`ServerHandler.cpp:287`)
|
||||
- Replace `m_jobQueue.postCoro(jtCLIENT_RPC, ...)` with `postCoroTask()`
|
||||
- Update lambda to return `CoroTask<void>` (add `co_return`)
|
||||
- Update `processSession` to accept new coroutine type
|
||||
|
||||
- [ ] **2.2** Migrate `ServerHandler::onWSMessage()` (`ServerHandler.cpp:325`)
|
||||
- Replace `m_jobQueue.postCoro(jtCLIENT_WEBSOCKET, ...)` with `postCoroTask()`
|
||||
- Update lambda signature
|
||||
|
||||
- [ ] **2.3** Migrate `GRPCServer::CallData::process()` (`GRPCServer.cpp:102`)
|
||||
- Replace `app_.getJobQueue().postCoro(JobType::jtRPC, ...)` with `postCoroTask()`
|
||||
- Update `process(shared_ptr<Coro> coro)` overload signature
|
||||
|
||||
- [ ] **2.4** Update `RPC::Context` (`Context.h:27`)
|
||||
- Replace `std::shared_ptr<JobQueue::Coro> coro{}` with new coroutine wrapper type
|
||||
- Ensure all code that accesses `context.coro` compiles
|
||||
|
||||
- [ ] **2.5** Update `ServerHandler.h` signatures
|
||||
- `processSession()` and `processRequest()` parameter types
|
||||
|
||||
- [ ] **2.6** Update `GRPCServer.h` signatures
|
||||
- `process()` method parameter types
|
||||
|
||||
- [ ] **2.7** Run full `--unittest` suite
|
||||
- [ ] **2.8** Manual smoke test: HTTP + WS + gRPC RPC requests
|
||||
- [ ] **2.9** Run ASAN + TSAN
|
||||
- [ ] **2.10** Self-review and create PR #2
|
||||
|
||||
---
|
||||
|
||||
## Milestone 3: Handler Migration
|
||||
|
||||
- [ ] **3.1** Migrate `doRipplePathFind()` (`RipplePathFind.cpp`)
|
||||
- Replace `context.coro->yield()` with `co_await PathFindAwaiter{...}`
|
||||
- Replace continuation lambda's `coro->post()` / `coro->resume()` with awaiter scheduling
|
||||
- Handle shutdown case (post failure) in awaiter
|
||||
|
||||
- [ ] **3.2** Create `PathFindAwaiter` (or use generic `JobQueueAwaiter`)
|
||||
- Encapsulate the continuation + yield pattern from `RipplePathFind.cpp` lines 108-132
|
||||
|
||||
- [ ] **3.3** Update `Path_test.cpp`
|
||||
- Replace `postCoro` usage with `postCoroTask`
|
||||
- Ensure `context.coro` usage matches new type
|
||||
|
||||
- [ ] **3.4** Update `AMMTest.cpp`
|
||||
- Replace `postCoro` usage with `postCoroTask`
|
||||
|
||||
- [ ] **3.5** Rewrite `Coroutine_test.cpp` for new API
|
||||
- `correct_order`: postCoroTask → co_await → join → resume → complete
|
||||
- `incorrect_order`: post before yield equivalent
|
||||
- `thread_specific_storage`: 4 coroutines with LocalValue isolation
|
||||
|
||||
- [ ] **3.6** Update `JobQueue_test.cpp` `testPostCoro`
|
||||
- Migrate to `postCoroTask` API
|
||||
|
||||
- [ ] **3.7** Verify `ripple_path_find` works end-to-end with new coroutines
|
||||
- [ ] **3.8** Test shutdown-during-pathfind scenario
|
||||
- [ ] **3.9** Run full `--unittest` suite
|
||||
- [ ] **3.10** Run ASAN + TSAN
|
||||
- [ ] **3.11** Self-review and create PR #3
|
||||
|
||||
---
|
||||
|
||||
## Milestone 4: Cleanup & Validation
|
||||
|
||||
- [ ] **4.1** Delete `include/xrpl/core/Coro.ipp`
|
||||
- [ ] **4.2** Remove from `JobQueue.h`:
|
||||
- `#include <boost/coroutine2/all.hpp>`
|
||||
- `struct Coro_create_t`
|
||||
- `class Coro` (entire class)
|
||||
- `postCoro()` template
|
||||
- Comment block (lines 322-377) describing old race condition
|
||||
- [ ] **4.3** Update `cmake/deps/Boost.cmake`:
|
||||
- Remove `coroutine` from `find_package(Boost REQUIRED COMPONENTS ...)`
|
||||
- Remove `Boost::coroutine` from `target_link_libraries`
|
||||
- [ ] **4.4** Update `cmake/XrplInterface.cmake`:
|
||||
- Remove `BOOST_COROUTINES2_NO_DEPRECATION_WARNING`
|
||||
- [ ] **4.5** Run memory benchmark
|
||||
- Create N=1000 coroutines, compare RSS: before vs after
|
||||
- Document results
|
||||
- [ ] **4.6** Run context switch benchmark
|
||||
- 100K yield/resume cycles, compare latency: before vs after
|
||||
- Document results
|
||||
- [ ] **4.7** Run RPC throughput benchmark
|
||||
- Concurrent `ripple_path_find` requests, compare throughput
|
||||
- Document results
|
||||
- [ ] **4.8** Run full `--unittest` suite
|
||||
- [ ] **4.9** Run ASAN, TSAN, UBSan
|
||||
- Confirm `__asan_handle_no_return` warnings are gone
|
||||
- [ ] **4.10** Verify build on all supported compilers
|
||||
- [ ] **4.11** Self-review and create PR #4
|
||||
- [ ] **4.12** Document final benchmark results in PR description
|
||||
@@ -533,7 +533,7 @@ All releases, including release candidates and betas, are handled
|
||||
differently from typical PRs. Most importantly, never use
|
||||
the Github UI to merge a release.
|
||||
|
||||
Rippled uses a linear workflow model that can be summarized as:
|
||||
Xrpld uses a linear workflow model that can be summarized as:
|
||||
|
||||
1. In between releases, developers work against the `develop` branch.
|
||||
2. Periodically, a maintainer will build and tag a beta version from
|
||||
|
||||
22
README.md
22
README.md
@@ -8,11 +8,11 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe
|
||||
|
||||
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free crypto-asset native to the XRP Ledger, and is designed as a gas token for network services and to bridge different currencies. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
|
||||
|
||||
## rippled
|
||||
## xrpld
|
||||
|
||||
The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html).
|
||||
The server software that powers the XRP Ledger is called `xrpld` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `xrpld` server software is written primarily in C++ and runs on a variety of platforms. The `xrpld` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html).
|
||||
|
||||
If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (rippled Reporting Mode has been replaced by Clio.)
|
||||
If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (xrpld Reporting Mode has been replaced by Clio.)
|
||||
|
||||
### Build from Source
|
||||
|
||||
@@ -41,19 +41,19 @@ If you are interested in running an **API Server** (including a **Full History S
|
||||
|
||||
Here are some good places to start learning the source code:
|
||||
|
||||
- Read the markdown files in the source tree: `src/ripple/**/*.md`.
|
||||
- Read the markdown files in the source tree: `src/xrpld/**/*.md`.
|
||||
- Read [the levelization document](.github/scripts/levelization) to get an idea of the internal dependency graph.
|
||||
- In the big picture, the `main` function constructs an `ApplicationImp` object, which implements the `Application` virtual interface. Almost every component in the application takes an `Application&` parameter in its constructor, typically named `app` and stored as a member variable `app_`. This allows most components to depend on any other component.
|
||||
|
||||
### Repository Contents
|
||||
|
||||
| Folder | Contents |
|
||||
| :--------- | :----------------------------------------------- |
|
||||
| `./bin` | Scripts and data files for Ripple integrators. |
|
||||
| `./Builds` | Platform-specific guides for building `rippled`. |
|
||||
| `./docs` | Source documentation files and doxygen config. |
|
||||
| `./cfg` | Example configuration files. |
|
||||
| `./src` | Source code. |
|
||||
| Folder | Contents |
|
||||
| :--------- | :--------------------------------------------- |
|
||||
| `./bin` | Scripts and data files for XRPL developers. |
|
||||
| `./Builds` | Platform-specific guides for building `xrpld`. |
|
||||
| `./docs` | Source documentation files and doxygen config. |
|
||||
| `./cfg` | Example configuration files. |
|
||||
| `./src` | Source code. |
|
||||
|
||||
Some of the directories under `src` are external repositories included using
|
||||
git-subtree. See those directories' README files for more details.
|
||||
|
||||
@@ -6,7 +6,7 @@ For more details on operating an XRP Ledger server securely, please visit https:
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Software constantly evolves. In order to focus resources, we only generally only accept vulnerability reports that affect recent and current versions of the software. We always accept reports for issues present in the **master**, **release** or **develop** branches, and with proposed, [open pull requests](https://github.com/ripple/rippled/pulls).
|
||||
Software constantly evolves. In order to focus resources, we only generally only accept vulnerability reports that affect recent and current versions of the software. We always accept reports for issues present in the **master**, **release** or **develop** branches, and with proposed, [open pull requests](https://github.com/XRPLF/rippled/pulls).
|
||||
|
||||
## Identifying and Reporting Vulnerabilities
|
||||
|
||||
@@ -59,11 +59,11 @@ While we commit to responding with 24 hours of your initial report with our tria
|
||||
|
||||
## Bug Bounty Program
|
||||
|
||||
[Ripple](https://ripple.com) is generously sponsoring a bug bounty program for vulnerabilities in [`rippled`](https://github.com/XRPLF/rippled) (and other related projects, like [`xrpl.js`](https://github.com/XRPLF/xrpl.js), [`xrpl-py`](https://github.com/XRPLF/xrpl-py), [`xrpl4j`](https://github.com/XRPLF/xrpl4j)).
|
||||
[Ripple](https://ripple.com) is generously sponsoring a bug bounty program for vulnerabilities in [`xrpld`](https://github.com/XRPLF/rippled) (and other related projects, like [`xrpl.js`](https://github.com/XRPLF/xrpl.js), [`xrpl-py`](https://github.com/XRPLF/xrpl-py), [`xrpl4j`](https://github.com/XRPLF/xrpl4j)).
|
||||
|
||||
This program allows us to recognize and reward individuals or groups that identify and report bugs. In summary, in order to qualify for a bounty, the bug must be:
|
||||
|
||||
1. **In scope**. Only bugs in software under the scope of the program qualify. Currently, that means `rippled`, `xrpl.js`, `xrpl-py`, `xrpl4j`.
|
||||
1. **In scope**. Only bugs in software under the scope of the program qualify. Currently, that means `xrpld`, `xrpl.js`, `xrpl-py`, `xrpl4j`.
|
||||
2. **Relevant**. A security issue, posing a danger to user funds, privacy, or the operation of the XRP Ledger.
|
||||
3. **Original and previously unknown**. Bugs that are already known and discussed in public do not qualify. Previously reported bugs, even if publicly unknown, are not eligible.
|
||||
4. **Specific**. We welcome general security advice or recommendations, but we cannot pay bounties for that.
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
# https://vl.ripple.com
|
||||
# https://unl.xrplf.org
|
||||
# http://127.0.0.1:8000
|
||||
# file:///etc/opt/ripple/vl.txt
|
||||
# file:///etc/opt/xrpld/vl.txt
|
||||
#
|
||||
# [validator_list_keys]
|
||||
#
|
||||
@@ -43,11 +43,11 @@
|
||||
# ED307A760EE34F2D0CAA103377B1969117C38B8AA0AA1E2A24DAC1F32FC97087ED
|
||||
#
|
||||
|
||||
# The default validator list publishers that the rippled instance
|
||||
# The default validator list publishers that the xrpld instance
|
||||
# trusts.
|
||||
#
|
||||
# WARNING: Changing these values can cause your rippled instance to see a
|
||||
# validated ledger that contradicts other rippled instances'
|
||||
# WARNING: Changing these values can cause your xrpld instance to see a
|
||||
# validated ledger that contradicts other xrpld instances'
|
||||
# validated ledgers (aka a ledger fork) if your validator list(s)
|
||||
# do not sufficiently overlap with the list(s) used by others.
|
||||
# See: https://arxiv.org/pdf/1802.07242.pdf
|
||||
|
||||
@@ -840,7 +840,7 @@
|
||||
#
|
||||
# 0: Disable the ledger replay feature [default]
|
||||
# 1: Enable the ledger replay feature. With this feature enabled, when
|
||||
# acquiring a ledger from the network, a xrpld node only downloads
|
||||
# acquiring a ledger from the network, an xrpld node only downloads
|
||||
# the ledger header and the transactions instead of the whole ledger.
|
||||
# And the ledger is built by applying the transactions to the parent
|
||||
# ledger.
|
||||
|
||||
@@ -140,6 +140,28 @@ function(setup_protocol_autogen)
|
||||
)
|
||||
endif()
|
||||
|
||||
# Check pip index URL configuration
|
||||
execute_process(
|
||||
COMMAND ${VENV_PIP} config get global.index-url
|
||||
OUTPUT_VARIABLE PIP_INDEX_URL
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
ERROR_QUIET
|
||||
)
|
||||
|
||||
# Default PyPI URL
|
||||
set(DEFAULT_PIP_INDEX "https://pypi.org/simple")
|
||||
|
||||
# Show warning if using non-default index
|
||||
if(PIP_INDEX_URL AND NOT PIP_INDEX_URL STREQUAL "")
|
||||
if(NOT PIP_INDEX_URL STREQUAL DEFAULT_PIP_INDEX)
|
||||
message(
|
||||
WARNING
|
||||
"Private pip index URL detected: ${PIP_INDEX_URL}\n"
|
||||
"You may need to connect to VPN to access this URL."
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
message(STATUS "Installing Python dependencies...")
|
||||
execute_process(
|
||||
COMMAND ${VENV_PIP} install --upgrade pip
|
||||
|
||||
31
conan.lock
31
conan.lock
@@ -1,16 +1,16 @@
|
||||
{
|
||||
"version": "0.5",
|
||||
"requires": [
|
||||
"zlib/1.3.1#cac0f6daea041b0ccf42934163defb20%1765284699.337",
|
||||
"zlib/1.3.1#cac0f6daea041b0ccf42934163defb20%1774439233.809",
|
||||
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1765850149.987",
|
||||
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1765850149.926",
|
||||
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1765850149.46",
|
||||
"sqlite3/3.51.0#66aa11eabd0e34954c5c1c061ad44abe%1763899256.358",
|
||||
"soci/4.0.3#fe32b9ad5eb47e79ab9e45a68f363945%1774450067.231",
|
||||
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1765850147.878",
|
||||
"secp256k1/0.7.1#481881709eb0bdd0185a12b912bbe8ad%1770910500.329",
|
||||
"rocksdb/10.5.1#4a197eca381a3e5ae8adf8cffa5aacd0%1765850186.86",
|
||||
"re2/20251105#8579cfd0bda4daf0683f9e3898f964b4%1772560729.95",
|
||||
"protobuf/6.32.1#b54f00da2e0f61d821330b5b638b0f80%1768401317.762",
|
||||
"openssl/3.5.5#e6399de266349245a4542fc5f6c71552%1774367199.56",
|
||||
"re2/20251105#8579cfd0bda4daf0683f9e3898f964b4%1774398111.888",
|
||||
"protobuf/6.33.5#d96d52ba5baaaa532f47bda866ad87a5%1773224203.27",
|
||||
"openssl/3.6.1#e6399de266349245a4542fc5f6c71552%1774458290.139",
|
||||
"nudb/2.0.9#0432758a24204da08fee953ec9ea03cb%1769436073.32",
|
||||
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914",
|
||||
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1765842973.492",
|
||||
@@ -18,24 +18,23 @@
|
||||
"libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736",
|
||||
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
|
||||
"gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1768312129.152",
|
||||
"grpc/1.72.0#aaade9421980b2d926dbfb613d56c38a%1774376249.106",
|
||||
"grpc/1.78.1#b1a9e74b145cc471bed4dc64dc6eb2c1%1772623605.068",
|
||||
"ed25519/2015.03#ae761bdc52730a843f0809bdf6c1b1f6%1765850143.772",
|
||||
"date/3.0.4#862e11e80030356b53c2c38599ceb32b%1765850143.772",
|
||||
"c-ares/1.34.6#545240bb1c40e2cacd4362d6b8967650%1766500685.317",
|
||||
"c-ares/1.34.6#545240bb1c40e2cacd4362d6b8967650%1774439234.681",
|
||||
"bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1765850143.837",
|
||||
"boost/1.90.0#d5e8defe7355494953be18524a7f135b%1769454080.269",
|
||||
"abseil/20250127.0#bb0baf1f362bc4a725a24eddd419b8f7%1774365460.196"
|
||||
],
|
||||
"build_requires": [
|
||||
"zlib/1.3.1#cac0f6daea041b0ccf42934163defb20%1765284699.337",
|
||||
"strawberryperl/5.32.1.1#8d114504d172cfea8ea1662d09b6333e%1751971032.423",
|
||||
"protobuf/6.32.1#b54f00da2e0f61d821330b5b638b0f80%1768401317.762",
|
||||
"zlib/1.3.1#cac0f6daea041b0ccf42934163defb20%1774439233.809",
|
||||
"strawberryperl/5.32.1.1#8d114504d172cfea8ea1662d09b6333e%1774447376.964",
|
||||
"protobuf/6.33.5#d96d52ba5baaaa532f47bda866ad87a5%1773224203.27",
|
||||
"nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1765850144.707",
|
||||
"msys2/cci.latest#d22fe7b2808f5fd34d0a7923ace9c54f%1770657326.649",
|
||||
"m4/1.4.19#5d7a4994e5875d76faf7acf3ed056036%1774365463.87",
|
||||
"cmake/4.3.0#b939a42e98f593fb34d3a8c5cc860359%1773780142.26",
|
||||
"cmake/3.31.11#f325c933f618a1fcebc1e1c0babfd1ba%1769622857.944",
|
||||
"b2/5.4.2#ffd6084a119587e70f11cd45d1a386e2%1766594659.866",
|
||||
"cmake/4.3.0#b939a42e98f593fb34d3a8c5cc860359%1774439249.183",
|
||||
"b2/5.4.2#ffd6084a119587e70f11cd45d1a386e2%1774439233.447",
|
||||
"automake/1.16.5#b91b7c384c3deaa9d535be02da14d04f%1755524470.56",
|
||||
"autoconf/2.71#51077f068e61700d65bb05541ea1e4b0%1731054366.86",
|
||||
"abseil/20250127.0#bb0baf1f362bc4a725a24eddd419b8f7%1774365460.196"
|
||||
@@ -47,13 +46,13 @@
|
||||
"boost/1.90.0"
|
||||
],
|
||||
"protobuf/[>=5.27.0 <7]": [
|
||||
"protobuf/6.32.1"
|
||||
"protobuf/6.33.5"
|
||||
],
|
||||
"lz4/1.9.4": [
|
||||
"lz4/1.10.0"
|
||||
],
|
||||
"sqlite3/[>=3.44 <4]": [
|
||||
"sqlite3/3.49.1"
|
||||
"sqlite3/3.51.0"
|
||||
],
|
||||
"boost/1.83.0": [
|
||||
"boost/1.90.0"
|
||||
|
||||
21
conanfile.py
21
conanfile.py
@@ -4,7 +4,6 @@ import re
|
||||
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
|
||||
|
||||
from conan import ConanFile
|
||||
from conan import __version__ as conan_version
|
||||
|
||||
|
||||
class Xrpl(ConanFile):
|
||||
@@ -30,10 +29,10 @@ class Xrpl(ConanFile):
|
||||
|
||||
requires = [
|
||||
"ed25519/2015.03",
|
||||
"grpc/1.72.0",
|
||||
"grpc/1.78.1",
|
||||
"libarchive/3.8.1",
|
||||
"nudb/2.0.9",
|
||||
"openssl/3.5.5",
|
||||
"openssl/3.6.1",
|
||||
"secp256k1/0.7.1",
|
||||
"soci/4.0.3",
|
||||
"zlib/1.3.1",
|
||||
@@ -44,7 +43,7 @@ class Xrpl(ConanFile):
|
||||
]
|
||||
|
||||
tool_requires = [
|
||||
"protobuf/6.32.1",
|
||||
"protobuf/6.33.5",
|
||||
]
|
||||
|
||||
default_options = {
|
||||
@@ -137,20 +136,16 @@ class Xrpl(ConanFile):
|
||||
self.default_options["fPIC"] = False
|
||||
|
||||
def requirements(self):
|
||||
# Conan 2 requires transitive headers to be specified
|
||||
transitive_headers_opt = (
|
||||
{"transitive_headers": True} if conan_version.split(".")[0] == "2" else {}
|
||||
)
|
||||
self.requires("boost/1.90.0", force=True, **transitive_headers_opt)
|
||||
self.requires("date/3.0.4", **transitive_headers_opt)
|
||||
self.requires("boost/1.90.0", force=True, transitive_headers=True)
|
||||
self.requires("date/3.0.4", transitive_headers=True)
|
||||
self.requires("lz4/1.10.0", force=True)
|
||||
self.requires("protobuf/6.32.1", force=True)
|
||||
self.requires("sqlite3/3.49.1", force=True)
|
||||
self.requires("protobuf/6.33.5", force=True)
|
||||
self.requires("sqlite3/3.51.0", force=True)
|
||||
if self.options.jemalloc:
|
||||
self.requires("jemalloc/5.3.0")
|
||||
if self.options.rocksdb:
|
||||
self.requires("rocksdb/10.5.1")
|
||||
self.requires("xxhash/0.8.3", **transitive_headers_opt)
|
||||
self.requires("xxhash/0.8.3", transitive_headers=True)
|
||||
|
||||
exports_sources = (
|
||||
"CMakeLists.txt",
|
||||
|
||||
@@ -70,7 +70,6 @@ words:
|
||||
- coeffs
|
||||
- coldwallet
|
||||
- compr
|
||||
- cppcoro
|
||||
- conanfile
|
||||
- conanrun
|
||||
- confs
|
||||
@@ -106,7 +105,6 @@ words:
|
||||
- fmtdur
|
||||
- fsanitize
|
||||
- funclets
|
||||
- gantt
|
||||
- gcov
|
||||
- gcovr
|
||||
- ghead
|
||||
@@ -150,7 +148,6 @@ words:
|
||||
- ltype
|
||||
- mcmodel
|
||||
- MEMORYSTATUSEX
|
||||
- Mankawde
|
||||
- Merkle
|
||||
- Metafuncton
|
||||
- misprediction
|
||||
@@ -199,7 +196,6 @@ words:
|
||||
- permissioned
|
||||
- pointee
|
||||
- populator
|
||||
- pratik
|
||||
- preauth
|
||||
- preauthorization
|
||||
- preauthorize
|
||||
@@ -208,7 +204,6 @@ words:
|
||||
- protobuf
|
||||
- protos
|
||||
- ptrs
|
||||
- Pratik
|
||||
- pushd
|
||||
- pyenv
|
||||
- pyparsing
|
||||
@@ -216,8 +211,6 @@ words:
|
||||
- queuable
|
||||
- Raphson
|
||||
- replayer
|
||||
- repost
|
||||
- reposts
|
||||
- rerere
|
||||
- retriable
|
||||
- RIPD
|
||||
@@ -249,7 +242,6 @@ words:
|
||||
- soci
|
||||
- socidb
|
||||
- sslws
|
||||
- stackful
|
||||
- statsd
|
||||
- STATSDCOLLECTOR
|
||||
- stissue
|
||||
|
||||
@@ -558,7 +558,7 @@ network delay. A test case specifies:
|
||||
1. a UNL with different number of validators for different test cases,
|
||||
1. a network with zero or more non-validator nodes,
|
||||
1. a sequence of validator reliability change events (by killing/restarting
|
||||
nodes, or by running modified rippled that does not send all validation
|
||||
nodes, or by running modified xrpld that does not send all validation
|
||||
messages),
|
||||
1. the correct outcomes.
|
||||
|
||||
@@ -566,7 +566,7 @@ For all the test cases, the correct outcomes are verified by examining logs. We
|
||||
will grep the log to see if the correct negative UNLs are generated, and whether
|
||||
or not the network is making progress when it should be. The ripdtop tool will
|
||||
be helpful for monitoring validators' states and ledger progress. Some of the
|
||||
timing parameters of rippled will be changed to have faster ledger time. Most if
|
||||
timing parameters of xrpld will be changed to have faster ledger time. Most if
|
||||
not all test cases do not need client transactions.
|
||||
|
||||
For example, the test cases for the prototype:
|
||||
@@ -583,7 +583,7 @@ For example, the test cases for the prototype:
|
||||
|
||||
We considered testing with the current unit test framework, specifically the
|
||||
[Consensus Simulation
|
||||
Framework](https://github.com/ripple/rippled/blob/develop/src/test/csf/README.md)
|
||||
Framework](https://github.com/XRPLF/rippled/blob/develop/src/test/csf/README.md)
|
||||
(CSF). However, the CSF currently can only test the generic consensus algorithm
|
||||
as in the paper: [Analysis of the XRP Ledger Consensus
|
||||
Protocol](https://arxiv.org/abs/1802.07242).
|
||||
|
||||
@@ -4,7 +4,7 @@ skinparam sequenceArrowThickness 2
|
||||
skinparam roundcorner 20
|
||||
skinparam maxmessagesize 160
|
||||
|
||||
actor "Rippled Start" as RS
|
||||
actor "Xrpld Start" as RS
|
||||
participant "Timer" as T
|
||||
participant "NetworkOPs" as NOP
|
||||
participant "ValidatorList" as VL #lightgreen
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# `rippled` Docker Image
|
||||
# `xrpld` Docker Image
|
||||
|
||||
- Some info relating to Docker containers can be found here: [../Builds/containers](../Builds/containers)
|
||||
- Images for building and testing rippled can be found here: [thejohnfreeman/rippled-docker](https://github.com/thejohnfreeman/rippled-docker/)
|
||||
- These images do not have rippled. They have all the tools necessary to build rippled.
|
||||
- Images for building and testing xrpld can be found here: [thejohnfreeman/rippled-docker](https://github.com/thejohnfreeman/rippled-docker/)
|
||||
- These images do not have xrpld. They have all the tools necessary to build xrpld.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Project related configuration options
|
||||
#---------------------------------------------------------------------------
|
||||
DOXYFILE_ENCODING = UTF-8
|
||||
PROJECT_NAME = "rippled"
|
||||
PROJECT_NAME = "xrpld"
|
||||
PROJECT_NUMBER =
|
||||
PROJECT_BRIEF =
|
||||
PROJECT_LOGO =
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
## Heap profiling of rippled with jemalloc
|
||||
## Heap profiling of xrpld with jemalloc
|
||||
|
||||
The jemalloc library provides a good API for doing heap analysis,
|
||||
including a mechanism to dump a description of the heap from within the
|
||||
@@ -7,26 +7,26 @@ activity in general, as well as how to acquire the software, are available on
|
||||
the jemalloc site:
|
||||
[https://github.com/jemalloc/jemalloc/wiki/Use-Case:-Heap-Profiling](https://github.com/jemalloc/jemalloc/wiki/Use-Case:-Heap-Profiling)
|
||||
|
||||
jemalloc is acquired separately from rippled, and is not affiliated
|
||||
jemalloc is acquired separately from xrpld, and is not affiliated
|
||||
with Ripple Labs. If you compile and install jemalloc from the
|
||||
source release with default options, it will install the library and header
|
||||
under `/usr/local/lib` and `/usr/local/include`, respectively. Heap
|
||||
profiling has been tested with rippled on a Linux platform. It should
|
||||
work on platforms on which both rippled and jemalloc are available.
|
||||
profiling has been tested with xrpld on a Linux platform. It should
|
||||
work on platforms on which both xrpld and jemalloc are available.
|
||||
|
||||
To link rippled with jemalloc, the argument
|
||||
To link xrpld with jemalloc, the argument
|
||||
`profile-jemalloc=<jemalloc_dir>` is provided after the optional target.
|
||||
The `<jemalloc_dir>` argument should be the same as that of the
|
||||
`--prefix` parameter passed to the jemalloc configure script when building.
|
||||
|
||||
## Examples:
|
||||
|
||||
Build rippled with jemalloc library under /usr/local/lib and
|
||||
Build xrpld with jemalloc library under /usr/local/lib and
|
||||
header under /usr/local/include:
|
||||
|
||||
$ scons profile-jemalloc=/usr/local
|
||||
|
||||
Build rippled using clang with the jemalloc library under /opt/local/lib
|
||||
Build xrpld using clang with the jemalloc library under /opt/local/lib
|
||||
and header under /opt/local/include:
|
||||
|
||||
$ scons clang profile-jemalloc=/opt/local
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
# Building documentation
|
||||
|
||||
## Dependencies
|
||||
|
||||
Install these dependencies:
|
||||
|
||||
- [Doxygen](http://www.doxygen.nl): All major platforms have [official binary
|
||||
distributions](http://www.doxygen.nl/download.html#srcbin), or you can
|
||||
build from [source](http://www.doxygen.nl/download.html#srcbin).
|
||||
- MacOS: We recommend installing via Homebrew: `brew install doxygen`.
|
||||
The executable will be installed in `/usr/local/bin` which is already
|
||||
in the default `PATH`.
|
||||
|
||||
If you use the official binary distribution, then you'll need to make
|
||||
Doxygen available to your command line. You can do this by adding
|
||||
a symbolic link from `/usr/local/bin` to the `doxygen` executable. For
|
||||
example,
|
||||
|
||||
```
|
||||
$ ln -s /Applications/Doxygen.app/Contents/Resources/doxygen /usr/local/bin/doxygen
|
||||
```
|
||||
|
||||
- [PlantUML](http://plantuml.com):
|
||||
1. Install a functioning Java runtime, if you don't already have one.
|
||||
2. Download [`plantuml.jar`](http://sourceforge.net/projects/plantuml/files/plantuml.jar/download).
|
||||
|
||||
- [Graphviz](https://www.graphviz.org):
|
||||
- Linux: Install from your package manager.
|
||||
- Windows: Use an [official installer](https://graphviz.gitlab.io/_pages/Download/Download_windows.html).
|
||||
- MacOS: Install via Homebrew: `brew install graphviz`.
|
||||
|
||||
## Docker
|
||||
|
||||
Instead of installing the above dependencies locally, you can use the official
|
||||
build environment Docker image, which has all of them installed already.
|
||||
|
||||
1. Install [Docker](https://docs.docker.com/engine/installation/)
|
||||
2. Pull the image:
|
||||
|
||||
```
|
||||
sudo docker pull rippleci/rippled-ci-builder:2944b78d22db
|
||||
```
|
||||
|
||||
3. Run the image from the project folder:
|
||||
|
||||
```
|
||||
sudo docker run -v $PWD:/opt/rippled --rm rippleci/rippled-ci-builder:2944b78d22db
|
||||
```
|
||||
|
||||
## Build
|
||||
|
||||
There is a `docs` target in the CMake configuration.
|
||||
|
||||
```
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -Donly_docs=ON ..
|
||||
cmake --build . --target docs --parallel
|
||||
```
|
||||
|
||||
The output will be in `build/docs/html`.
|
||||
8
docs/build/depend.md
vendored
8
docs/build/depend.md
vendored
@@ -20,7 +20,7 @@ CMakeToolchain
|
||||
|
||||
```
|
||||
# If you want to depend on a version of libxrpl that is not in ConanCenter,
|
||||
# then you can export the recipe from the rippled project.
|
||||
# then you can export the recipe from the xrpld project.
|
||||
conan export <path>
|
||||
```
|
||||
|
||||
@@ -49,9 +49,9 @@ cmake --build . --parallel
|
||||
|
||||
## CMake subdirectory
|
||||
|
||||
The second method adds the [rippled][] project as a CMake
|
||||
The second method adds the [xrpld][] project as a CMake
|
||||
[subdirectory][add_subdirectory].
|
||||
This method works well when you keep the rippled project as a Git
|
||||
This method works well when you keep the xrpld project as a Git
|
||||
[submodule][].
|
||||
It's good for when you want to make changes to libxrpl as part of your own
|
||||
project.
|
||||
@@ -90,6 +90,6 @@ cmake --build . --parallel
|
||||
|
||||
[add_subdirectory]: https://cmake.org/cmake/help/latest/command/add_subdirectory.html
|
||||
[submodule]: https://git-scm.com/book/en/v2/Git-Tools-Submodules
|
||||
[rippled]: https://github.com/ripple/rippled
|
||||
[xrpld]: https://github.com/XRPLF/rippled
|
||||
[Conan]: https://docs.conan.io/
|
||||
[CMake]: https://cmake.org/cmake/help/latest/
|
||||
|
||||
2
docs/build/environment.md
vendored
2
docs/build/environment.md
vendored
@@ -55,7 +55,7 @@ clang --version
|
||||
### Install Xcode Specific Version (Optional)
|
||||
|
||||
If you develop other applications using XCode you might be consistently updating to the newest version of Apple Clang.
|
||||
This will likely cause issues building rippled. You may want to install a specific version of Xcode:
|
||||
This will likely cause issues building xrpld. You may want to install a specific version of Xcode:
|
||||
|
||||
1. **Download Xcode**
|
||||
- Visit [Apple Developer Downloads](https://developer.apple.com/download/more/)
|
||||
|
||||
54
docs/build/install.md
vendored
54
docs/build/install.md
vendored
@@ -1,4 +1,4 @@
|
||||
This document contains instructions for installing rippled.
|
||||
This document contains instructions for installing xrpld.
|
||||
The APT package manager is common on Debian-based Linux distributions like
|
||||
Ubuntu,
|
||||
while the YUM package manager is common on Red Hat-based Linux distributions
|
||||
@@ -8,7 +8,7 @@ and the only supported option for installing custom builds.
|
||||
|
||||
## From source
|
||||
|
||||
From a source build, you can install rippled and libxrpl using CMake's
|
||||
From a source build, you can install xrpld and libxrpl using CMake's
|
||||
`--install` mode:
|
||||
|
||||
```
|
||||
@@ -16,7 +16,7 @@ cmake --install . --prefix /opt/local
|
||||
```
|
||||
|
||||
The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
||||
`C:/Program Files/rippled` on Windows.
|
||||
`C:/Program Files/xrpld` on Windows.
|
||||
|
||||
[1]: https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html
|
||||
|
||||
@@ -52,7 +52,7 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
||||
|
||||
5. Add the appropriate Ripple repository for your operating system version:
|
||||
|
||||
echo "deb [signed-by=/usr/local/share/keyrings/ripple-key.gpg] https://repos.ripple.com/repos/rippled-deb focal stable" | \
|
||||
echo "deb [signed-by=/usr/local/share/keyrings/ripple-key.gpg] https://repos.ripple.com/repos/xrpld-deb focal stable" | \
|
||||
sudo tee -a /etc/apt/sources.list.d/ripple.list
|
||||
|
||||
The above example is appropriate for **Ubuntu 20.04 Focal Fossa**. For other operating systems, replace the word `focal` with one of the following:
|
||||
@@ -61,9 +61,9 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
||||
- `bullseye` for **Debian 11 Bullseye**
|
||||
- `buster` for **Debian 10 Buster**
|
||||
|
||||
If you want access to development or pre-release versions of `rippled`, use one of the following instead of `stable`:
|
||||
- `unstable` - Pre-release builds ([`release` branch](https://github.com/ripple/rippled/tree/release))
|
||||
- `nightly` - Experimental/development builds ([`develop` branch](https://github.com/ripple/rippled/tree/develop))
|
||||
If you want access to development or pre-release versions of `xrpld`, use one of the following instead of `stable`:
|
||||
- `unstable` - Pre-release builds ([`release` branch](https://github.com/XRPLF/xrpld/tree/release))
|
||||
- `nightly` - Experimental/development builds ([`develop` branch](https://github.com/XRPLF/xrpld/tree/develop))
|
||||
|
||||
**Warning:** Unstable and nightly builds may be broken at any time. Do not use these builds for production servers.
|
||||
|
||||
@@ -71,23 +71,23 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
||||
|
||||
sudo apt -y update
|
||||
|
||||
7. Install the `rippled` software package:
|
||||
7. Install the `xrpld` software package:
|
||||
|
||||
sudo apt -y install rippled
|
||||
sudo apt -y install xrpld
|
||||
|
||||
8. Check the status of the `rippled` service:
|
||||
8. Check the status of the `xrpld` service:
|
||||
|
||||
systemctl status rippled.service
|
||||
systemctl status xrpld.service
|
||||
|
||||
The `rippled` service should start automatically. If not, you can start it manually:
|
||||
The `xrpld` service should start automatically. If not, you can start it manually:
|
||||
|
||||
sudo systemctl start rippled.service
|
||||
sudo systemctl start xrpld.service
|
||||
|
||||
9. Optional: allow `rippled` to bind to privileged ports.
|
||||
9. Optional: allow `xrpld` to bind to privileged ports.
|
||||
|
||||
This allows you to serve incoming API requests on port 80 or 443. (If you want to do so, you must also update the config file's port settings.)
|
||||
|
||||
sudo setcap 'cap_net_bind_service=+ep' /opt/ripple/bin/rippled
|
||||
sudo setcap 'cap_net_bind_service=+ep' /etc/opt/xrpld/bin/xrpld
|
||||
|
||||
## With the YUM package manager
|
||||
|
||||
@@ -106,8 +106,8 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
repo_gpgcheck=1
|
||||
baseurl=https://repos.ripple.com/repos/rippled-rpm/stable/
|
||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/stable/repodata/repomd.xml.key
|
||||
baseurl=https://repos.ripple.com/repos/xrpld-rpm/stable/
|
||||
gpgkey=https://repos.ripple.com/repos/xrpld-rpm/stable/repodata/repomd.xml.key
|
||||
REPOFILE
|
||||
|
||||
_Unstable_
|
||||
@@ -118,8 +118,8 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
repo_gpgcheck=1
|
||||
baseurl=https://repos.ripple.com/repos/rippled-rpm/unstable/
|
||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/unstable/repodata/repomd.xml.key
|
||||
baseurl=https://repos.ripple.com/repos/xrpld-rpm/unstable/
|
||||
gpgkey=https://repos.ripple.com/repos/xrpld-rpm/unstable/repodata/repomd.xml.key
|
||||
REPOFILE
|
||||
|
||||
_Nightly_
|
||||
@@ -130,22 +130,22 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
repo_gpgcheck=1
|
||||
baseurl=https://repos.ripple.com/repos/rippled-rpm/nightly/
|
||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/nightly/repodata/repomd.xml.key
|
||||
baseurl=https://repos.ripple.com/repos/xrpld-rpm/nightly/
|
||||
gpgkey=https://repos.ripple.com/repos/xrpld-rpm/nightly/repodata/repomd.xml.key
|
||||
REPOFILE
|
||||
|
||||
2. Fetch the latest repo updates:
|
||||
|
||||
sudo yum -y update
|
||||
|
||||
3. Install the new `rippled` package:
|
||||
3. Install the new `xrpld` package:
|
||||
|
||||
sudo yum install -y rippled
|
||||
sudo yum install -y xrpld
|
||||
|
||||
4. Configure the `rippled` service to start on boot:
|
||||
4. Configure the `xrpld` service to start on boot:
|
||||
|
||||
sudo systemctl enable rippled.service
|
||||
sudo systemctl enable xrpld.service
|
||||
|
||||
5. Start the `rippled` service:
|
||||
5. Start the `xrpld` service:
|
||||
|
||||
sudo systemctl start rippled.service
|
||||
sudo systemctl start xrpld.service
|
||||
|
||||
6
docs/build/sanitizers.md
vendored
6
docs/build/sanitizers.md
vendored
@@ -1,9 +1,9 @@
|
||||
# Sanitizer Configuration for Rippled
|
||||
# Sanitizer Configuration for Xrpld
|
||||
|
||||
This document explains how to properly configure and run sanitizers (AddressSanitizer, undefinedbehaviorSanitizer, ThreadSanitizer) with the xrpld project.
|
||||
Corresponding suppression files are located in the `sanitizers/suppressions` directory.
|
||||
|
||||
- [Sanitizer Configuration for Rippled](#sanitizer-configuration-for-rippled)
|
||||
- [Sanitizer Configuration for Xrpld](#sanitizer-configuration-for-xrpld)
|
||||
- [Building with Sanitizers](#building-with-sanitizers)
|
||||
- [Summary](#summary)
|
||||
- [Build steps:](#build-steps)
|
||||
@@ -100,7 +100,7 @@ export LSAN_OPTIONS="include=sanitizers/suppressions/runtime-lsan-options.txt:su
|
||||
|
||||
- Boost intrusive containers (used in `aged_unordered_container`) trigger false positives
|
||||
- Boost context switching (used in `Workers.cpp`) confuses ASAN's stack tracking
|
||||
- Since we usually don't build Boost (because we don't want to instrument Boost and detect issues in Boost code) with ASAN but use Boost containers in ASAN instrumented rippled code, it generates false positives.
|
||||
- Since we usually don't build Boost (because we don't want to instrument Boost and detect issues in Boost code) with ASAN but use Boost containers in ASAN instrumented xrpld code, it generates false positives.
|
||||
- Building dependencies with ASAN instrumentation reduces false positives. But we don't want to instrument dependencies like Boost with ASAN because it is slow (to compile as well as run tests) and not necessary.
|
||||
- See: https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow
|
||||
- More such flags are detailed [here](https://github.com/google/sanitizers/wiki/AddressSanitizerFlags)
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
Consensus is the task of reaching agreement within a distributed system in the
|
||||
presence of faulty or even malicious participants. This document outlines the
|
||||
[XRP Ledger Consensus Algorithm](https://arxiv.org/abs/1802.07242)
|
||||
as implemented in [rippled](https://github.com/ripple/rippled), but
|
||||
as implemented in [xrpld](https://github.com/XRPLF/rippled), but
|
||||
focuses on its utility as a generic consensus algorithm independent of the
|
||||
detailed mechanics of the Ripple Consensus Ledger. Most notably, the algorithm
|
||||
does not require fully synchronous communication between all nodes in the
|
||||
@@ -54,9 +54,9 @@ and was abandoned.
|
||||
|
||||
The remainder of this section describes the Consensus and Validation algorithms
|
||||
in more detail and is meant as a companion guide to understanding the generic
|
||||
implementation in `rippled`. The document **does not** discuss correctness,
|
||||
implementation in `xrpld`. The document **does not** discuss correctness,
|
||||
fault-tolerance or liveness properties of the algorithms or the full details of
|
||||
how they integrate within `rippled` to support the Ripple Consensus Ledger.
|
||||
how they integrate within `xrpld` to support the Ripple Consensus Ledger.
|
||||
|
||||
## Consensus Overview
|
||||
|
||||
|
||||
2
external/README.md
vendored
2
external/README.md
vendored
@@ -1,6 +1,6 @@
|
||||
# External Conan recipes
|
||||
|
||||
The subdirectories in this directory contain external libraries used by rippled.
|
||||
The subdirectories in this directory contain external libraries used by xrpld.
|
||||
|
||||
| Folder | Upstream | Description |
|
||||
| :--------------- | :------------------------------------------------------------- | :------------------------------------------------------------------------------------------- |
|
||||
|
||||
4
external/antithesis-sdk/CMakeLists.txt
vendored
4
external/antithesis-sdk/CMakeLists.txt
vendored
@@ -1,11 +1,11 @@
|
||||
cmake_minimum_required(VERSION 3.18)
|
||||
|
||||
# Note, version set explicitly by rippled project
|
||||
# Note, version set explicitly by xrpld project
|
||||
project(antithesis-sdk-cpp VERSION 0.4.4 LANGUAGES CXX)
|
||||
|
||||
add_library(antithesis-sdk-cpp INTERFACE antithesis_sdk.h)
|
||||
|
||||
# Note, both sections below created by rippled project
|
||||
# Note, both sections below created by xrpld project
|
||||
target_include_directories(antithesis-sdk-cpp INTERFACE
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
|
||||
|
||||
@@ -99,7 +99,7 @@ private:
|
||||
Derived classes have their instances counted automatically. This is used
|
||||
for reporting purposes.
|
||||
|
||||
@ingroup ripple_basics
|
||||
@ingroup basics
|
||||
*/
|
||||
template <class Object>
|
||||
class CountedObject
|
||||
|
||||
@@ -59,7 +59,7 @@ concept CAdoptTag = std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> ||
|
||||
still retaining the reference counts. For example, for SHAMapInnerNodes the
|
||||
children may be reset in that function. Note that std::shared_pointer WILL
|
||||
run the destructor when the strong count reaches zero, but may not free the
|
||||
memory used by the object until the weak count reaches zero. In rippled, we
|
||||
memory used by the object until the weak count reaches zero. In xrpld, we
|
||||
typically allocate shared pointers with the `make_shared` function. When
|
||||
that is used, the memory is not reclaimed until the weak count reaches zero.
|
||||
*/
|
||||
|
||||
@@ -33,7 +33,7 @@ enum class ReleaseWeakRefAction { noop, destroy };
|
||||
/** Implement the strong count, weak count, and bit flags for an intrusive
|
||||
pointer.
|
||||
|
||||
A class can satisfy the requirements of a xrpl::IntrusivePointer by
|
||||
A class can satisfy the requirements of an xrpl::IntrusivePointer by
|
||||
inheriting from this class.
|
||||
*/
|
||||
struct IntrusiveRefCounts
|
||||
|
||||
@@ -4,7 +4,7 @@ Utility functions and classes.
|
||||
|
||||
ripple/basic should contain no dependencies on other modules.
|
||||
|
||||
# Choosing a rippled container.
|
||||
# Choosing an xrpld container.
|
||||
|
||||
- `std::vector`
|
||||
- For ordered containers with most insertions or erases at the end.
|
||||
|
||||
@@ -26,7 +26,7 @@ public:
|
||||
explicit UptimeClock() = default;
|
||||
|
||||
static time_point
|
||||
now(); // seconds since rippled program start
|
||||
now(); // seconds since xrpld program start
|
||||
|
||||
private:
|
||||
static std::atomic<rep> now_;
|
||||
|
||||
@@ -1,699 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <xrpl/beast/utility/instrumentation.h>
|
||||
|
||||
#include <coroutine>
|
||||
#include <exception>
|
||||
#include <type_traits>
|
||||
#include <utility>
|
||||
#include <variant>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
template <typename T = void>
|
||||
class CoroTask;
|
||||
|
||||
/**
|
||||
* CoroTask<void> -- coroutine return type for void-returning coroutines.
|
||||
*
|
||||
* Class / Dependency Diagram
|
||||
* ==========================
|
||||
*
|
||||
* CoroTask<void>
|
||||
* +-----------------------------------------------+
|
||||
* | - handle_ : Handle (coroutine_handle<promise>) |
|
||||
* +-----------------------------------------------+
|
||||
* | + handle(), done() |
|
||||
* | + await_ready/suspend/resume (Awaiter iface) |
|
||||
* +-----------------------------------------------+
|
||||
* | owns
|
||||
* v
|
||||
* promise_type
|
||||
* +-----------------------------------------------+
|
||||
* | - exception_ : std::exception_ptr |
|
||||
* | - continuation_ : std::coroutine_handle<> |
|
||||
* +-----------------------------------------------+
|
||||
* | + get_return_object() -> CoroTask |
|
||||
* | + initial_suspend() -> suspend_always (lazy) |
|
||||
* | + final_suspend() -> FinalAwaiter |
|
||||
* | + return_void() |
|
||||
* | + unhandled_exception() |
|
||||
* +-----------------------------------------------+
|
||||
* | returns at final_suspend
|
||||
* v
|
||||
* FinalAwaiter
|
||||
* +-----------------------------------------------+
|
||||
* | await_suspend(h): |
|
||||
* | if continuation_ set -> symmetric transfer |
|
||||
* | else -> noop_coroutine |
|
||||
* +-----------------------------------------------+
|
||||
*
|
||||
* Design Notes
|
||||
* ------------
|
||||
* - Lazy start: initial_suspend returns suspend_always, so the coroutine
|
||||
* body does not execute until the handle is explicitly resumed.
|
||||
* - Symmetric transfer: await_suspend returns a coroutine_handle instead
|
||||
* of void/bool, allowing the scheduler to jump directly to the next
|
||||
* coroutine without growing the call stack.
|
||||
* - Continuation chaining: when one CoroTask is co_await-ed inside
|
||||
* another, the caller's handle is stored as continuation_ so
|
||||
* FinalAwaiter can resume it when this task finishes.
|
||||
* - Move-only: the handle is exclusively owned; copy is deleted.
|
||||
*
|
||||
* Usage Examples
|
||||
* ==============
|
||||
*
|
||||
* 1. Basic void coroutine (the most common case in rippled):
|
||||
*
|
||||
* CoroTask<void> doWork(std::shared_ptr<CoroTaskRunner> runner) {
|
||||
* // do something
|
||||
* co_await runner->suspend(); // yield control
|
||||
* // resumed later via runner->post() or runner->resume()
|
||||
* co_return;
|
||||
* }
|
||||
*
|
||||
* 2. co_await-ing one CoroTask<void> from another (chaining):
|
||||
*
|
||||
* CoroTask<void> inner() {
|
||||
* // ...
|
||||
* co_return;
|
||||
* }
|
||||
* CoroTask<void> outer() {
|
||||
* co_await inner(); // continuation_ links outer -> inner
|
||||
* co_return; // FinalAwaiter resumes outer
|
||||
* }
|
||||
*
|
||||
* 3. Exceptions propagate through co_await:
|
||||
*
|
||||
* CoroTask<void> failing() {
|
||||
* throw std::runtime_error("oops");
|
||||
* co_return;
|
||||
* }
|
||||
* CoroTask<void> caller() {
|
||||
* try { co_await failing(); }
|
||||
* catch (std::runtime_error const&) { // caught here }
|
||||
* }
|
||||
*
|
||||
* Caveats / Pitfalls
|
||||
* ==================
|
||||
*
|
||||
* BUG-RISK: Dangling references in coroutine parameters.
|
||||
* Coroutine parameters are copied into the frame, but references
|
||||
* are NOT -- they are stored as-is. If the referent goes out of scope
|
||||
* before the coroutine finishes, you get use-after-free.
|
||||
*
|
||||
* // BROKEN -- local dies before coroutine runs:
|
||||
* CoroTask<void> bad(int& ref) { co_return; }
|
||||
* void launch() {
|
||||
* int local = 42;
|
||||
* auto task = bad(local); // frame stores &local
|
||||
* } // local destroyed; frame holds dangling ref
|
||||
*
|
||||
* // FIX -- pass by value, or ensure lifetime via shared_ptr.
|
||||
*
|
||||
* BUG-RISK: GCC 14 corrupts reference captures in coroutine lambdas.
|
||||
* When a lambda that returns CoroTask captures by reference ([&]),
|
||||
* GCC 14 may generate a corrupted coroutine frame. Always capture
|
||||
* by explicit pointer-to-value instead:
|
||||
*
|
||||
* // BROKEN on GCC 14:
|
||||
* jq.postCoroTask(t, n, [&](auto) -> CoroTask<void> { ... });
|
||||
*
|
||||
* // FIX -- capture pointers explicitly:
|
||||
* jq.postCoroTask(t, n, [ptr = &val](auto) -> CoroTask<void> { ... });
|
||||
*
|
||||
* BUG-RISK: Resuming a destroyed or completed CoroTask.
|
||||
* Calling handle().resume() after the coroutine has already run to
|
||||
* completion (done() == true) is undefined behavior. The CoroTaskRunner
|
||||
* guards against this with an XRPL_ASSERT, but standalone usage of
|
||||
* CoroTask must check done() before resuming.
|
||||
*
|
||||
* BUG-RISK: Moving a CoroTask that is being awaited.
|
||||
* If task A is co_await-ed by task B (so A.continuation_ == B), moving
|
||||
* or destroying A will invalidate the continuation link. Never move
|
||||
* or reassign a CoroTask while it is mid-execution or being awaited.
|
||||
*
|
||||
* LIMITATION: CoroTask is fire-and-forget for the top-level owner.
|
||||
* There is no built-in notification when the coroutine finishes.
|
||||
* The caller must use external synchronization (e.g. CoroTaskRunner::join
|
||||
* or a gate/condition_variable) to know when it is done.
|
||||
*
|
||||
* LIMITATION: No cancellation token.
|
||||
* There is no way to cancel a suspended CoroTask from outside. The
|
||||
* coroutine body must cooperatively check a flag (e.g. jq_.isStopping())
|
||||
* after each co_await and co_return early if needed.
|
||||
*
|
||||
* LIMITATION: Stackless -- cannot suspend from nested non-coroutine calls.
|
||||
* If a coroutine calls a regular function that wants to "yield", it
|
||||
* cannot. Only the immediate coroutine body can use co_await.
|
||||
* This is acceptable for rippled because all yield() sites are shallow.
|
||||
*/
|
||||
template <>
class CoroTask<void>
{
public:
    struct promise_type;
    using Handle = std::coroutine_handle<promise_type>;

    /**
     * Coroutine promise. The compiler uses this to manage coroutine state.
     * Stores the exception (if any) and the continuation handle for
     * symmetric transfer back to the awaiting coroutine.
     */
    struct promise_type
    {
        // Captured exception from the coroutine body, rethrown in
        // await_resume() when this task is co_await-ed by a caller.
        std::exception_ptr exception_;

        // Handle to the coroutine that is co_await-ing this task.
        // Set by CoroTask::await_suspend(). FinalAwaiter uses it for
        // symmetric transfer back to the caller. Null for a top-level task.
        std::coroutine_handle<> continuation_;

        /**
         * Create the CoroTask return object.
         * Called by the compiler at coroutine creation.
         */
        CoroTask
        get_return_object()
        {
            return CoroTask{Handle::from_promise(*this)};
        }

        /**
         * Lazy start. The coroutine body does not execute until the
         * handle is explicitly resumed (e.g. by CoroTaskRunner::resume).
         */
        std::suspend_always
        initial_suspend() noexcept
        {
            return {};
        }

        /**
         * Awaiter returned by final_suspend(). Uses symmetric transfer:
         * if a continuation exists, transfers control directly to it
         * (tail-call, no stack growth). Otherwise returns noop_coroutine
         * so the coroutine frame stays alive for the owner to destroy.
         */
        struct FinalAwaiter
        {
            /**
             * Always false: await_suspend must run so the symmetric
             * transfer below can happen.
             */
            bool
            await_ready() noexcept
            {
                return false;
            }

            /**
             * Symmetric transfer: returns the continuation handle so
             * the compiler emits a tail-call instead of a nested resume.
             * If no continuation is set, returns noop_coroutine to
             * suspend at final_suspend without destroying the frame.
             *
             * @param h Handle to this completing coroutine
             *
             * @return Continuation handle, or noop_coroutine
             */
            std::coroutine_handle<>
            await_suspend(Handle h) noexcept
            {
                if (auto cont = h.promise().continuation_)
                    return cont;
                return std::noop_coroutine();
            }

            // Nothing to produce; final_suspend is never resumed past.
            void
            await_resume() noexcept
            {
            }
        };

        /**
         * Returns FinalAwaiter for symmetric transfer at coroutine end.
         */
        FinalAwaiter
        final_suspend() noexcept
        {
            return {};
        }

        /**
         * Called by the compiler for `co_return;` (void coroutine).
         */
        void
        return_void()
        {
        }

        /**
         * Called by the compiler when an exception escapes the coroutine
         * body. Captures it for later rethrowing in await_resume().
         */
        void
        unhandled_exception()
        {
            exception_ = std::current_exception();
        }
    };

    /**
     * Default constructor. Creates an empty (null handle) task.
     */
    CoroTask() = default;

    /**
     * Takes ownership of a compiler-generated coroutine handle.
     *
     * @param h Coroutine handle to own
     */
    explicit CoroTask(Handle h) : handle_(h)
    {
    }

    /**
     * Destroys the coroutine frame if this task owns one.
     */
    ~CoroTask()
    {
        if (handle_)
            handle_.destroy();
    }

    /**
     * Move constructor. Transfers handle ownership, leaves other empty.
     */
    CoroTask(CoroTask&& other) noexcept : handle_(std::exchange(other.handle_, {}))
    {
    }

    /**
     * Move assignment. Destroys the current frame (if any), then takes
     * ownership of other's handle, leaving other empty.
     */
    CoroTask&
    operator=(CoroTask&& other) noexcept
    {
        if (this != &other)
        {
            if (handle_)
                handle_.destroy();
            handle_ = std::exchange(other.handle_, {});
        }
        return *this;
    }

    // Exclusive ownership of the coroutine frame: copying is deleted.
    CoroTask(CoroTask const&) = delete;
    CoroTask&
    operator=(CoroTask const&) = delete;

    /**
     * @return The underlying coroutine_handle
     */
    Handle
    handle() const
    {
        return handle_;
    }

    /**
     * @return true if the coroutine has run to completion (or thrown)
     */
    bool
    done() const
    {
        return handle_ && handle_.done();
    }

    // -- Awaiter interface: allows `co_await someCoroTask;` --

    /**
     * Always false. This task is lazy, so co_await always suspends
     * the caller in order to set up the continuation link.
     */
    bool
    await_ready() const noexcept
    {
        return false;
    }

    /**
     * Stores the caller's handle as our continuation, then returns
     * our handle for symmetric transfer (caller suspends, we resume).
     *
     * @param caller Handle of the coroutine doing co_await on us
     *
     * @return Our handle for symmetric transfer
     */
    std::coroutine_handle<>
    await_suspend(std::coroutine_handle<> caller) noexcept
    {
        XRPL_ASSERT(handle_, "xrpl::CoroTask<void>::await_suspend : handle is valid");
        handle_.promise().continuation_ = caller;
        return handle_;  // Symmetric transfer
    }

    /**
     * Called in the awaiting coroutine's context after this task
     * completes. Rethrows any exception captured by
     * unhandled_exception().
     */
    void
    await_resume()
    {
        XRPL_ASSERT(handle_, "xrpl::CoroTask<void>::await_resume : handle is valid");
        if (auto& ep = handle_.promise().exception_)
            std::rethrow_exception(ep);
    }

private:
    // Exclusively-owned coroutine handle. Null after move or default
    // construction. Destroyed in the destructor.
    Handle handle_;
};
|
||||
|
||||
/**
|
||||
* CoroTask<T> -- coroutine return type for value-returning coroutines.
|
||||
*
|
||||
* Class / Dependency Diagram
|
||||
* ==========================
|
||||
*
|
||||
* CoroTask<T>
|
||||
* +-----------------------------------------------+
|
||||
* | - handle_ : Handle (coroutine_handle<promise>) |
|
||||
* +-----------------------------------------------+
|
||||
* | + handle(), done() |
|
||||
* | + await_ready/suspend/resume (Awaiter iface) |
|
||||
* +-----------------------------------------------+
|
||||
* | owns
|
||||
* v
|
||||
* promise_type
|
||||
* +-----------------------------------------------+
|
||||
* | - result_ : variant<monostate, T, |
|
||||
* | exception_ptr> |
|
||||
* | - continuation_ : std::coroutine_handle<> |
|
||||
* +-----------------------------------------------+
|
||||
* | + get_return_object() -> CoroTask |
|
||||
* | + initial_suspend() -> suspend_always (lazy) |
|
||||
* | + final_suspend() -> FinalAwaiter |
|
||||
* | + return_value(T) -> stores in result_[1] |
|
||||
* | + unhandled_exception -> stores in result_[2] |
|
||||
* +-----------------------------------------------+
|
||||
* | returns at final_suspend
|
||||
* v
|
||||
* FinalAwaiter (same symmetric-transfer pattern as CoroTask<void>)
|
||||
*
|
||||
* Value Extraction
|
||||
* ----------------
|
||||
* await_resume() inspects the variant:
|
||||
* - index 2 (exception_ptr) -> rethrow
|
||||
* - index 1 (T) -> return value via move
|
||||
*
|
||||
* Usage Examples
|
||||
* ==============
|
||||
*
|
||||
* 1. Simple value return:
|
||||
*
|
||||
* CoroTask<int> computeAnswer() { co_return 42; }
|
||||
*
|
||||
* CoroTask<void> caller() {
|
||||
* int v = co_await computeAnswer(); // v == 42
|
||||
* }
|
||||
*
|
||||
* 2. Chaining value-returning coroutines:
|
||||
*
|
||||
* CoroTask<int> add(int a, int b) { co_return a + b; }
|
||||
* CoroTask<int> doubleSum(int a, int b) {
|
||||
* int s = co_await add(a, b);
|
||||
* co_return s * 2;
|
||||
* }
|
||||
*
|
||||
* 3. Exception propagation from inner to outer:
|
||||
*
|
||||
* CoroTask<int> failing() {
|
||||
* throw std::runtime_error("bad");
|
||||
* co_return 0; // never reached
|
||||
* }
|
||||
* CoroTask<void> caller() {
|
||||
* try {
|
||||
* int v = co_await failing(); // throws here
|
||||
* } catch (std::runtime_error const& e) {
|
||||
* // e.what() == "bad"
|
||||
* }
|
||||
* }
|
||||
*
|
||||
* Caveats / Pitfalls (in addition to CoroTask<void> caveats above)
|
||||
* ================================================================
|
||||
*
|
||||
* BUG-RISK: await_resume() moves the value out of the variant.
|
||||
* Calling co_await on the same CoroTask<T> instance twice is undefined
|
||||
* behavior -- the second call will see a moved-from T. CoroTask is
|
||||
* single-shot: one co_return, one co_await.
|
||||
*
|
||||
* BUG-RISK: T must be move-constructible.
|
||||
* return_value(T) takes by value and moves into the variant.
|
||||
* Types that are not movable cannot be used as T.
|
||||
*
|
||||
* LIMITATION: No co_yield support.
|
||||
* CoroTask<T> only supports a single co_return. It does not implement
|
||||
* yield_value(), so using co_yield inside a CoroTask<T> coroutine is a
|
||||
* compile error. For streaming values, a different return type
|
||||
* (e.g. Generator<T>) would be needed.
|
||||
*
|
||||
* LIMITATION: Result is only accessible via co_await.
|
||||
* There is no .get() or .result() method. The value can only be
|
||||
* extracted by co_await-ing the CoroTask<T> from inside another
|
||||
* coroutine. For extracting results in non-coroutine code, pass a
|
||||
* pointer to the caller and write through it (as the tests do).
|
||||
*/
|
||||
template <typename T>
class CoroTask
{
    // return_value(T) moves the result into the variant; enforce the
    // requirement up front for a clear diagnostic.
    static_assert(
        std::is_move_constructible_v<T>,
        "CoroTask<T> requires T to be move-constructible");

public:
    struct promise_type;
    using Handle = std::coroutine_handle<promise_type>;

    /**
     * Coroutine promise for value-returning coroutines.
     * Stores the result as a variant: monostate (not yet set),
     * T (co_return value), or exception_ptr (unhandled exception).
     */
    struct promise_type
    {
        // Tri-state result:
        //   index 0 (monostate) -- coroutine has not yet completed
        //   index 1 (T)         -- co_return value stored here
        //   index 2 (exception) -- unhandled exception captured here
        std::variant<std::monostate, T, std::exception_ptr> result_;

        // Handle to the coroutine co_await-ing this task. Used by
        // FinalAwaiter for symmetric transfer. Null for top-level tasks.
        std::coroutine_handle<> continuation_;

        /**
         * Create the CoroTask return object.
         * Called by the compiler at coroutine creation.
         */
        CoroTask
        get_return_object()
        {
            return CoroTask{Handle::from_promise(*this)};
        }

        /**
         * Lazy start. Coroutine body does not run until explicitly resumed.
         */
        std::suspend_always
        initial_suspend() noexcept
        {
            return {};
        }

        /**
         * Symmetric-transfer awaiter at coroutine completion.
         * Same pattern as CoroTask<void>::FinalAwaiter.
         */
        struct FinalAwaiter
        {
            // Always false so await_suspend runs for symmetric transfer.
            bool
            await_ready() noexcept
            {
                return false;
            }

            /**
             * Returns the continuation for symmetric transfer, or
             * noop_coroutine if this is a top-level task.
             *
             * @param h Handle to this completing coroutine
             *
             * @return Continuation handle, or noop_coroutine
             */
            std::coroutine_handle<>
            await_suspend(Handle h) noexcept
            {
                if (auto cont = h.promise().continuation_)
                    return cont;
                return std::noop_coroutine();
            }

            // Nothing to produce; final_suspend is never resumed past.
            void
            await_resume() noexcept
            {
            }
        };

        // Returns FinalAwaiter for symmetric transfer at coroutine end.
        FinalAwaiter
        final_suspend() noexcept
        {
            return {};
        }

        /**
         * Called by the compiler for `co_return value;`.
         * Moves the value into result_ at index 1.
         *
         * @param value The value to store
         */
        void
        return_value(T value)
        {
            result_.template emplace<1>(std::move(value));
        }

        /**
         * Captures unhandled exceptions at index 2 of result_.
         * Rethrown later in await_resume().
         */
        void
        unhandled_exception()
        {
            result_.template emplace<2>(std::current_exception());
        }
    };

    /**
     * Default constructor. Creates an empty (null handle) task.
     */
    CoroTask() = default;

    /**
     * Takes ownership of a compiler-generated coroutine handle.
     *
     * @param h Coroutine handle to own
     */
    explicit CoroTask(Handle h) : handle_(h)
    {
    }

    /**
     * Destroys the coroutine frame if this task owns one.
     */
    ~CoroTask()
    {
        if (handle_)
            handle_.destroy();
    }

    /**
     * Move constructor. Transfers handle ownership, leaves other empty.
     */
    CoroTask(CoroTask&& other) noexcept : handle_(std::exchange(other.handle_, {}))
    {
    }

    /**
     * Move assignment. Destroys the current frame (if any), then takes
     * ownership of other's handle, leaving other empty.
     */
    CoroTask&
    operator=(CoroTask&& other) noexcept
    {
        if (this != &other)
        {
            if (handle_)
                handle_.destroy();
            handle_ = std::exchange(other.handle_, {});
        }
        return *this;
    }

    // Exclusive ownership of the coroutine frame: copying is deleted.
    CoroTask(CoroTask const&) = delete;
    CoroTask&
    operator=(CoroTask const&) = delete;

    /**
     * @return The underlying coroutine_handle
     */
    Handle
    handle() const
    {
        return handle_;
    }

    /**
     * @return true if the coroutine has run to completion (or thrown)
     */
    bool
    done() const
    {
        return handle_ && handle_.done();
    }

    // -- Awaiter interface: allows `T val = co_await someCoroTask;` --

    /**
     * Always false. co_await always suspends to set up the continuation.
     */
    bool
    await_ready() const noexcept
    {
        return false;
    }

    /**
     * Stores the caller as our continuation, then returns our handle
     * for symmetric transfer.
     *
     * @param caller Handle of the coroutine doing co_await on us
     *
     * @return Our handle for symmetric transfer
     */
    std::coroutine_handle<>
    await_suspend(std::coroutine_handle<> caller) noexcept
    {
        XRPL_ASSERT(handle_, "xrpl::CoroTask<T>::await_suspend : handle is valid");
        handle_.promise().continuation_ = caller;
        return handle_;
    }

    /**
     * Extracts the result: rethrows if an exception was captured,
     * otherwise moves the T value out of the variant. Single-shot:
     * calling twice on the same task is undefined (moved-from T).
     *
     * @return The co_return-ed value
     */
    T
    await_resume()
    {
        XRPL_ASSERT(handle_, "xrpl::CoroTask<T>::await_resume : handle is valid");
        auto& result = handle_.promise().result_;
        if (auto* ep = std::get_if<2>(&result))
            std::rethrow_exception(*ep);
        // std::move makes the variant an rvalue so get<1> yields an
        // rvalue reference and the value is moved into the return.
        return std::get<1>(std::move(result));
    }

private:
    // Exclusively-owned coroutine handle. Null after move or default
    // construction. Destroyed in the destructor.
    Handle handle_;
};
|
||||
|
||||
} // namespace xrpl
|
||||
@@ -1,374 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
/**
|
||||
* @file CoroTaskRunner.ipp
|
||||
*
|
||||
* CoroTaskRunner inline implementation.
|
||||
*
|
||||
* This file contains the business logic for managing C++20 coroutines
|
||||
* on the JobQueue. It is included at the bottom of JobQueue.h.
|
||||
*
|
||||
* Data Flow: suspend / post / resume cycle
|
||||
* =========================================
|
||||
*
|
||||
* coroutine body CoroTaskRunner JobQueue
|
||||
* -------------- -------------- --------
|
||||
* |
|
||||
* co_await runner->suspend()
|
||||
* |
|
||||
* +--- await_suspend ------> onSuspend()
|
||||
* | ++nSuspend_ ------------> nSuspend_
|
||||
* | [coroutine is now suspended]
|
||||
* |
|
||||
* . (externally or by yieldAndPost())
|
||||
* .
|
||||
* +--- (caller calls) -----> post()
|
||||
* | ++runCount_
|
||||
* | addJob(resume) ----------> job enqueued
|
||||
* | |
|
||||
* | [worker picks up]
|
||||
* | |
|
||||
* +--- <----- resume() <-----------------------------------+
|
||||
* | --nSuspend_ ------> nSuspend_
|
||||
* | swap in LocalValues (lvs_)
|
||||
* | task_.handle().resume()
|
||||
* | |
|
||||
* | [coroutine body continues here]
|
||||
* | |
|
||||
* | swap out LocalValues
|
||||
* | --runCount_
|
||||
* | cv_.notify_all()
|
||||
* v
|
||||
*
|
||||
* Thread Safety
|
||||
* =============
|
||||
* - mutex_ : guards task_.handle().resume() so that post()-before-suspend
|
||||
* races cannot resume the coroutine while it is still running.
|
||||
* (See the race condition discussion in JobQueue.h)
|
||||
* - mutex_run_ : guards runCount_ counter; used by join() to wait until
|
||||
* all in-flight resume operations complete.
|
||||
* - jq_.m_mutex: guards nSuspend_ increments/decrements.
|
||||
*
|
||||
* Common Mistakes When Modifying This File
|
||||
* =========================================
|
||||
*
|
||||
* 1. Changing lock ordering.
|
||||
* resume() acquires locks sequentially (never held simultaneously):
|
||||
* jq_.m_mutex (released immediately), then mutex_ (held across resume),
|
||||
* then mutex_run_ (released after decrement). post() acquires only
|
||||
* mutex_run_. Any new code path must follow the same order.
|
||||
*
|
||||
* 2. Removing the shared_from_this() capture in post().
|
||||
* The lambda passed to addJob captures [this, sp = shared_from_this()].
|
||||
* If you remove sp, 'this' can be destroyed before the job runs,
|
||||
* causing use-after-free. The sp capture is load-bearing.
|
||||
*
|
||||
* 3. Forgetting to decrement nSuspend_ on a new code path.
|
||||
* Every ++nSuspend_ must have a matching --nSuspend_. If you add a new
|
||||
* suspension path (e.g. a new awaiter) and forget to decrement on resume
|
||||
* or on failure, JobQueue::stop() will hang.
|
||||
*
|
||||
* 4. Calling task_.handle().resume() without holding mutex_.
|
||||
* This allows a race where the coroutine runs on two threads
|
||||
* simultaneously. Always hold mutex_ around resume().
|
||||
*
|
||||
* 5. Swapping LocalValues outside of the mutex_ critical section.
|
||||
* The swap-in and swap-out of LocalValues must bracket the resume()
|
||||
* call. If you move the swap-out before the lock_guard(mutex_) is
|
||||
* released, you break LocalValue isolation for any code that runs
|
||||
* after the coroutine suspends but before the lock is dropped.
|
||||
*/
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
/**
|
||||
* Construct a CoroTaskRunner. Sets runCount_ to 0; does not
|
||||
* create the coroutine. Call init() afterwards.
|
||||
*
|
||||
* @param jq The JobQueue this coroutine will run on
|
||||
* @param type Job type for scheduling priority
|
||||
* @param name Human-readable name for logging
|
||||
*/
|
||||
// Construct a CoroTaskRunner bound to a JobQueue. Only records the
// queue, job type, and name; the coroutine itself is created later by
// init(), which needs shared_from_this() and so cannot run here.
inline JobQueue::CoroTaskRunner::CoroTaskRunner(
    create_t,
    JobQueue& jq,
    JobType type,
    std::string const& name)
    : jq_(jq), type_(type), name_(name), runCount_(0)
{
    // Intentionally empty: all setup happens in the initializer list.
}
|
||||
|
||||
/**
|
||||
* Initialize with a coroutine-returning callable.
|
||||
* Stores the callable on the heap (FuncStore) so it outlives the
|
||||
* coroutine frame. Coroutine frames store a reference to the
|
||||
* callable's implicit object parameter (the lambda). If the callable
|
||||
* is a temporary, that reference dangles after the caller returns.
|
||||
* Keeping the callable alive here ensures the coroutine's captures
|
||||
* remain valid.
|
||||
*
|
||||
* @param f Callable: CoroTask<void>(shared_ptr<CoroTaskRunner>)
|
||||
*/
|
||||
// Initialize with a coroutine-returning callable. The callable is
// stored on the heap (FuncStore) so it outlives this call: the
// coroutine frame keeps a reference to the callable object itself, so
// a temporary callable would leave a dangling reference once the
// caller returns.
//
// @param f Callable: CoroTask<void>(shared_ptr<CoroTaskRunner>)
template <class F>
void
JobQueue::CoroTaskRunner::init(F&& f)
{
    // Decay strips references/cv-qualifiers so the callable is owned by value.
    using Fn = std::decay_t<F>;
    auto store = std::make_unique<FuncStore<Fn>>(std::forward<F>(f));
    // Create the (lazily started) coroutine while `store` is still alive;
    // ordering matters: the frame references the stored callable.
    task_ = store->func(shared_from_this());
    // Keep the callable alive for the lifetime of the coroutine.
    storedFunc_ = std::move(store);
}
|
||||
|
||||
/**
|
||||
* Destructor. Waits for any in-flight resume() to complete, then
|
||||
* asserts (debug) that the coroutine has finished or
|
||||
* expectEarlyExit() was called.
|
||||
*
|
||||
* The join() call is necessary because with async dispatch the
|
||||
* coroutine runs on a worker thread. The gate signal (which wakes
|
||||
* the test thread) can arrive before resume() has set finished_.
|
||||
* join() synchronizes via mutex_run_, establishing a happens-before
|
||||
* edge: finished_ = true -> unlock(mutex_run_) in resume() ->
|
||||
* lock(mutex_run_) in join() -> read finished_.
|
||||
*/
|
||||
// Destructor. In debug builds, waits for any in-flight resume() to
// complete (join() synchronizes on mutex_run_, establishing the
// happens-before edge to the write of finished_), then asserts that
// the coroutine actually finished. Release builds do neither.
inline JobQueue::CoroTaskRunner::~CoroTaskRunner()
{
#ifndef NDEBUG
    // Without join(), the gate that woke the owner can fire before
    // resume() sets finished_, making the assert racy.
    join();
    XRPL_ASSERT(finished_, "xrpl::JobQueue::CoroTaskRunner::~CoroTaskRunner : is finished");
#endif
}
|
||||
|
||||
/**
|
||||
* Increment the JobQueue's suspended-coroutine count (nSuspend_).
|
||||
*/
|
||||
inline void
|
||||
JobQueue::CoroTaskRunner::onSuspend()
|
||||
{
|
||||
std::lock_guard lock(jq_.m_mutex);
|
||||
++jq_.nSuspend_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decrement nSuspend_ without resuming.
|
||||
*/
|
||||
inline void
|
||||
JobQueue::CoroTaskRunner::onUndoSuspend()
|
||||
{
|
||||
std::lock_guard lock(jq_.m_mutex);
|
||||
--jq_.nSuspend_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a SuspendAwaiter whose await_suspend() increments nSuspend_
|
||||
* before the coroutine actually suspends. The caller must later call
|
||||
* post() or resume() to continue execution.
|
||||
*
|
||||
* @return Awaiter for use with `co_await runner->suspend()`
|
||||
*/
|
||||
inline auto
|
||||
JobQueue::CoroTaskRunner::suspend()
|
||||
{
|
||||
/**
|
||||
* Custom awaiter for suspend(). Always suspends (await_ready
|
||||
* returns false) and increments nSuspend_ in await_suspend().
|
||||
*/
|
||||
struct SuspendAwaiter
|
||||
{
|
||||
CoroTaskRunner& runner_; // The runner that owns this coroutine.
|
||||
|
||||
/**
|
||||
* Always returns false so the coroutine suspends.
|
||||
*/
|
||||
bool
|
||||
await_ready() const noexcept
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when the coroutine suspends. Increments nSuspend_
|
||||
* so the JobQueue knows a coroutine is waiting.
|
||||
*/
|
||||
void
|
||||
await_suspend(std::coroutine_handle<>) const
|
||||
{
|
||||
runner_.onSuspend();
|
||||
}
|
||||
|
||||
void
|
||||
await_resume() const noexcept
|
||||
{
|
||||
}
|
||||
};
|
||||
return SuspendAwaiter{*this};
|
||||
}
|
||||
|
||||
/**
|
||||
* Suspend and immediately repost on the JobQueue. Equivalent to
|
||||
* `co_await JobQueueAwaiter{runner}` but uses an inline struct
|
||||
* to work around a GCC-12 codegen bug (see declaration in JobQueue.h).
|
||||
*
|
||||
* If the JobQueue is stopping (post fails), the suspend count is
|
||||
* undone and the coroutine is resumed immediately via h.resume().
|
||||
*
|
||||
* @return An inline YieldPostAwaiter
|
||||
*/
|
||||
inline auto
|
||||
JobQueue::CoroTaskRunner::yieldAndPost()
|
||||
{
|
||||
struct YieldPostAwaiter
|
||||
{
|
||||
CoroTaskRunner& runner_;
|
||||
|
||||
bool
|
||||
await_ready() const noexcept
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
await_suspend(std::coroutine_handle<> h)
|
||||
{
|
||||
runner_.onSuspend();
|
||||
if (!runner_.post())
|
||||
{
|
||||
runner_.onUndoSuspend();
|
||||
h.resume();
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
await_resume() const noexcept
|
||||
{
|
||||
}
|
||||
};
|
||||
return YieldPostAwaiter{*this};
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule coroutine resumption as a job on the JobQueue.
|
||||
* A shared_ptr capture (sp) prevents this CoroTaskRunner from being
|
||||
* destroyed while the job is queued but not yet executed.
|
||||
*
|
||||
* @return false if the JobQueue rejected the job (shutting down)
|
||||
*/
|
||||
inline bool
|
||||
JobQueue::CoroTaskRunner::post()
|
||||
{
|
||||
{
|
||||
std::lock_guard lk(mutex_run_);
|
||||
++runCount_;
|
||||
}
|
||||
|
||||
// sp prevents 'this' from being destroyed while the job is pending
|
||||
if (jq_.addJob(type_, name_, [this, sp = shared_from_this()]() { resume(); }))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
// The coroutine will not run. Undo the runCount_ increment.
|
||||
std::lock_guard lk(mutex_run_);
|
||||
--runCount_;
|
||||
cv_.notify_all();
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resume the coroutine on the current thread.
|
||||
*
|
||||
* Steps:
|
||||
* 1. Decrement nSuspend_ (under jq_.m_mutex)
|
||||
* 2. Swap in this coroutine's LocalValues for thread-local isolation
|
||||
* 3. Resume the coroutine handle (under mutex_)
|
||||
* 4. Swap out LocalValues, restoring the thread's previous state
|
||||
* 5. Decrement runCount_ and notify join() waiters
|
||||
*
|
||||
* @pre post() must have been called before resume(). Direct calls
|
||||
* without a prior post() will corrupt runCount_ and break join().
|
||||
* Note: runCount_ is NOT incremented here — post() already did that.
|
||||
* This ensures join() stays blocked for the entire post->resume lifetime.
|
||||
*/
|
||||
inline void
JobQueue::CoroTaskRunner::resume()
{
    // Step 1: this coroutine is no longer suspended from the JobQueue's
    // point of view.
    {
        std::lock_guard lock(jq_.m_mutex);
        --jq_.nSuspend_;
    }
    // Step 2: swap in this coroutine's private LocalValues so it sees its
    // own thread-local state regardless of which worker thread runs it.
    auto saved = detail::getLocalValues().release();
    detail::getLocalValues().reset(&lvs_);
    // Step 3: mutex_ serializes all resumes, so the coroutine body never
    // executes concurrently on two threads (e.g. post-before-suspend races).
    std::lock_guard lock(mutex_);
    XRPL_ASSERT(
        task_.handle() && !task_.done(),
        "xrpl::JobQueue::CoroTaskRunner::resume : task handle is valid and not done");
    task_.handle().resume();
    // Step 4: restore the worker thread's previous LocalValues.
    detail::getLocalValues().release();
    detail::getLocalValues().reset(saved);
    if (task_.done())
    {
        finished_ = true;
        // Break the shared_ptr cycle: frame -> shared_ptr<runner> -> this.
        // Use std::move (not task_ = {}) so task_.handle_ is null BEFORE the
        // frame is destroyed. operator= would destroy the frame while handle_
        // still holds the old value -- a re-entrancy hazard on GCC-12 if
        // frame destruction triggers runner cleanup.
        [[maybe_unused]] auto completed = std::move(task_);
    }
    // Step 5: this resume is complete; wake join() waiters. Note runCount_
    // was incremented by post(), not here — see the @pre above.
    std::lock_guard lk(mutex_run_);
    --runCount_;
    cv_.notify_all();
}
|
||||
|
||||
/**
|
||||
* @return true if the coroutine has not yet run to completion
|
||||
*/
|
||||
inline bool
|
||||
JobQueue::CoroTaskRunner::runnable() const
|
||||
{
|
||||
// After normal completion, task_ is reset to break the shared_ptr cycle
|
||||
// (handle_ becomes null). A null handle means the coroutine is done.
|
||||
return task_.handle() && !task_.done();
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle early termination when the coroutine never ran (e.g. JobQueue
|
||||
* is stopping). Decrements nSuspend_ and destroys the coroutine frame
|
||||
* to break the shared_ptr cycle: frame -> lambda -> runner -> frame.
|
||||
*/
|
||||
inline void
JobQueue::CoroTaskRunner::expectEarlyExit()
{
    // Guard on finished_ so a second call cannot double-decrement
    // nSuspend_ (postCoroTask incremented it exactly once).
    if (!finished_)
    {
        std::lock_guard lock(jq_.m_mutex);
        --jq_.nSuspend_;
        finished_ = true;
    }
    // Break the shared_ptr cycle: frame -> shared_ptr<runner> -> this.
    // The coroutine is at initial_suspend and never ran user code, so
    // destroying it is safe. Use std::move (not task_ = {}) so
    // task_.handle_ is null before the frame is destroyed.
    {
        [[maybe_unused]] auto completed = std::move(task_);
    }
    // Release the heap-stored lambda only AFTER the frame is gone: the
    // frame references the callable, so this ordering must not change.
    storedFunc_.reset();
}
|
||||
|
||||
/**
|
||||
* Block until all pending/active resume operations complete.
|
||||
* Uses cv_ + mutex_run_ to wait until runCount_ reaches 0 or
|
||||
* finished_ becomes true. The finished_ check handles the case
|
||||
* where resume() is called directly (without post()), which
|
||||
* decrements runCount_ below zero. In that scenario runCount_
|
||||
* never returns to 0, but finished_ becoming true guarantees
|
||||
* the coroutine is done and no more resumes will occur.
|
||||
*/
|
||||
inline void
|
||||
JobQueue::CoroTaskRunner::join()
|
||||
{
|
||||
std::unique_lock<std::mutex> lk(mutex_run_);
|
||||
cv_.wait(lk, [this]() { return runCount_ == 0 || finished_; });
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
#include <xrpl/basics/LocalValue.h>
|
||||
#include <xrpl/core/ClosureCounter.h>
|
||||
#include <xrpl/core/CoroTask.h>
|
||||
#include <xrpl/core/JobTypeData.h>
|
||||
#include <xrpl/core/JobTypes.h>
|
||||
#include <xrpl/core/detail/Workers.h>
|
||||
@@ -11,7 +10,6 @@
|
||||
#include <boost/context/protected_fixedsize_stack.hpp>
|
||||
#include <boost/coroutine2/all.hpp>
|
||||
|
||||
#include <coroutine>
|
||||
#include <set>
|
||||
|
||||
namespace xrpl {
|
||||
@@ -122,419 +120,6 @@ public:
|
||||
join();
|
||||
};
|
||||
|
||||
/** C++20 coroutine lifecycle manager. Replaces Coro for new code.
|
||||
*
|
||||
* Class / Inheritance / Dependency Diagram
|
||||
* =========================================
|
||||
*
|
||||
* std::enable_shared_from_this<CoroTaskRunner>
|
||||
* ^
|
||||
* | (public inheritance)
|
||||
* |
|
||||
* CoroTaskRunner
|
||||
* +---------------------------------------------------+
|
||||
* | - lvs_ : detail::LocalValues |
|
||||
* | - jq_ : JobQueue& |
|
||||
* | - type_ : JobType |
|
||||
* | - name_ : std::string |
|
||||
* | - runCount_ : int (in-flight resumes) |
|
||||
* | - mutex_ : std::mutex (coroutine guard) |
|
||||
* | - mutex_run_ : std::mutex (join guard) |
|
||||
* | - cv_ : condition_variable |
|
||||
* | - task_ : CoroTask<void> |
|
||||
* | - storedFunc_ : unique_ptr<FuncBase> (type-erased)|
|
||||
* +---------------------------------------------------+
|
||||
* | + init(F&&) : set up coroutine callable |
|
||||
* | + onSuspend() : ++jq_.nSuspend_ |
|
||||
* | + onUndoSuspend() : --jq_.nSuspend_ |
|
||||
* | + suspend() : returns SuspendAwaiter |
|
||||
* | + post() : schedule resume on JobQueue |
|
||||
* | + resume() : resume coroutine on caller |
|
||||
* | + runnable() : !task_.done() |
|
||||
* | + expectEarlyExit() : teardown for failed post |
|
||||
* | + join() : block until not running |
|
||||
* +---------------------------------------------------+
|
||||
* | |
|
||||
* | owns | references
|
||||
* v v
|
||||
* CoroTask<void> JobQueue
|
||||
* (coroutine frame) (thread pool + nSuspend_)
|
||||
*
|
||||
* FuncBase / FuncStore<F> (type-erased heap storage
|
||||
* for the coroutine lambda)
|
||||
*
|
||||
* Coroutine Lifecycle (Control Flow)
|
||||
* ===================================
|
||||
*
|
||||
* Caller thread JobQueue worker thread
|
||||
* ------------- ----------------------
|
||||
* postCoroTask(f)
|
||||
* |
|
||||
* +-- check stopping_ (reject if JQ shutting down)
|
||||
* +-- ++nSuspend_ (lazy start counts as suspended)
|
||||
* +-- make_shared<CoroTaskRunner>
|
||||
* +-- init(f)
|
||||
* | +-- store lambda on heap (FuncStore)
|
||||
* | +-- task_ = f(shared_from_this())
|
||||
* | [coroutine created, suspended at initial_suspend]
|
||||
* +-- post()
|
||||
* | +-- ++runCount_
|
||||
* | +-- addJob(type_, [resume]{})
|
||||
* | resume()
|
||||
* | |
|
||||
* | +-- --nSuspend_
|
||||
* | +-- swap in LocalValues
|
||||
* | +-- task_.handle().resume()
|
||||
* | | [coroutine body runs]
|
||||
* | | ...
|
||||
* | | co_await suspend()
|
||||
* | | +-- ++nSuspend_
|
||||
* | | [coroutine suspends]
|
||||
* | +-- swap out LocalValues
|
||||
* | +-- --runCount_
|
||||
* | +-- cv_.notify_all()
|
||||
* |
|
||||
* post() <-- called externally or by yieldAndPost()
|
||||
* +-- ++runCount_
|
||||
* +-- addJob(type_, [resume]{})
|
||||
* resume()
|
||||
* |
|
||||
* +-- [coroutine body continues]
|
||||
* +-- co_return
|
||||
* +-- --runCount_
|
||||
* +-- cv_.notify_all()
|
||||
* join()
|
||||
* +-- cv_.wait([]{runCount_ == 0})
|
||||
* +-- [done]
|
||||
*
|
||||
* Usage Examples
|
||||
* ==============
|
||||
*
|
||||
* 1. Fire-and-forget coroutine (most common pattern):
|
||||
*
|
||||
* jq.postCoroTask(jtCLIENT, "MyWork",
|
||||
* [](auto runner) -> CoroTask<void> {
|
||||
* doSomeWork();
|
||||
* co_await runner->suspend(); // yield to other jobs
|
||||
* doMoreWork();
|
||||
* co_return;
|
||||
* });
|
||||
*
|
||||
* 2. Manually controlling suspend / resume (external trigger):
|
||||
*
|
||||
* auto runner = jq.postCoroTask(jtCLIENT, "ExtTrigger",
|
||||
* [&result](auto runner) -> CoroTask<void> {
|
||||
* startAsyncOperation(callback);
|
||||
* co_await runner->suspend();
|
||||
* // callback called runner->post() to get here
|
||||
* result = collectResult();
|
||||
* co_return;
|
||||
* });
|
||||
* // ... later, from the callback:
|
||||
* runner->post(); // reschedule the coroutine on the JobQueue
|
||||
*
|
||||
* 3. Using yieldAndPost() for automatic suspend + repost:
|
||||
*
|
||||
* jq.postCoroTask(jtCLIENT, "AutoRepost",
|
||||
* [](auto runner) -> CoroTask<void> {
|
||||
* step1();
|
||||
* co_await runner->yieldAndPost(); // yield + auto-repost
|
||||
* step2();
|
||||
* co_await runner->yieldAndPost();
|
||||
* step3();
|
||||
* co_return;
|
||||
* });
|
||||
*
|
||||
* 4. Checking shutdown after co_await (cooperative cancellation):
|
||||
*
|
||||
* jq.postCoroTask(jtCLIENT, "Cancellable",
|
||||
* [&jq](auto runner) -> CoroTask<void> {
|
||||
* while (moreWork()) {
|
||||
* co_await runner->yieldAndPost();
|
||||
* if (jq.isStopping())
|
||||
* co_return; // bail out cleanly
|
||||
* processNextItem();
|
||||
* }
|
||||
* co_return;
|
||||
* });
|
||||
*
|
||||
* Caveats / Pitfalls
|
||||
* ==================
|
||||
*
|
||||
* BUG-RISK: Calling suspend() without a matching post()/resume().
|
||||
* After co_await runner->suspend(), the coroutine is parked and
|
||||
* nSuspend_ is incremented. If nothing ever calls post() or
|
||||
* resume(), the coroutine is leaked and JobQueue::stop() will
|
||||
* hang forever waiting for nSuspend_ to reach zero.
|
||||
*
|
||||
* BUG-RISK: Calling post() on an already-running coroutine.
|
||||
* post() schedules a resume() job. If the coroutine has not
|
||||
* actually suspended yet (no co_await executed), the resume job
|
||||
* will try to call handle().resume() while the coroutine is still
|
||||
* running on another thread. This is UB. The mutex_ prevents
|
||||
* data corruption but the logic is wrong — always co_await
|
||||
* suspend() before calling post(). (The test incorrect_order()
|
||||
* shows this works only because mutex_ serializes the calls.)
|
||||
*
|
||||
* BUG-RISK: Dropping the shared_ptr<CoroTaskRunner> before join().
|
||||
* The CoroTaskRunner destructor asserts that finished_ is true
|
||||
* (the coroutine completed). If you let the last shared_ptr die
|
||||
* while the coroutine is still running or suspended, you get an
|
||||
* assertion failure in debug and UB in release. Always call
|
||||
* join() or expectEarlyExit() first.
|
||||
*
|
||||
* BUG-RISK: Lambda captures outliving the coroutine frame.
|
||||
* The lambda passed to postCoroTask is heap-allocated (FuncStore)
|
||||
* to prevent dangling. But objects captured by pointer still need
|
||||
* their own lifetime management. If you capture a raw pointer to
|
||||
* a stack variable, and the stack frame exits before the coroutine
|
||||
* finishes, the pointer dangles. Use shared_ptr or ensure the
|
||||
* pointed-to object outlives the coroutine.
|
||||
*
|
||||
* BUG-RISK: Forgetting co_return in a void coroutine.
|
||||
* If the coroutine body falls off the end without co_return,
|
||||
* the compiler may silently treat it as co_return (per standard),
|
||||
* but some compilers warn. Always write explicit co_return.
|
||||
*
|
||||
* LIMITATION: CoroTaskRunner only supports CoroTask<void>.
|
||||
* The task_ member is CoroTask<void>. To return values from
|
||||
* the top-level coroutine, write through a captured pointer
|
||||
* (as the tests demonstrate), or co_await inner CoroTask<T>
|
||||
* coroutines that return values.
|
||||
*
|
||||
* LIMITATION: One coroutine per CoroTaskRunner.
|
||||
* init() must be called exactly once. You cannot reuse a
|
||||
* CoroTaskRunner to run a second coroutine. Create a new one
|
||||
* via postCoroTask() instead.
|
||||
*
|
||||
* LIMITATION: No timeout on join().
|
||||
* join() blocks indefinitely. If the coroutine is suspended
|
||||
* and never posted, join() will deadlock. Use timed waits
|
||||
* on the gate pattern (condition_variable + wait_for) in tests.
|
||||
*/
|
||||
class CoroTaskRunner : public std::enable_shared_from_this<CoroTaskRunner>
{
private:
    // Per-coroutine thread-local storage, swapped in before each resume()
    // and out after, so the coroutine sees its own LocalValue state
    // regardless of which worker thread executes it.
    detail::LocalValues lvs_;

    // Owning JobQueue: used to post jobs, adjust nSuspend_, and lock
    // jq_.m_mutex.
    JobQueue& jq_;

    // Job type passed to addJob() when posting this coroutine.
    JobType type_;

    // Human-readable job name, for logging.
    std::string name_;

    // In-flight resume operations (pending + active). Incremented by
    // post(), decremented when resume() finishes; guarded by mutex_run_.
    // join() blocks until it reaches 0. A counter (not a bool) is needed
    // because post() can be called from within the coroutine body
    // (yieldAndPost()), enqueuing a second resume while the first still
    // runs — a bool would lose the pending one.
    int runCount_;

    // Serializes resume() calls so the coroutine body never executes on
    // two threads at once (handles the post-before-suspend race).
    std::mutex mutex_;

    // Guards runCount_; paired with cv_ for join().
    std::mutex mutex_run_;

    // Notified when runCount_ drops to zero, waking join() waiters.
    std::condition_variable cv_;

    // Owns the coroutine frame. Set by init(); emptied on completion (in
    // resume()) or early termination (expectEarlyExit()) to break the
    // shared_ptr cycle.
    CoroTask<void> task_;

    // Type-erased base for heap-stored callables; keeps the coroutine
    // lambda alive as long as the frame may reference it. @see FuncStore
    struct FuncBase
    {
        virtual ~FuncBase() = default;
    };

    // Concrete storage for a callable of type F. The coroutine frame holds
    // a reference to the lambda's implicit object parameter; storing the
    // lambda here (on the heap, for the runner's lifetime) prevents that
    // reference from dangling when the caller's temporary dies.
    template <class F>
    struct FuncStore : FuncBase
    {
        F func;  // The stored coroutine lambda.
        explicit FuncStore(F&& f) : func(std::move(f))
        {
        }
    };

    // Heap-allocated callable storage, set by init().
    std::unique_ptr<FuncBase> storedFunc_;

    // True once the coroutine completed or expectEarlyExit() ran. Asserted
    // in the (debug) destructor to catch leaked runners; also guards
    // expectEarlyExit() against double-decrementing nSuspend_.
    bool finished_ = false;

public:
    // Tag type restricting construction to postCoroTask().
    struct create_t
    {
        explicit create_t() = default;
    };

    /** Construct a runner (private by convention via the create_t tag).
        @param jq   JobQueue this coroutine will run on
        @param type Job type for scheduling priority
        @param name Human-readable name for logging
    */
    CoroTaskRunner(create_t, JobQueue&, JobType, std::string const&);

    // Non-copyable: owns a unique coroutine frame and synchronization state.
    CoroTaskRunner(CoroTaskRunner const&) = delete;
    CoroTaskRunner&
    operator=(CoroTaskRunner const&) = delete;

    /** Destructor. Asserts (debug) that the coroutine finished or
        expectEarlyExit() was called. */
    ~CoroTaskRunner();

    /** Attach a coroutine-returning callable. Must be called exactly once,
        after this object is managed by shared_ptr (uses shared_from_this).
        Handled automatically by postCoroTask().
        @param f Callable: CoroTask<void>(shared_ptr<CoroTaskRunner>)
    */
    template <class F>
    void
    init(F&& f);

    /** Increment jq_.nSuspend_. Every call must be balanced by a decrement
        (via resume() or onUndoSuspend()) or JobQueue::stop() hangs. */
    void
    onSuspend();

    /** Decrement jq_.nSuspend_ without resuming; undoes onSuspend() when a
        post() fails (e.g. JobQueue is stopping). */
    void
    onUndoSuspend();

    /** Suspend the coroutine; the awaiter bumps nSuspend_ before the
        suspension takes effect. Caller must later post() or resume().
        @return Awaiter for `co_await runner->suspend()` */
    auto
    suspend();

    /** Suspend and immediately repost on the JobQueue, atomically inside
        await_suspend. Inline-awaiter form works around a GCC-12 codegen
        bug with external awaiters at multiple co_await points.
        @return Awaiter for `co_await runner->yieldAndPost()` */
    auto
    yieldAndPost();

    /** Schedule resumption as a JobQueue job; captures shared_from_this()
        so the runner outlives the queued job.
        @return true if accepted; false if the JobQueue is stopping */
    bool
    post();

    /** Resume the coroutine on the current thread. Lock order (sequential,
        non-overlapping): jq_.m_mutex -> mutex_ -> mutex_run_.
        @pre post() must precede resume(); direct calls corrupt runCount_. */
    void
    resume();

    /** @return true if the coroutine has not yet run to completion */
    bool
    runnable() const;

    /** Teardown for a coroutine that never ran: decrements nSuspend_ and
        destroys the frame to break the shared_ptr cycle. Called by
        postCoroTask() when post() fails. */
    void
    expectEarlyExit();

    /** Block until all pending/active resumes complete.
        Warning: deadlocks if the coroutine is suspended and never posted. */
    void
    join();
};
|
||||
|
||||
using JobFunction = std::function<void()>;
|
||||
|
||||
JobQueue(
|
||||
@@ -581,19 +166,6 @@ public:
|
||||
std::shared_ptr<Coro>
|
||||
postCoro(JobType t, std::string const& name, F&& f);
|
||||
|
||||
/** Creates a C++20 coroutine and adds a job to the queue to run it.
|
||||
|
||||
@param t The type of job.
|
||||
@param name Name of the job.
|
||||
@param f Callable with signature
|
||||
CoroTask<void>(std::shared_ptr<CoroTaskRunner>).
|
||||
|
||||
@return shared_ptr to posted CoroTaskRunner. nullptr if not successful.
|
||||
*/
|
||||
template <class F>
|
||||
std::shared_ptr<CoroTaskRunner>
|
||||
postCoroTask(JobType t, std::string const& name, F&& f);
|
||||
|
||||
/** Jobs waiting at this priority.
|
||||
*/
|
||||
int
|
||||
@@ -808,7 +380,6 @@ private:
|
||||
} // namespace xrpl
|
||||
|
||||
#include <xrpl/core/Coro.ipp>
|
||||
#include <xrpl/core/CoroTaskRunner.ipp>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
@@ -831,69 +402,4 @@ JobQueue::postCoro(JobType t, std::string const& name, F&& f)
|
||||
return coro;
|
||||
}
|
||||
|
||||
// postCoroTask — entry point for launching a C++20 coroutine on the JobQueue.
|
||||
//
|
||||
// Control Flow
|
||||
// ============
|
||||
//
|
||||
// postCoroTask(t, name, f)
|
||||
// |
|
||||
// +-- 1. Check stopping_ — reject if JQ shutting down
|
||||
// |
|
||||
// +-- 2. ++nSuspend_ (mirrors Boost Coro ctor's implicit yield)
|
||||
// | The coroutine is "suspended" from the JobQueue's perspective
|
||||
// | even though it hasn't run yet — this keeps the JQ shutdown
|
||||
// | logic correct (it waits for nSuspend_ to reach 0).
|
||||
// |
|
||||
// +-- 3. Create CoroTaskRunner (shared_ptr, ref-counted)
|
||||
// |
|
||||
// +-- 4. runner->init(f)
|
||||
// | +-- Heap-allocate the lambda (FuncStore) to prevent
|
||||
// | | dangling captures in the coroutine frame
|
||||
// | +-- task_ = f(shared_from_this())
|
||||
// | [coroutine created but NOT started — lazy initial_suspend]
|
||||
// |
|
||||
// +-- 5. runner->post()
|
||||
// | +-- addJob(type_, [resume]{}) → resume on worker thread
|
||||
// | +-- failure (JQ stopping):
|
||||
// | +-- runner->expectEarlyExit()
|
||||
// | | --nSuspend_, destroy coroutine frame
|
||||
// | +-- return nullptr
|
||||
//
|
||||
// Why async post() instead of synchronous resume()?
|
||||
// ==================================================
|
||||
// The initial dispatch MUST use async post() so the coroutine body runs on
|
||||
// a JobQueue worker thread, not the caller's thread. resume() swaps the
|
||||
// caller's thread-local LocalValues with the coroutine's private copy.
|
||||
// If the coroutine mutates LocalValues (e.g. thread_specific_storage test),
|
||||
// those mutations bleed back into the caller's thread-local state after the
|
||||
// swap-out, corrupting subsequent tests that share the same thread pool.
|
||||
// Async post() avoids this by running the coroutine on a worker thread whose
|
||||
// LocalValues are managed by the thread pool, not by the caller.
|
||||
//
|
||||
template <class F>
|
||||
std::shared_ptr<JobQueue::CoroTaskRunner>
|
||||
JobQueue::postCoroTask(JobType t, std::string const& name, F&& f)
|
||||
{
|
||||
// Reject if the JQ is shutting down — matches addJob()'s stopping_ check.
|
||||
// Must check before incrementing nSuspend_ to avoid leaving an orphan
|
||||
// count that would cause stop() to hang.
|
||||
if (stopping_)
|
||||
return nullptr;
|
||||
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
++nSuspend_;
|
||||
}
|
||||
|
||||
auto runner = std::make_shared<CoroTaskRunner>(CoroTaskRunner::create_t{}, *this, t, name);
|
||||
runner->init(std::forward<F>(f));
|
||||
if (!runner->post())
|
||||
{
|
||||
runner->expectEarlyExit();
|
||||
runner.reset();
|
||||
}
|
||||
return runner;
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
@@ -1,206 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <xrpl/core/JobQueue.h>
|
||||
|
||||
#include <coroutine>
|
||||
#include <memory>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
/**
|
||||
* Awaiter that suspends and immediately reschedules on the JobQueue.
|
||||
* Equivalent to calling yield() followed by post() in the old Coro API.
|
||||
*
|
||||
* Usage:
|
||||
* co_await JobQueueAwaiter{runner};
|
||||
*
|
||||
* What it waits for: The coroutine is re-queued as a job and resumes
|
||||
* when a worker thread picks it up.
|
||||
*
|
||||
* Which thread resumes: A JobQueue worker thread.
|
||||
*
|
||||
* What await_resume() returns: void.
|
||||
*
|
||||
* Dependency Diagram
|
||||
* ==================
|
||||
*
|
||||
* JobQueueAwaiter
|
||||
* +----------------------------------------------+
|
||||
* | + runner : shared_ptr<CoroTaskRunner> |
|
||||
* +----------------------------------------------+
|
||||
* | + await_ready() -> false (always suspend) |
|
||||
* | + await_suspend() -> bool (suspend or cancel) |
|
||||
* | + await_resume() -> void |
|
||||
* +----------------------------------------------+
|
||||
* | |
|
||||
* | uses | uses
|
||||
* v v
|
||||
* CoroTaskRunner JobQueue
|
||||
* .onSuspend() (via runner->post() -> addJob)
|
||||
* .onUndoSuspend()
|
||||
* .post()
|
||||
*
|
||||
* Control Flow (await_suspend)
|
||||
* ============================
|
||||
*
|
||||
* co_await JobQueueAwaiter{runner}
|
||||
* |
|
||||
* +-- await_ready() -> false
|
||||
* +-- await_suspend(handle)
|
||||
* |
|
||||
* +-- runner->onSuspend() // ++nSuspend_
|
||||
* +-- runner->post() // addJob to JobQueue
|
||||
* | |
|
||||
* | +-- success? return noop_coroutine()
|
||||
* | | // coroutine stays suspended;
|
||||
* | | // worker thread will call resume()
|
||||
* | +-- failure? (JQ stopping)
|
||||
* | +-- runner->onUndoSuspend() // --nSuspend_
|
||||
* | +-- return handle // symmetric transfer back
|
||||
* | // coroutine continues immediately
|
||||
* | // so it can clean up and co_return
|
||||
*
|
||||
* DEPRECATED — prefer `co_await runner->yieldAndPost()`
|
||||
* =====================================================
|
||||
*
|
||||
* GCC-12 has a coroutine codegen bug where using this external awaiter
|
||||
* struct at multiple co_await points in the same coroutine corrupts the
|
||||
* state machine's resume index. After the second co_await, the third
|
||||
* resumption enters handle().resume() but never reaches await_resume()
|
||||
* or any subsequent user code — the coroutine hangs indefinitely.
|
||||
*
|
||||
* The fix is `co_await runner->yieldAndPost()`, which defines the
|
||||
* awaiter as an inline struct inside a CoroTaskRunner member function.
|
||||
* GCC-12 handles inline awaiters correctly at multiple co_await points.
|
||||
*
|
||||
* This struct is retained for single-use scenarios and documentation
|
||||
* purposes. For any code that may use co_await in a loop or at
|
||||
* multiple points, always use `runner->yieldAndPost()`.
|
||||
*
|
||||
* Usage Examples
|
||||
* ==============
|
||||
*
|
||||
* 1. Yield and auto-repost (preferred — works on all compilers):
|
||||
*
|
||||
* CoroTask<void> handler(auto runner) {
|
||||
* doPartA();
|
||||
* co_await runner->yieldAndPost(); // yield + repost
|
||||
* doPartB(); // runs on a worker thread
|
||||
* co_return;
|
||||
* }
|
||||
*
|
||||
* 2. Multiple yield points in a loop:
|
||||
*
|
||||
* CoroTask<void> batchProcessor(auto runner) {
|
||||
* for (auto& item : items) {
|
||||
* process(item);
|
||||
* co_await runner->yieldAndPost(); // let other jobs run
|
||||
* }
|
||||
* co_return;
|
||||
* }
|
||||
*
|
||||
* 3. Graceful shutdown — checking after resume:
|
||||
*
|
||||
* CoroTask<void> longTask(auto runner, JobQueue& jq) {
|
||||
* while (hasWork()) {
|
||||
* co_await runner->yieldAndPost();
|
||||
* // If JQ is stopping, await_suspend resumes the coroutine
|
||||
* // immediately without re-queuing. Always check
|
||||
* // isStopping() to decide whether to proceed:
|
||||
* if (jq.isStopping())
|
||||
* co_return;
|
||||
* doNextChunk();
|
||||
* }
|
||||
* co_return;
|
||||
* }
|
||||
*
|
||||
* Caveats / Pitfalls
|
||||
* ==================
|
||||
*
|
||||
* BUG-RISK: Using a stale or null runner.
|
||||
* The runner shared_ptr must be valid and point to the CoroTaskRunner
|
||||
* that owns the coroutine currently executing. Passing a runner from
|
||||
* a different coroutine, or a default-constructed shared_ptr, is UB.
|
||||
*
|
||||
* BUG-RISK: Assuming resume happens on the same thread.
|
||||
* After co_await, the coroutine resumes on whatever worker thread
|
||||
* picks up the job. Do not rely on thread-local state unless it is
|
||||
* managed through LocalValue (which CoroTaskRunner automatically
|
||||
* swaps in/out).
|
||||
*
|
||||
* BUG-RISK: Ignoring the shutdown path.
|
||||
* When the JobQueue is stopping, post() fails and await_suspend()
|
||||
* resumes the coroutine immediately (symmetric transfer back to h).
|
||||
* The coroutine body continues on the same thread. If your code
|
||||
* after co_await assumes it was re-queued and is running on a worker
|
||||
* thread, that assumption breaks during shutdown. Always handle the
|
||||
* "JQ is stopping" case, either by checking jq.isStopping() or by
|
||||
* letting the coroutine fall through to co_return naturally.
|
||||
*
|
||||
* DIFFERENCE from runner->suspend() + runner->post():
|
||||
* Both JobQueueAwaiter and yieldAndPost() combine suspend + post
|
||||
* in one atomic operation. With the manual suspend()/post() pattern,
|
||||
* there is a window between the two calls where an external event
|
||||
* could race. The atomic awaiters remove that window — onSuspend()
|
||||
* and post() happen within the same await_suspend() call while the
|
||||
* coroutine is guaranteed to be suspended. Use yieldAndPost() unless
|
||||
* you need an external party to decide *when* to call post().
|
||||
*/
|
||||
struct JobQueueAwaiter
|
||||
{
|
||||
// The CoroTaskRunner that owns the currently executing coroutine.
|
||||
std::shared_ptr<JobQueue::CoroTaskRunner> runner;
|
||||
|
||||
/**
|
||||
* Always returns false so the coroutine suspends.
|
||||
*/
|
||||
bool
|
||||
await_ready() const noexcept
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment nSuspend (equivalent to yield()) and schedule resume
|
||||
* on the JobQueue (equivalent to post()). If the JobQueue is
|
||||
* stopping, undoes the suspend count and transfers back to the
|
||||
* coroutine so it can clean up and co_return.
|
||||
*
|
||||
* Returns a coroutine_handle<> (symmetric transfer) instead of
|
||||
* bool to work around a GCC-12 codegen bug where bool-returning
|
||||
* await_suspend leaves the coroutine in an invalid state —
|
||||
* neither properly suspended nor resumed — causing a hang.
|
||||
*
|
||||
* WARNING: GCC-12 has an additional codegen bug where using this
|
||||
* external awaiter struct at multiple co_await points in the same
|
||||
* coroutine corrupts the state machine's resume index, causing the
|
||||
* coroutine to hang on the third resumption. Prefer
|
||||
* `co_await runner->yieldAndPost()` which uses an inline awaiter
|
||||
* that GCC-12 handles correctly.
|
||||
*
|
||||
* @return noop_coroutine() to stay suspended (job posted);
|
||||
* the caller's handle to resume immediately (JQ stopping)
|
||||
*/
|
||||
std::coroutine_handle<>
|
||||
await_suspend(std::coroutine_handle<> h)
|
||||
{
|
||||
XRPL_ASSERT(runner, "xrpl::JobQueueAwaiter::await_suspend : runner is valid");
|
||||
runner->onSuspend();
|
||||
if (!runner->post())
|
||||
{
|
||||
// JobQueue is stopping. Undo the suspend count and
|
||||
// transfer back to the coroutine so it can clean up
|
||||
// and co_return.
|
||||
runner->onUndoSuspend();
|
||||
return h;
|
||||
}
|
||||
return std::noop_coroutine();
|
||||
}
|
||||
|
||||
void
|
||||
await_resume() const noexcept
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace xrpl
|
||||
@@ -3,7 +3,6 @@
|
||||
#include <xrpl/basics/Blob.h>
|
||||
#include <xrpl/basics/SHAMapHash.h>
|
||||
#include <xrpl/basics/TaggedCache.h>
|
||||
#include <xrpl/ledger/CachedSLEs.h>
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
|
||||
@@ -23,6 +22,20 @@ class PerfLog;
|
||||
// This is temporary until we migrate all code to use ServiceRegistry.
|
||||
class Application;
|
||||
|
||||
template <
|
||||
class Key,
|
||||
class T,
|
||||
bool IsKeyCache,
|
||||
class SharedWeakUnionPointer,
|
||||
class SharedPointerType,
|
||||
class Hash,
|
||||
class KeyEqual,
|
||||
class Mutex>
|
||||
class TaggedCache;
|
||||
class STLedgerEntry;
|
||||
using SLE = STLedgerEntry;
|
||||
using CachedSLEs = TaggedCache<uint256, SLE const>;
|
||||
|
||||
// Forward declarations
|
||||
class AcceptedLedger;
|
||||
class AmendmentTable;
|
||||
@@ -45,7 +58,7 @@ class NetworkIDService;
|
||||
class OpenLedger;
|
||||
class OrderBookDB;
|
||||
class Overlay;
|
||||
class PathRequests;
|
||||
class PathRequestManager;
|
||||
class PeerReservationTable;
|
||||
class PendingSaves;
|
||||
class RelationalDatabase;
|
||||
@@ -89,7 +102,7 @@ public:
|
||||
getNodeFamily() = 0;
|
||||
|
||||
virtual TimeKeeper&
|
||||
timeKeeper() = 0;
|
||||
getTimeKeeper() = 0;
|
||||
|
||||
virtual JobQueue&
|
||||
getJobQueue() = 0;
|
||||
@@ -98,7 +111,7 @@ public:
|
||||
getTempNodeCache() = 0;
|
||||
|
||||
virtual CachedSLEs&
|
||||
cachedSLEs() = 0;
|
||||
getCachedSLEs() = 0;
|
||||
|
||||
virtual NetworkIDService&
|
||||
getNetworkIDService() = 0;
|
||||
@@ -120,26 +133,26 @@ public:
|
||||
getValidations() = 0;
|
||||
|
||||
virtual ValidatorList&
|
||||
validators() = 0;
|
||||
getValidators() = 0;
|
||||
|
||||
virtual ValidatorSite&
|
||||
validatorSites() = 0;
|
||||
getValidatorSites() = 0;
|
||||
|
||||
virtual ManifestCache&
|
||||
validatorManifests() = 0;
|
||||
getValidatorManifests() = 0;
|
||||
|
||||
virtual ManifestCache&
|
||||
publisherManifests() = 0;
|
||||
getPublisherManifests() = 0;
|
||||
|
||||
// Network services
|
||||
virtual Overlay&
|
||||
overlay() = 0;
|
||||
getOverlay() = 0;
|
||||
|
||||
virtual Cluster&
|
||||
cluster() = 0;
|
||||
getCluster() = 0;
|
||||
|
||||
virtual PeerReservationTable&
|
||||
peerReservations() = 0;
|
||||
getPeerReservations() = 0;
|
||||
|
||||
virtual Resource::Manager&
|
||||
getResourceManager() = 0;
|
||||
@@ -174,13 +187,13 @@ public:
|
||||
getLedgerReplayer() = 0;
|
||||
|
||||
virtual PendingSaves&
|
||||
pendingSaves() = 0;
|
||||
getPendingSaves() = 0;
|
||||
|
||||
virtual OpenLedger&
|
||||
openLedger() = 0;
|
||||
getOpenLedger() = 0;
|
||||
|
||||
virtual OpenLedger const&
|
||||
openLedger() const = 0;
|
||||
getOpenLedger() const = 0;
|
||||
|
||||
// Transaction and operation services
|
||||
virtual NetworkOPs&
|
||||
@@ -195,8 +208,8 @@ public:
|
||||
virtual TxQ&
|
||||
getTxQ() = 0;
|
||||
|
||||
virtual PathRequests&
|
||||
getPathRequests() = 0;
|
||||
virtual PathRequestManager&
|
||||
getPathRequestManager() = 0;
|
||||
|
||||
// Server services
|
||||
virtual ServerHandler&
|
||||
@@ -210,16 +223,16 @@ public:
|
||||
isStopping() const = 0;
|
||||
|
||||
virtual beast::Journal
|
||||
journal(std::string const& name) = 0;
|
||||
getJournal(std::string const& name) = 0;
|
||||
|
||||
virtual boost::asio::io_context&
|
||||
getIOContext() = 0;
|
||||
|
||||
virtual Logs&
|
||||
logs() = 0;
|
||||
getLogs() = 0;
|
||||
|
||||
virtual std::optional<uint256> const&
|
||||
trapTxID() const = 0;
|
||||
getTrapTxID() const = 0;
|
||||
|
||||
/** Retrieve the "wallet database" */
|
||||
virtual DatabaseCon&
|
||||
@@ -228,7 +241,7 @@ public:
|
||||
// Temporary: Get the underlying Application for functions that haven't
|
||||
// been migrated yet. This should be removed once all code is migrated.
|
||||
virtual Application&
|
||||
app() = 0;
|
||||
getApp() = 0;
|
||||
};
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <xrpld/core/Config.h>
|
||||
#include <xrpld/core/TimeKeeper.h>
|
||||
|
||||
#include <xrpl/basics/CountedObject.h>
|
||||
#include <xrpl/beast/utility/Journal.h>
|
||||
#include <xrpl/ledger/CachedView.h>
|
||||
#include <xrpl/ledger/View.h>
|
||||
#include <xrpl/protocol/Fees.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
#include <xrpl/protocol/Rules.h>
|
||||
#include <xrpl/protocol/STLedgerEntry.h>
|
||||
#include <xrpl/protocol/Serializer.h>
|
||||
#include <xrpl/protocol/TxMeta.h>
|
||||
@@ -15,7 +14,7 @@
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
class Application;
|
||||
class ServiceRegistry;
|
||||
class Job;
|
||||
class TransactionMaster;
|
||||
|
||||
@@ -83,21 +82,26 @@ public:
|
||||
*/
|
||||
Ledger(
|
||||
create_genesis_t,
|
||||
Config const& config,
|
||||
Rules const& rules,
|
||||
Fees const& fees,
|
||||
std::vector<uint256> const& amendments,
|
||||
Family& family);
|
||||
|
||||
Ledger(LedgerHeader const& info, Config const& config, Family& family);
|
||||
Ledger(LedgerHeader const& info, Rules const& rules, Family& family);
|
||||
|
||||
/** Used for ledgers loaded from JSON files
|
||||
|
||||
@param acquire If true, acquires the ledger if not found locally
|
||||
|
||||
@note The fees parameter provides default values, but setup() may
|
||||
override them from the ledger state if fee-related SLEs exist.
|
||||
*/
|
||||
Ledger(
|
||||
LedgerHeader const& info,
|
||||
bool& loaded,
|
||||
bool acquire,
|
||||
Config const& config,
|
||||
Rules const& rules,
|
||||
Fees const& fees,
|
||||
Family& family,
|
||||
beast::Journal j);
|
||||
|
||||
@@ -113,7 +117,8 @@ public:
|
||||
Ledger(
|
||||
std::uint32_t ledgerSeq,
|
||||
NetClock::time_point closeTime,
|
||||
Config const& config,
|
||||
Rules const& rules,
|
||||
Fees const& fees,
|
||||
Family& family);
|
||||
|
||||
~Ledger() = default;
|
||||
@@ -322,7 +327,7 @@ public:
|
||||
walkLedger(beast::Journal j, bool parallel = false) const;
|
||||
|
||||
bool
|
||||
assertSensible(beast::Journal ledgerJ) const;
|
||||
isSensible() const;
|
||||
|
||||
void
|
||||
invariants() const;
|
||||
@@ -379,8 +384,26 @@ private:
|
||||
bool
|
||||
setup();
|
||||
|
||||
void
|
||||
defaultFees(Config const& config);
|
||||
/** @brief Deserialize a SHAMapItem containing a single STTx.
|
||||
*
|
||||
* @param item The SHAMapItem to deserialize.
|
||||
* @return A shared pointer to the deserialized transaction.
|
||||
* @throw May throw on deserialization error.
|
||||
*/
|
||||
static std::shared_ptr<STTx const>
|
||||
deserializeTx(SHAMapItem const& item);
|
||||
|
||||
/** @brief Deserialize a SHAMapItem containing STTx + STObject metadata.
|
||||
*
|
||||
* The SHAMapItem must contain two variable length serialization objects.
|
||||
*
|
||||
* @param item The SHAMapItem to deserialize.
|
||||
* @return A pair containing shared pointers to the deserialized transaction
|
||||
* and metadata.
|
||||
* @throw May throw on deserialization error.
|
||||
*/
|
||||
static std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>
|
||||
deserializeTxPlusMeta(SHAMapItem const& item);
|
||||
|
||||
bool mImmutable;
|
||||
|
||||
@@ -402,54 +425,4 @@ private:
|
||||
/** A ledger wrapped in a CachedView. */
|
||||
using CachedLedger = CachedView<Ledger>;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
//
|
||||
// API
|
||||
//
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
extern bool
|
||||
pendSaveValidated(
|
||||
Application& app,
|
||||
std::shared_ptr<Ledger const> const& ledger,
|
||||
bool isSynchronous,
|
||||
bool isCurrent);
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
loadLedgerHelper(LedgerHeader const& sinfo, Application& app, bool acquire);
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
loadByIndex(std::uint32_t ledgerIndex, Application& app, bool acquire = true);
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
loadByHash(uint256 const& ledgerHash, Application& app, bool acquire = true);
|
||||
|
||||
// Fetch the ledger with the highest sequence contained in the database
|
||||
extern std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
|
||||
getLatestLedger(Application& app);
|
||||
|
||||
/** Deserialize a SHAMapItem containing a single STTx
|
||||
|
||||
Throw:
|
||||
|
||||
May throw on deserializaton error
|
||||
*/
|
||||
std::shared_ptr<STTx const>
|
||||
deserializeTx(SHAMapItem const& item);
|
||||
|
||||
/** Deserialize a SHAMapItem containing STTx + STObject metadata
|
||||
|
||||
The SHAMap must contain two variable length
|
||||
serialization objects.
|
||||
|
||||
Throw:
|
||||
|
||||
May throw on deserializaton error
|
||||
*/
|
||||
std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>
|
||||
deserializeTxPlusMeta(SHAMapItem const& item);
|
||||
|
||||
uint256
|
||||
calculateLedgerHash(LedgerHeader const& info);
|
||||
|
||||
} // namespace xrpl
|
||||
@@ -62,7 +62,7 @@ getNextLedgerTimeResolution(
|
||||
bool previousAgree,
|
||||
Seq ledgerSeq)
|
||||
{
|
||||
XRPL_ASSERT(ledgerSeq != Seq{0}, "ripple:getNextLedgerTimeResolution : valid ledger sequence");
|
||||
XRPL_ASSERT(ledgerSeq != Seq{0}, "xrpl::getNextLedgerTimeResolution : valid ledger sequence");
|
||||
|
||||
using namespace std::chrono;
|
||||
// Find the current resolution:
|
||||
@@ -72,7 +72,7 @@ getNextLedgerTimeResolution(
|
||||
previousResolution);
|
||||
XRPL_ASSERT(
|
||||
iter != std::end(ledgerPossibleTimeResolutions),
|
||||
"ripple:getNextLedgerTimeResolution : found time resolution");
|
||||
"xrpl::getNextLedgerTimeResolution : found time resolution");
|
||||
|
||||
// This should never happen, but just as a precaution
|
||||
if (iter == std::end(ledgerPossibleTimeResolutions))
|
||||
@@ -142,14 +142,14 @@ removeEmptyHolding(
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
TER
|
||||
rippleLockEscrowMPT(
|
||||
lockEscrowMPT(
|
||||
ApplyView& view,
|
||||
AccountID const& uGrantorID,
|
||||
STAmount const& saAmount,
|
||||
beast::Journal j);
|
||||
|
||||
TER
|
||||
rippleUnlockEscrowMPT(
|
||||
unlockEscrowMPT(
|
||||
ApplyView& view,
|
||||
AccountID const& uGrantorID,
|
||||
AccountID const& uGranteeID,
|
||||
|
||||
@@ -235,11 +235,11 @@ canTransfer(ReadView const& view, Asset const& asset, AccountID const& from, Acc
|
||||
// --> bCheckIssuer : normally require issuer to be involved.
|
||||
// [[nodiscard]] // nodiscard commented out so DirectStep.cpp compiles.
|
||||
|
||||
/** Calls static rippleCreditIOU if saAmount represents Issue.
|
||||
* Calls static rippleCreditMPT if saAmount represents MPTIssue.
|
||||
/** Calls static directSendIOU if saAmount represents Issue.
|
||||
* Calls static directSendMPT if saAmount represents MPTIssue.
|
||||
*/
|
||||
TER
|
||||
rippleCredit(
|
||||
directSend(
|
||||
ApplyView& view,
|
||||
AccountID const& uSenderID,
|
||||
AccountID const& uReceiverID,
|
||||
|
||||
@@ -51,7 +51,7 @@ A blob containing the payload. Stored in the following format.
|
||||
---
|
||||
|
||||
The `NodeStore` provides an interface that stores, in a persistent database, a
|
||||
collection of NodeObjects that rippled uses as its primary representation of
|
||||
collection of NodeObjects that xrpld uses as its primary representation of
|
||||
ledger entries. All ledger entries are stored as NodeObjects and as such, need
|
||||
to be persisted between launches. If a NodeObject is accessed and is not in
|
||||
memory, it will be retrieved from the database.
|
||||
@@ -110,7 +110,7 @@ The `NodeStore.Timing` test is used to execute a set of read/write workloads to
|
||||
compare current available nodestore backends. It can be executed with:
|
||||
|
||||
```
|
||||
$rippled --unittest=NodeStoreTiming
|
||||
$xrpld --unittest=NodeStoreTiming
|
||||
```
|
||||
|
||||
It is also possible to use alternate DB config params by passing config strings
|
||||
@@ -143,10 +143,10 @@ Through various executions and profiling some conclusions are presented below.
|
||||
just after ledger close, then that would provide similar, but more predictable
|
||||
guarantees. It would also remove an unneeded thread and unnecessary memory
|
||||
usage. An alternative point of view is that because there will always be many
|
||||
other rippled instances running there is no need for such guarantees. The nodes
|
||||
other xrpld instances running there is no need for such guarantees. The nodes
|
||||
will always be available from another peer.
|
||||
|
||||
- Lookup in a block was previously using binary search. With rippled's use case
|
||||
- Lookup in a block was previously using binary search. With xrpld's use case
|
||||
it is highly unlikely that two adjacent key/values will ever be requested one
|
||||
after the other. Therefore hash indexing of blocks makes much more sense.
|
||||
Rocksdb has a number of options for hash indexing both memtables and blocks and
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Protocol buffer definitions for gRPC
|
||||
|
||||
This folder contains the protocol buffer definitions used by the rippled gRPC API.
|
||||
This folder contains the protocol buffer definitions used by the xrpld gRPC API.
|
||||
The gRPC API attempts to mimic the JSON/Websocket API as much as possible.
|
||||
As of April 2020, the gRPC API supports a subset of the full rippled API:
|
||||
As of April 2020, the gRPC API supports a subset of the full xrpld API:
|
||||
tx, account_tx, account_info, fee and submit.
|
||||
|
||||
### Making Changes
|
||||
@@ -63,7 +63,7 @@ templated `CallData` class in GRPCServerImpl::setupListeners(). The template
|
||||
parameters should be the request type and the response type.
|
||||
|
||||
Finally, define the handler itself in the appropriate file under the
|
||||
src/ripple/rpc/handlers folder. If the method already has a JSON/Websocket
|
||||
src/xrpld/rpc/handlers folder. If the method already has a JSON/Websocket
|
||||
equivalent, write the gRPC handler in the same file, and abstract common logic
|
||||
into helper functions (see Tx.cpp or AccountTx.cpp for an example).
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
#include <xrpl/protocol/tokens.h>
|
||||
// VFALCO Uncomment when the header issues are resolved
|
||||
// #include <ripple/protocol/PublicKey.h>
|
||||
// #include <xrpl/protocol/PublicKey.h>
|
||||
#include <xrpl/basics/UnorderedContainers.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/json/json_value.h>
|
||||
|
||||
@@ -33,7 +33,7 @@ getFullVersionString();
|
||||
X: 16 bits identifying the particular implementation
|
||||
Y: 48 bits of data specific to the implementation
|
||||
|
||||
The rippled-specific format (implementation ID is: 0x18 0x3B) is:
|
||||
The xrpld-specific format (implementation ID is: 0x18 0x3B) is:
|
||||
|
||||
00011000-00111011-MMMMMMMM-mmmmmmmm-pppppppp-TTNNNNNN-00000000-00000000
|
||||
|
||||
@@ -55,23 +55,23 @@ encodeSoftwareVersion(std::string_view versionStr);
|
||||
std::uint64_t
|
||||
getEncodedVersion();
|
||||
|
||||
/** Check if the encoded software version is a rippled software version.
|
||||
/** Check if the encoded software version is an xrpld software version.
|
||||
|
||||
@param version another node's encoded software version
|
||||
@return true if the version is a rippled software version, false otherwise
|
||||
@return true if the version is an xrpld software version, false otherwise
|
||||
*/
|
||||
bool
|
||||
isRippledVersion(std::uint64_t version);
|
||||
isXrpldVersion(std::uint64_t version);
|
||||
|
||||
/** Check if the version is newer than the local node's rippled software
|
||||
/** Check if the version is newer than the local node's xrpld software
|
||||
version.
|
||||
|
||||
@param version another node's encoded software version
|
||||
@return true if the version is newer than the local node's rippled software
|
||||
@return true if the version is newer than the local node's xrpld software
|
||||
version, false otherwise.
|
||||
|
||||
@note This function only understands version numbers that are generated by
|
||||
rippled. Please see the encodeSoftwareVersion() function for detail.
|
||||
xrpld. Please see the encodeSoftwareVersion() function for detail.
|
||||
*/
|
||||
bool
|
||||
isNewerVersion(std::uint64_t version);
|
||||
|
||||
@@ -153,7 +153,7 @@ enum warning_code_i {
|
||||
warnRPC_AMENDMENT_BLOCKED = 1002,
|
||||
warnRPC_EXPIRED_VALIDATOR_LIST = 1003,
|
||||
// unused = 1004
|
||||
warnRPC_FIELDS_DEPRECATED = 2004, // rippled needs to maintain
|
||||
warnRPC_FIELDS_DEPRECATED = 2004, // xrpld needs to maintain
|
||||
// compatibility with Clio on this code.
|
||||
};
|
||||
|
||||
|
||||
@@ -37,10 +37,10 @@
|
||||
* 5) If a supported feature (`Supported::yes`) was _ever_ in a released
|
||||
* version, it can never be changed back to `Supported::no`, because
|
||||
* it _may_ still become enabled at any time. This would cause newer
|
||||
* versions of `rippled` to become amendment blocked.
|
||||
* versions of `xrpld` to become amendment blocked.
|
||||
* Instead, to prevent newer versions from voting on the feature, use
|
||||
* `VoteBehavior::Obsolete`. Obsolete features can not be voted for
|
||||
* by any versions of `rippled` built with that setting, but will still
|
||||
* by any versions of `xrpld` built with that setting, but will still
|
||||
* work correctly if they get enabled. If a feature remains obsolete
|
||||
* for long enough that _all_ clients that could vote for it are
|
||||
* amendment blocked, the feature can be removed from the code
|
||||
|
||||
@@ -4,6 +4,10 @@
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
// Deprecated constant for backwards compatibility with pre-XRPFees amendment.
|
||||
// This was the reference fee units used in the old fee calculation.
|
||||
inline constexpr std::uint32_t FEE_UNITS_DEPRECATED = 10;
|
||||
|
||||
/** Reflects the fee settings for a particular ledger.
|
||||
|
||||
The fees are always the same for any transactions applied
|
||||
@@ -11,15 +15,25 @@ namespace xrpl {
|
||||
*/
|
||||
struct Fees
|
||||
{
|
||||
XRPAmount base{0}; // Reference tx cost (drops)
|
||||
XRPAmount reserve{0}; // Reserve base (drops)
|
||||
XRPAmount increment{0}; // Reserve increment (drops)
|
||||
/** @brief Cost of a reference transaction in drops. */
|
||||
XRPAmount base{0};
|
||||
|
||||
/** @brief Minimum XRP an account must hold to exist on the ledger. */
|
||||
XRPAmount reserve{0};
|
||||
|
||||
/** @brief Additional XRP reserve required per owned ledger object. */
|
||||
XRPAmount increment{0};
|
||||
|
||||
explicit Fees() = default;
|
||||
Fees(Fees const&) = default;
|
||||
Fees&
|
||||
operator=(Fees const&) = default;
|
||||
|
||||
Fees(XRPAmount base_, XRPAmount reserve_, XRPAmount increment_)
|
||||
: base(base_), reserve(reserve_), increment(increment_)
|
||||
{
|
||||
}
|
||||
|
||||
/** Returns the account reserve given the owner count, in drops.
|
||||
|
||||
The reserve is calculated as the reserve base plus
|
||||
|
||||
@@ -72,4 +72,8 @@ deserializeHeader(Slice data, bool hasHash = false);
|
||||
LedgerHeader
|
||||
deserializePrefixedHeader(Slice data, bool hasHash = false);
|
||||
|
||||
/** Calculate the hash of a ledger header. */
|
||||
uint256
|
||||
calculateLedgerHash(LedgerHeader const& info);
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
@@ -22,7 +22,7 @@ optional fields easier to read:
|
||||
- The operation `x[~sfFoo]` means "return the value of 'Foo'
|
||||
if it exists, or nothing if it doesn't." This usage of the
|
||||
tilde/bitwise NOT operator is not standard outside of the
|
||||
`rippled` codebase.
|
||||
`xrpld` codebase.
|
||||
- As a consequence of this, `x[~sfFoo] = y[~sfFoo]`
|
||||
assigns the value of Foo from y to x, including omitting
|
||||
Foo from x if it doesn't exist in y.
|
||||
@@ -33,7 +33,7 @@ or may not hold a value. For things not guaranteed to exist,
|
||||
you use `x[~sfFoo]` because you want such a container. It
|
||||
avoids having to look something up twice, once just to see if
|
||||
it exists and a second time to get/set its value.
|
||||
([Real example](https://github.com/ripple/rippled/blob/35f4698aed5dce02f771b34cfbb690495cb5efcc/src/ripple/app/tx/impl/PayChan.cpp#L229-L236))
|
||||
([Real example](https://github.com/XRPLF/rippled/blob/35f4698aed5dce02f771b34cfbb690495cb5efcc/src/ripple/app/tx/impl/PayChan.cpp#L229-L236))
|
||||
|
||||
The source of this "type magic" is in
|
||||
[SField.h](./SField.h#L296-L302).
|
||||
|
||||
@@ -13,7 +13,7 @@ class STAccount final : public STBase, public CountedObject<STAccount>
|
||||
private:
|
||||
// The original implementation of STAccount kept the value in an STBlob.
|
||||
// But an STAccount is always 160 bits, so we can store it with less
|
||||
// overhead in a xrpl::uint160. However, so the serialized format of the
|
||||
// overhead in an xrpl::uint160. However, so the serialized format of the
|
||||
// STAccount stays unchanged, we serialize and deserialize like an STBlob.
|
||||
AccountID value_;
|
||||
bool default_;
|
||||
|
||||
@@ -118,7 +118,7 @@ derivePublicKey(KeyType type, SecretKey const& sk);
|
||||
|
||||
/** Generate a key pair deterministically.
|
||||
|
||||
This algorithm is specific to Ripple:
|
||||
This algorithm is specific to the XRPL:
|
||||
|
||||
For secp256k1 key pairs, the seed is converted
|
||||
to a Generator and used to compute the key pair
|
||||
|
||||
@@ -80,7 +80,7 @@ randomSeed();
|
||||
|
||||
/** Generate a seed deterministically.
|
||||
|
||||
The algorithm is specific to Ripple:
|
||||
The algorithm is specific to the XRPL:
|
||||
|
||||
The seed is calculated as the first 128 bits
|
||||
of the SHA512-Half of the string text excluding
|
||||
|
||||
@@ -21,7 +21,7 @@ namespace unit {
|
||||
struct dropTag;
|
||||
/** "fee levels" are used by the transaction queue to compare the relative
|
||||
cost of transactions that require different levels of effort to process.
|
||||
See also: src/ripple/app/misc/FeeEscalation.md#fee-level */
|
||||
See also: src/xrpld/app/misc/FeeEscalation.md#fee-level */
|
||||
struct feelevelTag;
|
||||
/** unitless values are plain scalars wrapped in a ValueUnit. They are
|
||||
used for calculations in this header. */
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
// Add new amendments to the top of this list.
|
||||
// Keep it sorted in reverse chronological order.
|
||||
|
||||
XRPL_FIX (Security3_1_3, Supported::no, VoteBehavior::DefaultNo)
|
||||
XRPL_FIX (PermissionedDomainInvariant, Supported::yes, VoteBehavior::DefaultNo)
|
||||
XRPL_FIX (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo)
|
||||
XRPL_FIX (BatchInnerSigs, Supported::no, VoteBehavior::DefaultNo)
|
||||
|
||||
@@ -578,7 +578,7 @@ LEDGER_ENTRY(ltLOAN, 0x0089, Loan, loan, ({
|
||||
// The unrounded true total value of the loan.
|
||||
//
|
||||
// - TrueTotalPrincipalOutstanding can be computed using the algorithm
|
||||
// in the ripple::detail::loanPrincipalFromPeriodicPayment function.
|
||||
// in the xrpl::detail::loanPrincipalFromPeriodicPayment function.
|
||||
//
|
||||
// - TrueTotalInterestOutstanding = TrueTotalLoanValue -
|
||||
// TrueTotalPrincipalOutstanding
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <xrpl/core/PerfLog.h>
|
||||
#include <xrpl/core/ServiceRegistry.h>
|
||||
#include <xrpl/core/StartUpType.h>
|
||||
#include <xrpl/rdb/DBInit.h>
|
||||
#include <xrpl/rdb/SociDB.h>
|
||||
@@ -94,7 +95,7 @@ public:
|
||||
struct CheckpointerSetup
|
||||
{
|
||||
JobQueue* jobQueue;
|
||||
Logs* logs;
|
||||
std::reference_wrapper<ServiceRegistry> registry;
|
||||
};
|
||||
|
||||
template <std::size_t N, std::size_t M>
|
||||
@@ -129,7 +130,7 @@ public:
|
||||
beast::Journal journal)
|
||||
: DatabaseCon(setup, dbName, pragma, initSQL, journal)
|
||||
{
|
||||
setupCheckpointing(checkpointerSetup.jobQueue, *checkpointerSetup.logs);
|
||||
setupCheckpointing(checkpointerSetup.jobQueue, checkpointerSetup.registry.get());
|
||||
}
|
||||
|
||||
template <std::size_t N, std::size_t M>
|
||||
@@ -154,7 +155,7 @@ public:
|
||||
beast::Journal journal)
|
||||
: DatabaseCon(dataDir, dbName, pragma, initSQL, journal)
|
||||
{
|
||||
setupCheckpointing(checkpointerSetup.jobQueue, *checkpointerSetup.logs);
|
||||
setupCheckpointing(checkpointerSetup.jobQueue, checkpointerSetup.registry.get());
|
||||
}
|
||||
|
||||
~DatabaseCon();
|
||||
@@ -177,7 +178,7 @@ public:
|
||||
|
||||
private:
|
||||
void
|
||||
setupCheckpointing(JobQueue*, Logs&);
|
||||
setupCheckpointing(JobQueue*, ServiceRegistry&);
|
||||
|
||||
template <std::size_t N, std::size_t M>
|
||||
DatabaseCon(
|
||||
|
||||
@@ -13,8 +13,8 @@
|
||||
#pragma clang diagnostic ignored "-Wdeprecated"
|
||||
#endif
|
||||
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/core/JobQueue.h>
|
||||
#include <xrpl/core/ServiceRegistry.h>
|
||||
|
||||
#define SOCI_USE_BOOST
|
||||
#include <soci/soci.h>
|
||||
@@ -111,7 +111,7 @@ public:
|
||||
and so must outlive them both.
|
||||
*/
|
||||
std::shared_ptr<Checkpointer>
|
||||
makeCheckpointer(std::uintptr_t id, std::weak_ptr<soci::session>, JobQueue&, Logs&);
|
||||
makeCheckpointer(std::uintptr_t id, std::weak_ptr<soci::session>, JobQueue&, ServiceRegistry&);
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
|
||||
@@ -72,6 +72,6 @@ drop connections to those IP addresses that occur commonly in the gossip.
|
||||
|
||||
## Access
|
||||
|
||||
In rippled, the Application holds a unique instance of Resource::Manager,
|
||||
In xrpld, the Application holds a unique instance of Resource::Manager,
|
||||
which may be retrieved by calling the method
|
||||
`Application::getResourceManager()`.
|
||||
|
||||
@@ -17,7 +17,7 @@ namespace xrpl {
|
||||
|
||||
Suppose the secret keys installed on a Ripple validator are compromised. Not
|
||||
only do you have to generate and install new key pairs on each validator,
|
||||
EVERY rippled needs to have its config updated with the new public keys, and
|
||||
EVERY xrpld needs to have its config updated with the new public keys, and
|
||||
is vulnerable to forged validation signatures until this is done. The
|
||||
solution is a new layer of indirection: A master secret key under
|
||||
restrictive access control is used to sign a "manifest": essentially, a
|
||||
@@ -39,11 +39,11 @@ namespace xrpl {
|
||||
seen for that validator, if any. On startup, the [validator_token] config
|
||||
entry (which contains the manifest for this validator) is decoded and
|
||||
added to the manifest cache. Other manifests are added as "gossip"
|
||||
received from rippled peers.
|
||||
received from xrpld peers.
|
||||
|
||||
When an ephemeral key is compromised, a new signing key pair is created,
|
||||
along with a new manifest vouching for it (with a higher sequence number),
|
||||
signed by the master key. When a rippled peer receives the new manifest,
|
||||
signed by the master key. When an xrpld peer receives the new manifest,
|
||||
it verifies it with the master key and (assuming it's valid) discards the
|
||||
old ephemeral key and stores the new one. If the master key itself gets
|
||||
compromised, a manifest with sequence number 0xFFFFFFFF will supersede a
|
||||
|
||||
@@ -63,8 +63,8 @@ enum class OperatingMode {
|
||||
needed.
|
||||
|
||||
A backend application or local client can trust a local instance of
|
||||
rippled / NetworkOPs. However, client software connecting to non-local
|
||||
instances of rippled will need to be hardened to protect against hostile
|
||||
xrpld / NetworkOPs. However, client software connecting to non-local
|
||||
instances of xrpld will need to be hardened to protect against hostile
|
||||
or unreliable servers.
|
||||
*/
|
||||
class NetworkOPs : public InfoSub::Source
|
||||
|
||||
@@ -112,7 +112,7 @@ When a `SHAMap` decides that it is safe to share a node of its own, it sets the
|
||||
node's sequence number to 0 (a `SHAMap` never has a sequence number of 0). This
|
||||
is done for every node in the trie when `SHAMap::walkSubTree` is executed.
|
||||
|
||||
Note that other objects in rippled also have sequence numbers (e.g. ledgers).
|
||||
Note that other objects in xrpld also have sequence numbers (e.g. ledgers).
|
||||
The `SHAMap` and node sequence numbers should not be confused with these other
|
||||
sequence numbers (no relation).
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/core/ServiceRegistry.h>
|
||||
#include <xrpl/ledger/PaymentSandbox.h>
|
||||
#include <xrpl/protocol/STAmount.h>
|
||||
#include <xrpl/protocol/TER.h>
|
||||
@@ -92,7 +92,7 @@ public:
|
||||
STPathSet const& spsPaths,
|
||||
|
||||
std::optional<uint256> const& domainID,
|
||||
Logs& l,
|
||||
ServiceRegistry& registry,
|
||||
Input const* const pInputs = nullptr);
|
||||
|
||||
// The view we are currently working on
|
||||
|
||||
@@ -771,7 +771,7 @@ flow(
|
||||
{
|
||||
// Rounding in the payment engine is causing this assert to
|
||||
// sometimes fire with "dust" amounts. This is causing issues when
|
||||
// running debug builds of rippled. While this issue still needs to
|
||||
// running debug builds of xrpld. While this issue still needs to
|
||||
// be resolved, the assert is causing more harm than good at this
|
||||
// point.
|
||||
// UNREACHABLE("xrpl::flow : rounding error");
|
||||
|
||||
@@ -7,7 +7,7 @@ race:boost/context/
|
||||
race:boost/asio/executor.hpp
|
||||
race:boost::asio
|
||||
|
||||
# Suppress tsan related issues in rippled code.
|
||||
# Suppress tsan related issues in xrpld code.
|
||||
race:src/libxrpl/basics/make_SSLContext.cpp
|
||||
race:src/libxrpl/basics/Number.cpp
|
||||
race:src/libxrpl/json/json_value.cpp
|
||||
@@ -46,7 +46,7 @@ race:xrpl/server/detail/ServerImpl.h
|
||||
race:xrpl/nodestore/detail/DatabaseNodeImp.h
|
||||
race:src/libxrpl/beast/utility/beast_Journal.cpp
|
||||
race:src/test/beast/LexicalCast_test.cpp
|
||||
race:ripple::ServerHandler
|
||||
race:ServerHandler
|
||||
|
||||
# More suppressions in external library code.
|
||||
race:crtstuff.c
|
||||
@@ -65,7 +65,7 @@ deadlock:src/xrpld/app/misc/detail/ValidatorSite.cpp
|
||||
signal:src/libxrpl/beast/utility/beast_Journal.cpp
|
||||
signal:src/xrpld/core/detail/Workers.cpp
|
||||
signal:src/xrpld/core/JobQueue.cpp
|
||||
signal:ripple::Workers::Worker
|
||||
signal:Workers::Worker
|
||||
|
||||
# Aggressive suppressing of deadlock tsan errors
|
||||
deadlock:pthread_create
|
||||
|
||||
@@ -74,7 +74,7 @@ vptr:boost
|
||||
# Google protobuf
|
||||
undefined:protobuf
|
||||
|
||||
# Suppress UBSan errors in rippled code by source file path
|
||||
# Suppress UBSan errors in xrpld code by source file path
|
||||
undefined:src/libxrpl/basics/base64.cpp
|
||||
undefined:src/libxrpl/basics/Number.cpp
|
||||
undefined:src/libxrpl/beast/utility/beast_Journal.cpp
|
||||
@@ -165,7 +165,7 @@ unsigned-integer-overflow:xrpl/nodestore/detail/varint.h
|
||||
unsigned-integer-overflow:xrpl/peerfinder/detail/Counts.h
|
||||
unsigned-integer-overflow:xrpl/protocol/nft.h
|
||||
|
||||
# Rippled intentional overflows and operations
|
||||
# Xrpld intentional overflows and operations
|
||||
# STAmount uses intentional negation of INT64_MIN and overflow in arithmetic
|
||||
signed-integer-overflow:src/libxrpl/protocol/STAmount.cpp
|
||||
unsigned-integer-overflow:src/libxrpl/protocol/STAmount.cpp
|
||||
|
||||
@@ -9,14 +9,14 @@ namespace xrpl {
|
||||
std::atomic<UptimeClock::rep> UptimeClock::now_{0}; // seconds since start
|
||||
std::atomic<bool> UptimeClock::stop_{false}; // stop update thread
|
||||
|
||||
// On rippled shutdown, cancel and wait for the update thread
|
||||
// On xrpld shutdown, cancel and wait for the update thread
|
||||
UptimeClock::update_thread::~update_thread()
|
||||
{
|
||||
if (joinable())
|
||||
{
|
||||
stop_ = true;
|
||||
// This join() may take up to a 1s, but happens only
|
||||
// once at rippled shutdown.
|
||||
// once at xrpld shutdown.
|
||||
join();
|
||||
}
|
||||
}
|
||||
@@ -40,7 +40,7 @@ UptimeClock::start_clock()
|
||||
}};
|
||||
}
|
||||
|
||||
// This actually measures time since first use, instead of since rippled start.
|
||||
// This actually measures time since first use, instead of since xrpld start.
|
||||
// However the difference between these two epochs is a small fraction of a
|
||||
// second and unimportant.
|
||||
|
||||
@@ -50,7 +50,7 @@ UptimeClock::now()
|
||||
// start the update thread on first use
|
||||
static auto const init = start_clock();
|
||||
|
||||
// Return the number of seconds since rippled start
|
||||
// Return the number of seconds since xrpld start
|
||||
return time_point{duration{now_}};
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#include <xrpld/app/misc/CanonicalTXSet.h>
|
||||
#include <xrpl/ledger/CanonicalTXSet.h>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
@@ -1,19 +1,9 @@
|
||||
#include <xrpld/app/ledger/InboundLedgers.h>
|
||||
#include <xrpld/app/ledger/Ledger.h>
|
||||
#include <xrpld/app/ledger/LedgerToJson.h>
|
||||
#include <xrpld/app/ledger/PendingSaves.h>
|
||||
#include <xrpld/app/main/Application.h>
|
||||
#include <xrpld/consensus/LedgerTiming.h>
|
||||
#include <xrpld/core/Config.h>
|
||||
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/beast/utility/instrumentation.h>
|
||||
#include <xrpl/core/HashRouter.h>
|
||||
#include <xrpl/core/JobQueue.h>
|
||||
#include <xrpl/json/to_string.h>
|
||||
#include <xrpl/nodestore/Database.h>
|
||||
#include <xrpl/nodestore/detail/DatabaseNodeImp.h>
|
||||
#include <xrpl/ledger/Ledger.h>
|
||||
#include <xrpl/ledger/LedgerTiming.h>
|
||||
#include <xrpl/protocol/Feature.h>
|
||||
#include <xrpl/protocol/HashPrefix.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
@@ -21,7 +11,6 @@
|
||||
#include <xrpl/protocol/SecretKey.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
#include <xrpl/protocol/jss.h>
|
||||
#include <xrpl/rdb/RelationalDatabase.h>
|
||||
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
@@ -30,23 +19,6 @@ namespace xrpl {
|
||||
|
||||
create_genesis_t const create_genesis{};
|
||||
|
||||
uint256
|
||||
calculateLedgerHash(LedgerHeader const& info)
|
||||
{
|
||||
// VFALCO This has to match addRaw in View.h.
|
||||
return sha512Half(
|
||||
HashPrefix::ledgerMaster,
|
||||
std::uint32_t(info.seq),
|
||||
std::uint64_t(info.drops.drops()),
|
||||
info.parentHash,
|
||||
info.txHash,
|
||||
info.accountHash,
|
||||
std::uint32_t(info.parentCloseTime.time_since_epoch().count()),
|
||||
std::uint32_t(info.closeTime.time_since_epoch().count()),
|
||||
std::uint8_t(info.closeTimeResolution.count()),
|
||||
std::uint8_t(info.closeFlags));
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class Ledger::sles_iter_impl : public sles_type::iter_base
|
||||
@@ -138,8 +110,8 @@ public:
|
||||
{
|
||||
auto const& item = *iter_;
|
||||
if (metadata_)
|
||||
return deserializeTxPlusMeta(item);
|
||||
return {deserializeTx(item), nullptr};
|
||||
return Ledger::deserializeTxPlusMeta(item);
|
||||
return {Ledger::deserializeTx(item), nullptr};
|
||||
}
|
||||
};
|
||||
|
||||
@@ -147,13 +119,15 @@ public:
|
||||
|
||||
Ledger::Ledger(
|
||||
create_genesis_t,
|
||||
Config const& config,
|
||||
Rules const& rules,
|
||||
Fees const& fees,
|
||||
std::vector<uint256> const& amendments,
|
||||
Family& family)
|
||||
: mImmutable(false)
|
||||
, txMap_(SHAMapType::TRANSACTION, family)
|
||||
, stateMap_(SHAMapType::STATE, family)
|
||||
, rules_{config.features}
|
||||
, fees_(fees)
|
||||
, rules_(rules)
|
||||
, j_(beast::Journal(beast::Journal::getNullSink()))
|
||||
{
|
||||
header_.seq = 1;
|
||||
@@ -182,19 +156,19 @@ Ledger::Ledger(
|
||||
// Whether featureXRPFees is supported will depend on startup options.
|
||||
if (std::find(amendments.begin(), amendments.end(), featureXRPFees) != amendments.end())
|
||||
{
|
||||
sle->at(sfBaseFeeDrops) = config.FEES.reference_fee;
|
||||
sle->at(sfReserveBaseDrops) = config.FEES.account_reserve;
|
||||
sle->at(sfReserveIncrementDrops) = config.FEES.owner_reserve;
|
||||
sle->at(sfBaseFeeDrops) = fees.base;
|
||||
sle->at(sfReserveBaseDrops) = fees.reserve;
|
||||
sle->at(sfReserveIncrementDrops) = fees.increment;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (auto const f = config.FEES.reference_fee.dropsAs<std::uint64_t>())
|
||||
if (auto const f = fees.base.dropsAs<std::uint64_t>())
|
||||
sle->at(sfBaseFee) = *f;
|
||||
if (auto const f = config.FEES.account_reserve.dropsAs<std::uint32_t>())
|
||||
if (auto const f = fees.reserve.dropsAs<std::uint32_t>())
|
||||
sle->at(sfReserveBase) = *f;
|
||||
if (auto const f = config.FEES.owner_reserve.dropsAs<std::uint32_t>())
|
||||
if (auto const f = fees.increment.dropsAs<std::uint32_t>())
|
||||
sle->at(sfReserveIncrement) = *f;
|
||||
sle->at(sfReferenceFeeUnits) = Config::FEE_UNITS_DEPRECATED;
|
||||
sle->at(sfReferenceFeeUnits) = FEE_UNITS_DEPRECATED;
|
||||
}
|
||||
rawInsert(sle);
|
||||
}
|
||||
@@ -207,13 +181,15 @@ Ledger::Ledger(
|
||||
LedgerHeader const& info,
|
||||
bool& loaded,
|
||||
bool acquire,
|
||||
Config const& config,
|
||||
Rules const& rules,
|
||||
Fees const& fees,
|
||||
Family& family,
|
||||
beast::Journal j)
|
||||
: mImmutable(true)
|
||||
, txMap_(SHAMapType::TRANSACTION, info.txHash, family)
|
||||
, stateMap_(SHAMapType::STATE, info.accountHash, family)
|
||||
, rules_(config.features)
|
||||
, fees_(fees)
|
||||
, rules_(rules)
|
||||
, header_(info)
|
||||
, j_(j)
|
||||
{
|
||||
@@ -235,7 +211,6 @@ Ledger::Ledger(
|
||||
txMap_.setImmutable();
|
||||
stateMap_.setImmutable();
|
||||
|
||||
defaultFees(config);
|
||||
if (!setup())
|
||||
loaded = false;
|
||||
|
||||
@@ -275,11 +250,11 @@ Ledger::Ledger(Ledger const& prevLedger, NetClock::time_point closeTime)
|
||||
}
|
||||
}
|
||||
|
||||
Ledger::Ledger(LedgerHeader const& info, Config const& config, Family& family)
|
||||
Ledger::Ledger(LedgerHeader const& info, Rules const& rules, Family& family)
|
||||
: mImmutable(true)
|
||||
, txMap_(SHAMapType::TRANSACTION, info.txHash, family)
|
||||
, stateMap_(SHAMapType::STATE, info.accountHash, family)
|
||||
, rules_{config.features}
|
||||
, rules_(rules)
|
||||
, header_(info)
|
||||
, j_(beast::Journal(beast::Journal::getNullSink()))
|
||||
{
|
||||
@@ -289,18 +264,19 @@ Ledger::Ledger(LedgerHeader const& info, Config const& config, Family& family)
|
||||
Ledger::Ledger(
|
||||
std::uint32_t ledgerSeq,
|
||||
NetClock::time_point closeTime,
|
||||
Config const& config,
|
||||
Rules const& rules,
|
||||
Fees const& fees,
|
||||
Family& family)
|
||||
: mImmutable(false)
|
||||
, txMap_(SHAMapType::TRANSACTION, family)
|
||||
, stateMap_(SHAMapType::STATE, family)
|
||||
, rules_{config.features}
|
||||
, fees_(fees)
|
||||
, rules_(rules)
|
||||
, j_(beast::Journal(beast::Journal::getNullSink()))
|
||||
{
|
||||
header_.seq = ledgerSeq;
|
||||
header_.closeTime = closeTime;
|
||||
header_.closeTimeResolution = ledgerDefaultTimeResolution;
|
||||
defaultFees(config);
|
||||
setup();
|
||||
}
|
||||
|
||||
@@ -350,14 +326,14 @@ Ledger::addSLE(SLE const& sle)
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
std::shared_ptr<STTx const>
|
||||
deserializeTx(SHAMapItem const& item)
|
||||
Ledger::deserializeTx(SHAMapItem const& item)
|
||||
{
|
||||
SerialIter sit(item.slice());
|
||||
return std::make_shared<STTx const>(sit);
|
||||
}
|
||||
|
||||
std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>
|
||||
deserializeTxPlusMeta(SHAMapItem const& item)
|
||||
Ledger::deserializeTxPlusMeta(SHAMapItem const& item)
|
||||
{
|
||||
std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>> result;
|
||||
SerialIter sit(item.slice());
|
||||
@@ -636,20 +612,6 @@ Ledger::setup()
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
Ledger::defaultFees(Config const& config)
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
fees_.base == 0 && fees_.reserve == 0 && fees_.increment == 0,
|
||||
"xrpl::Ledger::defaultFees : zero fees");
|
||||
if (fees_.base == 0)
|
||||
fees_.base = config.FEES.reference_fee;
|
||||
if (fees_.reserve == 0)
|
||||
fees_.reserve = config.FEES.account_reserve;
|
||||
if (fees_.increment == 0)
|
||||
fees_.increment = config.FEES.owner_reserve;
|
||||
}
|
||||
|
||||
std::shared_ptr<SLE>
|
||||
Ledger::peek(Keylet const& k) const
|
||||
{
|
||||
@@ -816,27 +778,17 @@ Ledger::walkLedger(beast::Journal j, bool parallel) const
|
||||
}
|
||||
|
||||
bool
|
||||
Ledger::assertSensible(beast::Journal ledgerJ) const
|
||||
Ledger::isSensible() const
|
||||
{
|
||||
if (header_.hash.isNonZero() && header_.accountHash.isNonZero() &&
|
||||
(header_.accountHash == stateMap_.getHash().as_uint256()) &&
|
||||
(header_.txHash == txMap_.getHash().as_uint256()))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
// LCOV_EXCL_START
|
||||
Json::Value j = getJson({*this, {}});
|
||||
|
||||
j[jss::accountTreeHash] = to_string(header_.accountHash);
|
||||
j[jss::transTreeHash] = to_string(header_.txHash);
|
||||
|
||||
JLOG(ledgerJ.fatal()) << "ledger is not sensible" << j;
|
||||
|
||||
UNREACHABLE("xrpl::Ledger::assertSensible : ledger is not sensible");
|
||||
|
||||
return false;
|
||||
// LCOV_EXCL_STOP
|
||||
if (header_.hash.isZero())
|
||||
return false;
|
||||
if (header_.accountHash.isZero())
|
||||
return false;
|
||||
if (header_.accountHash != stateMap_.getHash().as_uint256())
|
||||
return false;
|
||||
if (header_.txHash != txMap_.getHash().as_uint256())
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
// update the skip list with the information from our previous ledger
|
||||
@@ -925,76 +877,6 @@ Ledger::isVotingLedger() const
|
||||
return ::xrpl::isVotingLedger(header_.seq + 1);
|
||||
}
|
||||
|
||||
static bool
|
||||
saveValidatedLedger(Application& app, std::shared_ptr<Ledger const> const& ledger, bool current)
|
||||
{
|
||||
auto j = app.journal("Ledger");
|
||||
auto seq = ledger->header().seq;
|
||||
if (!app.pendingSaves().startWork(seq))
|
||||
{
|
||||
// The save was completed synchronously
|
||||
JLOG(j.debug()) << "Save aborted";
|
||||
return true;
|
||||
}
|
||||
|
||||
auto& db = app.getRelationalDatabase();
|
||||
|
||||
auto const res = db.saveValidatedLedger(ledger, current);
|
||||
|
||||
// Clients can now trust the database for
|
||||
// information about this ledger sequence.
|
||||
app.pendingSaves().finishWork(seq);
|
||||
return res;
|
||||
}
|
||||
|
||||
/** Save, or arrange to save, a fully-validated ledger
|
||||
Returns false on error
|
||||
*/
|
||||
bool
|
||||
pendSaveValidated(
|
||||
Application& app,
|
||||
std::shared_ptr<Ledger const> const& ledger,
|
||||
bool isSynchronous,
|
||||
bool isCurrent)
|
||||
{
|
||||
if (!app.getHashRouter().setFlags(ledger->header().hash, HashRouterFlags::SAVED))
|
||||
{
|
||||
// We have tried to save this ledger recently
|
||||
auto stream = app.journal("Ledger").debug();
|
||||
JLOG(stream) << "Double pend save for " << ledger->header().seq;
|
||||
|
||||
if (!isSynchronous || !app.pendingSaves().pending(ledger->header().seq))
|
||||
{
|
||||
// Either we don't need it to be finished
|
||||
// or it is finished
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
XRPL_ASSERT(ledger->isImmutable(), "xrpl::pendSaveValidated : immutable ledger");
|
||||
|
||||
if (!app.pendingSaves().shouldWork(ledger->header().seq, isSynchronous))
|
||||
{
|
||||
auto stream = app.journal("Ledger").debug();
|
||||
JLOG(stream) << "Pend save with seq in pending saves " << ledger->header().seq;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// See if we can use the JobQueue.
|
||||
if (!isSynchronous &&
|
||||
app.getJobQueue().addJob(
|
||||
isCurrent ? jtPUBLEDGER : jtPUBOLDLEDGER,
|
||||
std::to_string(ledger->seq()),
|
||||
[&app, ledger, isCurrent]() { saveValidatedLedger(app, ledger, isCurrent); }))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
// The JobQueue won't do the Job. Do the save synchronously.
|
||||
return saveValidatedLedger(app, ledger, isCurrent);
|
||||
}
|
||||
|
||||
void
|
||||
Ledger::unshare() const
|
||||
{
|
||||
@@ -1008,84 +890,5 @@ Ledger::invariants() const
|
||||
stateMap_.invariants();
|
||||
txMap_.invariants();
|
||||
}
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/*
|
||||
* Make ledger using info loaded from database.
|
||||
*
|
||||
* @param LedgerHeader: Ledger information.
|
||||
* @param app: Link to the Application.
|
||||
* @param acquire: Acquire the ledger if not found locally.
|
||||
* @return Shared pointer to the ledger.
|
||||
*/
|
||||
std::shared_ptr<Ledger>
|
||||
loadLedgerHelper(LedgerHeader const& info, Application& app, bool acquire)
|
||||
{
|
||||
bool loaded = false;
|
||||
auto ledger = std::make_shared<Ledger>(
|
||||
info, loaded, acquire, app.config(), app.getNodeFamily(), app.journal("Ledger"));
|
||||
|
||||
if (!loaded)
|
||||
ledger.reset();
|
||||
|
||||
return ledger;
|
||||
}
|
||||
|
||||
static void
|
||||
finishLoadByIndexOrHash(
|
||||
std::shared_ptr<Ledger> const& ledger,
|
||||
Config const& config,
|
||||
beast::Journal j)
|
||||
{
|
||||
if (!ledger)
|
||||
return;
|
||||
|
||||
XRPL_ASSERT(
|
||||
ledger->header().seq < XRP_LEDGER_EARLIEST_FEES || ledger->read(keylet::fees()),
|
||||
"xrpl::finishLoadByIndexOrHash : valid ledger fees");
|
||||
ledger->setImmutable();
|
||||
|
||||
JLOG(j.trace()) << "Loaded ledger: " << to_string(ledger->header().hash);
|
||||
|
||||
ledger->setFull();
|
||||
}
|
||||
|
||||
std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
|
||||
getLatestLedger(Application& app)
|
||||
{
|
||||
std::optional<LedgerHeader> const info = app.getRelationalDatabase().getNewestLedgerInfo();
|
||||
if (!info)
|
||||
return {std::shared_ptr<Ledger>(), {}, {}};
|
||||
return {loadLedgerHelper(*info, app, true), info->seq, info->hash};
|
||||
}
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
loadByIndex(std::uint32_t ledgerIndex, Application& app, bool acquire)
|
||||
{
|
||||
if (std::optional<LedgerHeader> info =
|
||||
app.getRelationalDatabase().getLedgerInfoByIndex(ledgerIndex))
|
||||
{
|
||||
std::shared_ptr<Ledger> ledger = loadLedgerHelper(*info, app, acquire);
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
return ledger;
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
loadByHash(uint256 const& ledgerHash, Application& app, bool acquire)
|
||||
{
|
||||
if (std::optional<LedgerHeader> info =
|
||||
app.getRelationalDatabase().getLedgerInfoByHash(ledgerHash))
|
||||
{
|
||||
std::shared_ptr<Ledger> ledger = loadLedgerHelper(*info, app, acquire);
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
XRPL_ASSERT(
|
||||
!ledger || ledger->header().hash == ledgerHash,
|
||||
"xrpl::loadByHash : ledger hash match if loaded");
|
||||
return ledger;
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
@@ -485,25 +485,20 @@ canTransfer(
|
||||
}
|
||||
|
||||
TER
|
||||
rippleLockEscrowMPT(
|
||||
ApplyView& view,
|
||||
AccountID const& sender,
|
||||
STAmount const& amount,
|
||||
beast::Journal j)
|
||||
lockEscrowMPT(ApplyView& view, AccountID const& sender, STAmount const& amount, beast::Journal j)
|
||||
{
|
||||
auto const mptIssue = amount.get<MPTIssue>();
|
||||
auto const mptID = keylet::mptIssuance(mptIssue.getMptID());
|
||||
auto sleIssuance = view.peek(mptID);
|
||||
if (!sleIssuance)
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleLockEscrowMPT: MPT issuance not found for "
|
||||
<< mptIssue.getMptID();
|
||||
JLOG(j.error()) << "lockEscrowMPT: MPT issuance not found for " << mptIssue.getMptID();
|
||||
return tecOBJECT_NOT_FOUND;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
if (amount.getIssuer() == sender)
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleLockEscrowMPT: sender is the issuer, cannot lock MPTs.";
|
||||
JLOG(j.error()) << "lockEscrowMPT: sender is the issuer, cannot lock MPTs.";
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -514,7 +509,7 @@ rippleLockEscrowMPT(
|
||||
auto sle = view.peek(mptokenID);
|
||||
if (!sle)
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleLockEscrowMPT: MPToken not found for " << sender;
|
||||
JLOG(j.error()) << "lockEscrowMPT: MPToken not found for " << sender;
|
||||
return tecOBJECT_NOT_FOUND;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -524,8 +519,8 @@ rippleLockEscrowMPT(
|
||||
// Underflow check for subtraction
|
||||
if (!canSubtract(STAmount(mptIssue, amt), STAmount(mptIssue, pay)))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleLockEscrowMPT: insufficient MPTAmount for "
|
||||
<< to_string(sender) << ": " << amt << " < " << pay;
|
||||
JLOG(j.error()) << "lockEscrowMPT: insufficient MPTAmount for " << to_string(sender)
|
||||
<< ": " << amt << " < " << pay;
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -536,8 +531,8 @@ rippleLockEscrowMPT(
|
||||
|
||||
if (!canAdd(STAmount(mptIssue, locked), STAmount(mptIssue, pay)))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleLockEscrowMPT: overflow on locked amount for "
|
||||
<< to_string(sender) << ": " << locked << " + " << pay;
|
||||
JLOG(j.error()) << "lockEscrowMPT: overflow on locked amount for " << to_string(sender)
|
||||
<< ": " << locked << " + " << pay;
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -562,7 +557,7 @@ rippleLockEscrowMPT(
|
||||
// Overflow check for addition
|
||||
if (!canAdd(STAmount(mptIssue, issuanceEscrowed), STAmount(mptIssue, pay)))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleLockEscrowMPT: overflow on issuance "
|
||||
JLOG(j.error()) << "lockEscrowMPT: overflow on issuance "
|
||||
"locked amount for "
|
||||
<< mptIssue.getMptID() << ": " << issuanceEscrowed << " + " << pay;
|
||||
return tecINTERNAL;
|
||||
@@ -583,7 +578,7 @@ rippleLockEscrowMPT(
|
||||
}
|
||||
|
||||
TER
|
||||
rippleUnlockEscrowMPT(
|
||||
unlockEscrowMPT(
|
||||
ApplyView& view,
|
||||
AccountID const& sender,
|
||||
AccountID const& receiver,
|
||||
@@ -593,8 +588,7 @@ rippleUnlockEscrowMPT(
|
||||
{
|
||||
if (!view.rules().enabled(fixTokenEscrowV1))
|
||||
{
|
||||
XRPL_ASSERT(
|
||||
netAmount == grossAmount, "xrpl::rippleUnlockEscrowMPT : netAmount == grossAmount");
|
||||
XRPL_ASSERT(netAmount == grossAmount, "xrpl::unlockEscrowMPT : netAmount == grossAmount");
|
||||
}
|
||||
|
||||
auto const& issuer = netAmount.getIssuer();
|
||||
@@ -603,8 +597,7 @@ rippleUnlockEscrowMPT(
|
||||
auto sleIssuance = view.peek(mptID);
|
||||
if (!sleIssuance)
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: MPT issuance not found for "
|
||||
<< mptIssue.getMptID();
|
||||
JLOG(j.error()) << "unlockEscrowMPT: MPT issuance not found for " << mptIssue.getMptID();
|
||||
return tecOBJECT_NOT_FOUND;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -612,7 +605,7 @@ rippleUnlockEscrowMPT(
|
||||
{
|
||||
if (!sleIssuance->isFieldPresent(sfLockedAmount))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: no locked amount in issuance for "
|
||||
JLOG(j.error()) << "unlockEscrowMPT: no locked amount in issuance for "
|
||||
<< mptIssue.getMptID();
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
@@ -623,7 +616,7 @@ rippleUnlockEscrowMPT(
|
||||
// Underflow check for subtraction
|
||||
if (!canSubtract(STAmount(mptIssue, locked), STAmount(mptIssue, redeem)))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: insufficient locked amount for "
|
||||
JLOG(j.error()) << "unlockEscrowMPT: insufficient locked amount for "
|
||||
<< mptIssue.getMptID() << ": " << locked << " < " << redeem;
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
@@ -647,7 +640,7 @@ rippleUnlockEscrowMPT(
|
||||
auto sle = view.peek(mptokenID);
|
||||
if (!sle)
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: MPToken not found for " << receiver;
|
||||
JLOG(j.error()) << "unlockEscrowMPT: MPToken not found for " << receiver;
|
||||
return tecOBJECT_NOT_FOUND;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -657,8 +650,8 @@ rippleUnlockEscrowMPT(
|
||||
// Overflow check for addition
|
||||
if (!canAdd(STAmount(mptIssue, current), STAmount(mptIssue, delta)))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: overflow on MPTAmount for "
|
||||
<< to_string(receiver) << ": " << current << " + " << delta;
|
||||
JLOG(j.error()) << "unlockEscrowMPT: overflow on MPTAmount for " << to_string(receiver)
|
||||
<< ": " << current << " + " << delta;
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -674,7 +667,7 @@ rippleUnlockEscrowMPT(
|
||||
// Underflow check for subtraction
|
||||
if (!canSubtract(STAmount(mptIssue, outstanding), STAmount(mptIssue, redeem)))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: insufficient outstanding amount for "
|
||||
JLOG(j.error()) << "unlockEscrowMPT: insufficient outstanding amount for "
|
||||
<< mptIssue.getMptID() << ": " << outstanding << " < " << redeem;
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
@@ -685,7 +678,7 @@ rippleUnlockEscrowMPT(
|
||||
|
||||
if (issuer == sender)
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: sender is the issuer, "
|
||||
JLOG(j.error()) << "unlockEscrowMPT: sender is the issuer, "
|
||||
"cannot unlock MPTs.";
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
@@ -694,14 +687,13 @@ rippleUnlockEscrowMPT(
|
||||
auto sle = view.peek(mptokenID);
|
||||
if (!sle)
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: MPToken not found for " << sender;
|
||||
JLOG(j.error()) << "unlockEscrowMPT: MPToken not found for " << sender;
|
||||
return tecOBJECT_NOT_FOUND;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
if (!sle->isFieldPresent(sfLockedAmount))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: no locked amount in MPToken for "
|
||||
<< to_string(sender);
|
||||
JLOG(j.error()) << "unlockEscrowMPT: no locked amount in MPToken for " << to_string(sender);
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -711,8 +703,8 @@ rippleUnlockEscrowMPT(
|
||||
// Underflow check for subtraction
|
||||
if (!canSubtract(STAmount(mptIssue, locked), STAmount(mptIssue, delta)))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: insufficient locked amount for "
|
||||
<< to_string(sender) << ": " << locked << " < " << delta;
|
||||
JLOG(j.error()) << "unlockEscrowMPT: insufficient locked amount for " << to_string(sender)
|
||||
<< ": " << locked << " < " << delta;
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -738,7 +730,7 @@ rippleUnlockEscrowMPT(
|
||||
// Underflow check for subtraction
|
||||
if (!canSubtract(STAmount(mptIssue, outstanding), STAmount(mptIssue, diff)))
|
||||
{ // LCOV_EXCL_START
|
||||
JLOG(j.error()) << "rippleUnlockEscrowMPT: insufficient outstanding amount for "
|
||||
JLOG(j.error()) << "unlockEscrowMPT: insufficient outstanding amount for "
|
||||
<< mptIssue.getMptID() << ": " << outstanding << " < " << diff;
|
||||
return tecINTERNAL;
|
||||
} // LCOV_EXCL_STOP
|
||||
|
||||
@@ -513,7 +513,7 @@ canTransfer(ReadView const& view, Asset const& asset, AccountID const& from, Acc
|
||||
// - Create trust line if needed.
|
||||
// --> bCheckIssuer : normally require issuer to be involved.
|
||||
static TER
|
||||
rippleCreditIOU(
|
||||
directSendIOU(
|
||||
ApplyView& view,
|
||||
AccountID const& uSenderID,
|
||||
AccountID const& uReceiverID,
|
||||
@@ -527,20 +527,20 @@ rippleCreditIOU(
|
||||
// Make sure issuer is involved.
|
||||
XRPL_ASSERT(
|
||||
!bCheckIssuer || uSenderID == issuer || uReceiverID == issuer,
|
||||
"xrpl::rippleCreditIOU : matching issuer or don't care");
|
||||
"xrpl::directSendIOU : matching issuer or don't care");
|
||||
(void)issuer;
|
||||
|
||||
// Disallow sending to self.
|
||||
XRPL_ASSERT(uSenderID != uReceiverID, "xrpl::rippleCreditIOU : sender is not receiver");
|
||||
XRPL_ASSERT(uSenderID != uReceiverID, "xrpl::directSendIOU : sender is not receiver");
|
||||
|
||||
bool const bSenderHigh = uSenderID > uReceiverID;
|
||||
auto const index = keylet::line(uSenderID, uReceiverID, currency);
|
||||
|
||||
XRPL_ASSERT(
|
||||
!isXRP(uSenderID) && uSenderID != noAccount(), "xrpl::rippleCreditIOU : sender is not XRP");
|
||||
!isXRP(uSenderID) && uSenderID != noAccount(), "xrpl::directSendIOU : sender is not XRP");
|
||||
XRPL_ASSERT(
|
||||
!isXRP(uReceiverID) && uReceiverID != noAccount(),
|
||||
"xrpl::rippleCreditIOU : receiver is not XRP");
|
||||
"xrpl::directSendIOU : receiver is not XRP");
|
||||
|
||||
// If the line exists, modify it accordingly.
|
||||
if (auto const sleRippleState = view.peek(index))
|
||||
@@ -556,7 +556,7 @@ rippleCreditIOU(
|
||||
|
||||
saBalance -= saAmount;
|
||||
|
||||
JLOG(j.trace()) << "rippleCreditIOU: " << to_string(uSenderID) << " -> "
|
||||
JLOG(j.trace()) << "directSendIOU: " << to_string(uSenderID) << " -> "
|
||||
<< to_string(uReceiverID) << " : before=" << saBefore.getFullText()
|
||||
<< " amount=" << saAmount.getFullText()
|
||||
<< " after=" << saBalance.getFullText();
|
||||
@@ -623,7 +623,7 @@ rippleCreditIOU(
|
||||
|
||||
saBalance.setIssuer(noAccount());
|
||||
|
||||
JLOG(j.debug()) << "rippleCreditIOU: "
|
||||
JLOG(j.debug()) << "directSendIOU: "
|
||||
"create line: "
|
||||
<< to_string(uSenderID) << " -> " << to_string(uReceiverID) << " : "
|
||||
<< saAmount.getFullText();
|
||||
@@ -675,7 +675,7 @@ rippleSendIOU(
|
||||
if (uSenderID == issuer || uReceiverID == issuer || issuer == noAccount())
|
||||
{
|
||||
// Direct send: redeeming IOUs and/or sending own IOUs.
|
||||
auto const ter = rippleCreditIOU(view, uSenderID, uReceiverID, saAmount, false, j);
|
||||
auto const ter = directSendIOU(view, uSenderID, uReceiverID, saAmount, false, j);
|
||||
if (!isTesSuccess(ter))
|
||||
return ter;
|
||||
saActual = saAmount;
|
||||
@@ -693,10 +693,10 @@ rippleSendIOU(
|
||||
<< to_string(uReceiverID) << " : deliver=" << saAmount.getFullText()
|
||||
<< " cost=" << saActual.getFullText();
|
||||
|
||||
TER terResult = rippleCreditIOU(view, issuer, uReceiverID, saAmount, true, j);
|
||||
TER terResult = directSendIOU(view, issuer, uReceiverID, saAmount, true, j);
|
||||
|
||||
if (tesSUCCESS == terResult)
|
||||
terResult = rippleCreditIOU(view, uSenderID, issuer, saActual, true, j);
|
||||
terResult = directSendIOU(view, uSenderID, issuer, saActual, true, j);
|
||||
|
||||
return terResult;
|
||||
}
|
||||
@@ -739,10 +739,10 @@ rippleSendMultiIOU(
|
||||
if (senderID == issuer || receiverID == issuer || issuer == noAccount())
|
||||
{
|
||||
// Direct send: redeeming IOUs and/or sending own IOUs.
|
||||
if (auto const ter = rippleCreditIOU(view, senderID, receiverID, amount, false, j))
|
||||
if (auto const ter = directSendIOU(view, senderID, receiverID, amount, false, j))
|
||||
return ter;
|
||||
actual += amount;
|
||||
// Do not add amount to takeFromSender, because rippleCreditIOU took
|
||||
// Do not add amount to takeFromSender, because directSendIOU took
|
||||
// it.
|
||||
|
||||
continue;
|
||||
@@ -762,13 +762,13 @@ rippleSendMultiIOU(
|
||||
<< to_string(receiverID) << " : deliver=" << amount.getFullText()
|
||||
<< " cost=" << actual.getFullText();
|
||||
|
||||
if (TER const terResult = rippleCreditIOU(view, issuer, receiverID, amount, true, j))
|
||||
if (TER const terResult = directSendIOU(view, issuer, receiverID, amount, true, j))
|
||||
return terResult;
|
||||
}
|
||||
|
||||
if (senderID != issuer && takeFromSender)
|
||||
{
|
||||
if (TER const terResult = rippleCreditIOU(view, senderID, issuer, takeFromSender, true, j))
|
||||
if (TER const terResult = directSendIOU(view, senderID, issuer, takeFromSender, true, j))
|
||||
return terResult;
|
||||
}
|
||||
|
||||
@@ -1022,7 +1022,7 @@ accountSendMultiIOU(
|
||||
}
|
||||
|
||||
static TER
|
||||
rippleCreditMPT(
|
||||
directSendMPT(
|
||||
ApplyView& view,
|
||||
AccountID const& uSenderID,
|
||||
AccountID const& uReceiverID,
|
||||
@@ -1122,7 +1122,7 @@ rippleSendMPT(
|
||||
}
|
||||
|
||||
// Direct send: redeeming MPTs and/or sending own MPTs.
|
||||
auto const ter = rippleCreditMPT(view, uSenderID, uReceiverID, saAmount, j);
|
||||
auto const ter = directSendMPT(view, uSenderID, uReceiverID, saAmount, j);
|
||||
if (!isTesSuccess(ter))
|
||||
return ter;
|
||||
saActual = saAmount;
|
||||
@@ -1138,11 +1138,11 @@ rippleSendMPT(
|
||||
<< to_string(uReceiverID) << " : deliver=" << saAmount.getFullText()
|
||||
<< " cost=" << saActual.getFullText();
|
||||
|
||||
if (auto const terResult = rippleCreditMPT(view, issuer, uReceiverID, saAmount, j);
|
||||
if (auto const terResult = directSendMPT(view, issuer, uReceiverID, saAmount, j);
|
||||
!isTesSuccess(terResult))
|
||||
return terResult;
|
||||
|
||||
return rippleCreditMPT(view, uSenderID, issuer, saActual, j);
|
||||
return directSendMPT(view, uSenderID, issuer, saActual, j);
|
||||
}
|
||||
|
||||
static TER
|
||||
@@ -1201,10 +1201,10 @@ rippleSendMultiMPT(
|
||||
}
|
||||
|
||||
// Direct send: redeeming MPTs and/or sending own MPTs.
|
||||
if (auto const ter = rippleCreditMPT(view, senderID, receiverID, amount, j))
|
||||
if (auto const ter = directSendMPT(view, senderID, receiverID, amount, j))
|
||||
return ter;
|
||||
actual += amount;
|
||||
// Do not add amount to takeFromSender, because rippleCreditMPT took
|
||||
// Do not add amount to takeFromSender, because directSendMPT took
|
||||
// it
|
||||
|
||||
continue;
|
||||
@@ -1221,12 +1221,12 @@ rippleSendMultiMPT(
|
||||
<< to_string(receiverID) << " : deliver=" << amount.getFullText()
|
||||
<< " cost=" << actualSend.getFullText();
|
||||
|
||||
if (auto const terResult = rippleCreditMPT(view, issuer, receiverID, amount, j))
|
||||
if (auto const terResult = directSendMPT(view, issuer, receiverID, amount, j))
|
||||
return terResult;
|
||||
}
|
||||
if (senderID != issuer && takeFromSender)
|
||||
{
|
||||
if (TER const terResult = rippleCreditMPT(view, senderID, issuer, takeFromSender, j))
|
||||
if (TER const terResult = directSendMPT(view, senderID, issuer, takeFromSender, j))
|
||||
return terResult;
|
||||
}
|
||||
|
||||
@@ -1278,7 +1278,7 @@ accountSendMultiMPT(
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
TER
|
||||
rippleCredit(
|
||||
directSend(
|
||||
ApplyView& view,
|
||||
AccountID const& uSenderID,
|
||||
AccountID const& uReceiverID,
|
||||
@@ -1290,12 +1290,12 @@ rippleCredit(
|
||||
[&]<ValidIssueType TIss>(TIss const& issue) {
|
||||
if constexpr (std::is_same_v<TIss, Issue>)
|
||||
{
|
||||
return rippleCreditIOU(view, uSenderID, uReceiverID, saAmount, bCheckIssuer, j);
|
||||
return directSendIOU(view, uSenderID, uReceiverID, saAmount, bCheckIssuer, j);
|
||||
}
|
||||
else
|
||||
{
|
||||
XRPL_ASSERT(!bCheckIssuer, "xrpl::rippleCredit : not checking issuer");
|
||||
return rippleCreditMPT(view, uSenderID, uReceiverID, saAmount, j);
|
||||
XRPL_ASSERT(!bCheckIssuer, "xrpl::directSend : not checking issuer");
|
||||
return directSendMPT(view, uSenderID, uReceiverID, saAmount, j);
|
||||
}
|
||||
},
|
||||
saAmount.asset().value());
|
||||
|
||||
@@ -160,7 +160,7 @@ getEncodedVersion()
|
||||
}
|
||||
|
||||
bool
|
||||
isRippledVersion(std::uint64_t version)
|
||||
isXrpldVersion(std::uint64_t version)
|
||||
{
|
||||
return (version & implementationVersionIdentifierMask) == implementationVersionIdentifier;
|
||||
}
|
||||
@@ -168,7 +168,7 @@ isRippledVersion(std::uint64_t version)
|
||||
bool
|
||||
isNewerVersion(std::uint64_t version)
|
||||
{
|
||||
if (isRippledVersion(version))
|
||||
if (isXrpldVersion(version))
|
||||
return version > getEncodedVersion();
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
#include <xrpl/basics/Slice.h>
|
||||
#include <xrpl/basics/chrono.h>
|
||||
#include <xrpl/protocol/HashPrefix.h>
|
||||
#include <xrpl/protocol/LedgerHeader.h>
|
||||
#include <xrpl/protocol/Serializer.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
@@ -51,4 +53,21 @@ deserializePrefixedHeader(Slice data, bool hasHash)
|
||||
return deserializeHeader(data + 4, hasHash);
|
||||
}
|
||||
|
||||
uint256
|
||||
calculateLedgerHash(LedgerHeader const& info)
|
||||
{
|
||||
// VFALCO This has to match addRaw in View.h.
|
||||
return sha512Half(
|
||||
HashPrefix::ledgerMaster,
|
||||
std::uint32_t(info.seq),
|
||||
std::uint64_t(info.drops.drops()),
|
||||
info.parentHash,
|
||||
info.txHash,
|
||||
info.accountHash,
|
||||
std::uint32_t(info.parentCloseTime.time_since_epoch().count()),
|
||||
std::uint32_t(info.closeTime.time_since_epoch().count()),
|
||||
std::uint8_t(info.closeTimeResolution.count()),
|
||||
std::uint8_t(info.closeFlags));
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
@@ -70,7 +70,7 @@ getNFTokenIDFromPage(TxMeta const& transactionMeta)
|
||||
// field changing, but no NFTs within that page changing. In this
|
||||
// case, there will be no previous NFTs and we need to skip.
|
||||
// However, there will always be NFTs listed in the final fields,
|
||||
// as rippled outputs all fields in final fields even if they were
|
||||
// as xrpld outputs all fields in final fields even if they were
|
||||
// not changed.
|
||||
STObject const& previousFields =
|
||||
node.peekAtField(sfPreviousFields).downcast<STObject>();
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/core/ServiceRegistry.h>
|
||||
#include <xrpl/rdb/DatabaseCon.h>
|
||||
#include <xrpl/rdb/SociDB.h>
|
||||
|
||||
@@ -40,11 +40,14 @@ public:
|
||||
}
|
||||
|
||||
std::shared_ptr<Checkpointer>
|
||||
create(std::shared_ptr<soci::session> const& session, JobQueue& jobQueue, Logs& logs)
|
||||
create(
|
||||
std::shared_ptr<soci::session> const& session,
|
||||
JobQueue& jobQueue,
|
||||
ServiceRegistry& registry)
|
||||
{
|
||||
std::lock_guard lock{mutex_};
|
||||
auto const id = nextId_++;
|
||||
auto const r = makeCheckpointer(id, session, jobQueue, logs);
|
||||
auto const r = makeCheckpointer(id, session, jobQueue, registry);
|
||||
checkpointers_[id] = r;
|
||||
return r;
|
||||
}
|
||||
@@ -82,11 +85,11 @@ DatabaseCon::~DatabaseCon()
|
||||
std::unique_ptr<std::vector<std::string> const> DatabaseCon::Setup::globalPragma;
|
||||
|
||||
void
|
||||
DatabaseCon::setupCheckpointing(JobQueue* q, Logs& l)
|
||||
DatabaseCon::setupCheckpointing(JobQueue* q, ServiceRegistry& registry)
|
||||
{
|
||||
if (q == nullptr)
|
||||
Throw<std::logic_error>("No JobQueue");
|
||||
checkpointer_ = checkpointers.create(session_, *q, l);
|
||||
checkpointer_ = checkpointers.create(session_, *q, registry);
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
@@ -187,8 +187,11 @@ public:
|
||||
std::uintptr_t id,
|
||||
std::weak_ptr<soci::session> session,
|
||||
JobQueue& q,
|
||||
Logs& logs)
|
||||
: id_(id), session_(std::move(session)), jobQueue_(q), j_(logs.journal("WALCheckpointer"))
|
||||
ServiceRegistry& registry)
|
||||
: id_(id)
|
||||
, session_(std::move(session))
|
||||
, jobQueue_(q)
|
||||
, j_(registry.getJournal("WALCheckpointer"))
|
||||
{
|
||||
if (auto [conn, keepAlive] = getConnection(); conn)
|
||||
{
|
||||
@@ -307,9 +310,9 @@ makeCheckpointer(
|
||||
std::uintptr_t id,
|
||||
std::weak_ptr<soci::session> session,
|
||||
JobQueue& queue,
|
||||
Logs& logs)
|
||||
ServiceRegistry& registry)
|
||||
{
|
||||
return std::make_shared<WALCheckpointer>(id, std::move(session), queue, logs);
|
||||
return std::make_shared<WALCheckpointer>(id, std::move(session), queue, registry);
|
||||
}
|
||||
|
||||
} // namespace xrpl
|
||||
|
||||
@@ -12,7 +12,7 @@ doVacuumDB(DatabaseCon::Setup const& setup, beast::Journal j)
|
||||
boost::filesystem::path dbPath = setup.dataDir / TxDBName;
|
||||
|
||||
uintmax_t const dbSize = file_size(dbPath);
|
||||
XRPL_ASSERT(dbSize != static_cast<uintmax_t>(-1), "ripple:doVacuumDB : file_size succeeded");
|
||||
XRPL_ASSERT(dbSize != static_cast<uintmax_t>(-1), "xrpl::doVacuumDB : file_size succeeded");
|
||||
|
||||
if (auto available = space(dbPath.parent_path()).available; available < dbSize)
|
||||
{
|
||||
@@ -36,7 +36,7 @@ doVacuumDB(DatabaseCon::Setup const& setup, beast::Journal j)
|
||||
std::cout << "VACUUM beginning. page_size: " << pageSize << std::endl;
|
||||
|
||||
session << "VACUUM;";
|
||||
XRPL_ASSERT(setup.globalPragma, "ripple:doVacuumDB : non-null global pragma");
|
||||
XRPL_ASSERT(setup.globalPragma, "xrpl::doVacuumDB : non-null global pragma");
|
||||
for (auto const& p : *setup.globalPragma)
|
||||
session << p;
|
||||
session << "PRAGMA page_size;", soci::into(pageSize);
|
||||
|
||||
@@ -12,8 +12,6 @@ namespace xrpl {
|
||||
Expected<std::vector<SignerEntries::SignerEntry>, NotTEC>
|
||||
SignerEntries::deserialize(STObject const& obj, beast::Journal journal, std::string_view annotation)
|
||||
{
|
||||
std::pair<std::vector<SignerEntry>, NotTEC> s;
|
||||
|
||||
if (!obj.isFieldPresent(sfSignerEntries))
|
||||
{
|
||||
JLOG(journal.trace()) << "Malformed " << annotation << ": Need signer entry array.";
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/core/NetworkIDService.h>
|
||||
#include <xrpl/json/to_string.h>
|
||||
@@ -302,7 +301,7 @@ Transactor::calculateOwnerReserveFee(ReadView const& view, STTx const& tx)
|
||||
// need to rethink charging an owner reserve as a transaction fee.
|
||||
// TODO: This function is static, and I don't want to add more parameters.
|
||||
// When it is finally refactored to be in a context that has access to the
|
||||
// Application, include "app().overlay().networkID() > 2 ||" in the
|
||||
// Application, include "app().getOverlay().networkID() > 2 ||" in the
|
||||
// condition.
|
||||
XRPL_ASSERT(
|
||||
view.fees().increment > view.fees().base * 100,
|
||||
@@ -677,8 +676,7 @@ Transactor::checkSign(
|
||||
}
|
||||
|
||||
// Look up the account.
|
||||
auto const idSigner =
|
||||
pkSigner.empty() ? idAccount : calcAccountID(PublicKey(makeSlice(pkSigner)));
|
||||
auto const idSigner = calcAccountID(PublicKey(makeSlice(pkSigner)));
|
||||
auto const sleAccount = view.read(keylet::account(idAccount));
|
||||
if (!sleAccount)
|
||||
return terNO_ACCOUNT;
|
||||
@@ -1097,7 +1095,7 @@ Transactor::operator()()
|
||||
}
|
||||
#endif
|
||||
|
||||
if (auto const& trap = ctx_.registry.trapTxID(); trap && *trap == ctx_.tx.getTransactionID())
|
||||
if (auto const& trap = ctx_.registry.getTrapTxID(); trap && *trap == ctx_.tx.getTransactionID())
|
||||
{
|
||||
trapTransaction(*trap);
|
||||
}
|
||||
@@ -1199,16 +1197,25 @@ Transactor::operator()()
|
||||
|
||||
// If necessary, remove any offers found unfunded during processing
|
||||
if ((result == tecOVERSIZE) || (result == tecKILLED))
|
||||
removeUnfundedOffers(view(), removedOffers, ctx_.registry.journal("View"));
|
||||
{
|
||||
removeUnfundedOffers(view(), removedOffers, ctx_.registry.getJournal("View"));
|
||||
}
|
||||
|
||||
if (result == tecEXPIRED)
|
||||
removeExpiredNFTokenOffers(view(), expiredNFTokenOffers, ctx_.registry.journal("View"));
|
||||
{
|
||||
removeExpiredNFTokenOffers(
|
||||
view(), expiredNFTokenOffers, ctx_.registry.getJournal("View"));
|
||||
}
|
||||
|
||||
if (result == tecINCOMPLETE)
|
||||
removeDeletedTrustLines(view(), removedTrustLines, ctx_.registry.journal("View"));
|
||||
{
|
||||
removeDeletedTrustLines(view(), removedTrustLines, ctx_.registry.getJournal("View"));
|
||||
}
|
||||
|
||||
if (result == tecEXPIRED)
|
||||
removeExpiredCredentials(view(), expiredCredentials, ctx_.registry.journal("View"));
|
||||
{
|
||||
removeExpiredCredentials(view(), expiredCredentials, ctx_.registry.getJournal("View"));
|
||||
}
|
||||
|
||||
applied = isTecClaim(result);
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user