Compare commits

..

18 Commits

Author SHA1 Message Date
mathbunnyru
99a33777c9 style: Update pre-commit hooks 2025-07-02 15:24:51 +00:00
Ayaz Salikhov
379a44641b fix: Import a GPG key when running pre-commit-autoupdate (#2287) 2025-07-02 16:23:13 +01:00
Ayaz Salikhov
18b8fc7e5c ci: Update LLVM tools to v20 (#2278)
Trying in https://github.com/XRPLF/clio/pull/2280
2025-07-02 14:55:54 +01:00
Ayaz Salikhov
be2d915df7 fix: Cleanup fmt headers (#2285) 2025-07-02 14:55:24 +01:00
Ayaz Salikhov
57dda8ac50 chore: Update fmt to 11.2.0 (#2281)
Seems that new clang-tidy fails with our old fmt lib
2025-07-02 14:11:35 +01:00
Ayaz Salikhov
5cdd8a642f fix: Use .contains() method where available (#2277) 2025-07-01 18:28:12 +01:00
Ayaz Salikhov
8abc9c6645 style: Add pre-commit hook to fix JSON style in C++ code (#2266) 2025-06-30 15:28:59 +01:00
Ayaz Salikhov
24e1aa9ae5 fix: Only set package_id:confs for sanitized builds (#2261) 2025-06-30 13:15:28 +01:00
Ayaz Salikhov
9bee023105 fix: Do not allow command injection in GitHub workflows (#2270) 2025-06-30 12:03:06 +01:00
github-actions[bot]
4ee3ef94d9 style: clang-tidy auto fixes (#2272)
Fixes #2271. 

Co-authored-by: godexsoft <385326+godexsoft@users.noreply.github.com>
2025-06-30 11:40:45 +01:00
Ayaz Salikhov
8fcc2dfa19 fix: Pin lxml<6.0.0 (#2269) 2025-06-27 18:56:14 +01:00
Ayaz Salikhov
123e09695e feat: Switch to xrpl/2.5.0 release (#2267) 2025-06-27 17:13:05 +01:00
Peter Chen
371237487b feat: Support single asset vault (#1979)
fixes #1921

---------

Co-authored-by: Sergey Kuznetsov <skuznetsov@ripple.com>
Co-authored-by: Ayaz Salikhov <mathbunnyru@users.noreply.github.com>
2025-06-27 15:27:34 +01:00
Ayaz Salikhov
d97f19ba1d style: Fix JSON style in C++ code (#2262) 2025-06-27 11:45:11 +01:00
github-actions[bot]
e92dbc8fce style: clang-tidy auto fixes (#2264)
Fixes #2263. Please review and commit clang-tidy fixes.

Co-authored-by: godexsoft <385326+godexsoft@users.noreply.github.com>
2025-06-27 10:30:17 +01:00
Ayaz Salikhov
769fdab6b7 feat: Add Support For Token Escrow (#2252)
Fix: https://github.com/XRPLF/clio/issues/2174
2025-06-26 18:03:26 +01:00
Ayaz Salikhov
363344d36e feat: Add init_conan script (#2242)
This should make life of a developer so much easier
2025-06-26 17:12:32 +01:00
Ayaz Salikhov
4f7e8194f0 fix: Don't cancel ci image builds (#2259) 2025-06-26 14:51:34 +01:00
212 changed files with 7549 additions and 5060 deletions

View File

@@ -1,39 +0,0 @@
name: Setup conan
description: Setup conan profiles and artifactory on macOS runner
inputs:
conan_files_dir:
description: Directory with conan files
required: true
runs:
using: composite
steps:
- name: Fail on non-macOS
if: runner.os != 'macOS'
shell: bash
run: exit 1
- name: Copy global.conf
shell: bash
run: |
cp "${{ inputs.conan_files_dir }}/global.conf" "${{ env.CONAN_HOME }}/global.conf"
- name: Create apple-clang conan profile
shell: bash
run: |
mkdir -p "${{ env.CONAN_HOME }}/profiles"
cp .github/actions/setup_conan_macos/apple-clang.profile "${{ env.CONAN_HOME }}/profiles/apple-clang"
- name: Create conan profiles for sanitizers
shell: bash
working-directory: ${{ inputs.conan_files_dir }}
run: |
cp ./sanitizer_template.profile "${{ env.CONAN_HOME }}/profiles/apple-clang.asan"
cp ./sanitizer_template.profile "${{ env.CONAN_HOME }}/profiles/apple-clang.tsan"
cp ./sanitizer_template.profile "${{ env.CONAN_HOME }}/profiles/apple-clang.ubsan"
- name: Add artifactory remote
shell: bash
run: |
conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev

View File

@@ -142,16 +142,3 @@ updates:
commit-message:
prefix: "ci: [DEPENDABOT] "
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/setup_conan_macos/
schedule:
interval: weekly
day: monday
time: "04:00"
timezone: Etc/GMT
reviewers:
- XRPLF/clio-dev-team
commit-message:
prefix: "ci: [DEPENDABOT] "
target-branch: develop

43
.github/scripts/conan/init.sh vendored Executable file
View File

@@ -0,0 +1,43 @@
#!/bin/bash

# Bootstrap a clean Conan 2 setup for Clio development and CI.
# Deletes any existing Conan home, registers the ripple Artifactory remote,
# and installs compiler profiles with .asan/.tsan/.ubsan sanitizer variants.

set -ex

# Resolve the script's own directory and the repository root relative to it
# (the script lives three levels deep, in .github/scripts/conan/).
CURRENT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_DIR="$(cd "$CURRENT_DIR/../../../" && pwd)"

# Honour $CONAN_HOME if set; otherwise use Conan's default home directory.
CONAN_DIR="${CONAN_HOME:-$HOME/.conan2}"
PROFILES_DIR="$CONAN_DIR/profiles"

# Profile sources: apple-clang ships next to this script; the Linux profiles
# and the sanitizer template are shared with the CI docker image.
APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang.profile"
GCC_PROFILE="$REPO_DIR/docker/ci/conan/gcc.profile"
CLANG_PROFILE="$REPO_DIR/docker/ci/conan/clang.profile"
SANITIZER_TEMPLATE_FILE="$REPO_DIR/docker/ci/conan/sanitizer_template.profile"

# Start from scratch: wipe the whole Conan home (destructive by design).
rm -rf "$CONAN_DIR"

# NOTE(review): the cp below assumes `conan remote add` recreates the Conan
# home directory after the rm -rf above — confirm with the pinned conan version.
conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev
cp "$REPO_DIR/docker/ci/conan/global.conf" "$CONAN_DIR/global.conf"

# Install a base compiler profile plus sanitizer variants copied from the
# shared sanitizer template. $1 = profile name, $2 = source profile file.
create_profile_with_sanitizers() {
profile_name="$1"
profile_source="$2"
cp "$profile_source" "$PROFILES_DIR/$profile_name"
cp "$SANITIZER_TEMPLATE_FILE" "$PROFILES_DIR/$profile_name.asan"
cp "$SANITIZER_TEMPLATE_FILE" "$PROFILES_DIR/$profile_name.tsan"
cp "$SANITIZER_TEMPLATE_FILE" "$PROFILES_DIR/$profile_name.ubsan"
}

mkdir -p "$PROFILES_DIR"

# macOS gets apple-clang; Linux gets both clang and gcc, defaulting to gcc.
if [[ "$(uname)" == "Darwin" ]]; then
create_profile_with_sanitizers "apple-clang" "$APPLE_CLANG_PROFILE"
echo "include(apple-clang)" > "$PROFILES_DIR/default"
else
create_profile_with_sanitizers "clang" "$CLANG_PROFILE"
create_profile_with_sanitizers "gcc" "$GCC_PROFILE"
echo "include(gcc)" > "$PROFILES_DIR/default"
fi

View File

@@ -77,11 +77,10 @@ jobs:
with:
disable_ccache: ${{ inputs.disable_cache }}
- name: Setup conan
- name: Setup conan on macOS
if: runner.os == 'macOS'
uses: ./.github/actions/setup_conan_macos
with:
conan_files_dir: docker/ci/conan/
shell: bash
run: ./.github/scripts/conan/init.sh
- name: Restore cache
if: ${{ !inputs.disable_cache }}

View File

@@ -18,6 +18,7 @@ concurrency:
env:
CONAN_PROFILE: clang
LLVM_TOOLS_VERSION: 20
jobs:
clang_tidy:
@@ -61,7 +62,7 @@ jobs:
shell: bash
id: run_clang_tidy
run: |
run-clang-tidy-19 -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt
run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt
- name: Fix local includes and clang-format style
if: ${{ steps.run_clang_tidy.outcome != 'success' }}

View File

@@ -26,6 +26,14 @@ jobs:
- run: pre-commit autoupdate --freeze
- run: pre-commit run --all-files || true
- uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6.3.0
if: github.event_name != 'pull_request'
with:
gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
git_user_signingkey: true
git_commit_gpgsign: true
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
if: always()
env:

View File

@@ -69,9 +69,9 @@ jobs:
shell: bash
if: ${{ inputs.generate_changelog }}
run: |
LAST_TAG=$(gh release view --json tagName -q .tagName)
LAST_TAG_COMMIT=$(git rev-parse $LAST_TAG)
BASE_COMMIT=$(git merge-base HEAD $LAST_TAG_COMMIT)
LAST_TAG="$(gh release view --json tagName -q .tagName)"
LAST_TAG_COMMIT="$(git rev-parse $LAST_TAG)"
BASE_COMMIT="$(git merge-base HEAD $LAST_TAG_COMMIT)"
git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b"
cat CHANGELOG.md >> "${RUNNER_TEMP}/release_notes.md"
@@ -108,10 +108,10 @@ jobs:
if: ${{ github.event_name != 'pull_request' }}
shell: bash
run: |
gh release create ${{ inputs.version }} \
gh release create "${{ inputs.version }}" \
${{ inputs.overwrite_release && '--prerelease' || '' }} \
--title "${{ inputs.title }}" \
--target $GITHUB_SHA \
--target "${GITHUB_SHA}" \
${{ inputs.draft && '--draft' || '' }} \
--notes-file "${RUNNER_TEMP}/release_notes.md" \
./release_artifacts/clio_server*

View File

@@ -23,7 +23,7 @@ on:
workflow_dispatch:
concurrency:
# Only cancel in-progress jobs or runs for the current workflow - matches against branch & tags
# Only matches runs for the current workflow - matches against branch & tags
group: ${{ github.workflow }}-${{ github.ref }}
# We want to execute all builds sequentially in develop
cancel-in-progress: false

View File

@@ -15,7 +15,12 @@ on:
- develop
paths:
- .github/workflows/upload_conan_deps.yml
- .github/scripts/generate_conan_matrix.py
- .github/actions/generate/action.yml
- .github/actions/prepare_runner/action.yml
- .github/scripts/conan/generate_matrix.py
- .github/scripts/conan/init.sh
- conanfile.py
- conan.lock
push:
@@ -23,7 +28,12 @@ on:
- develop
paths:
- .github/workflows/upload_conan_deps.yml
- .github/scripts/generate_conan_matrix.py
- .github/actions/generate/action.yml
- .github/actions/prepare_runner/action.yml
- .github/scripts/conan/generate_matrix.py
- .github/scripts/conan/init.sh
- conanfile.py
- conan.lock
@@ -41,7 +51,7 @@ jobs:
- name: Calculate conan matrix
id: set-matrix
run: .github/scripts/generate_conan_matrix.py >> "${GITHUB_OUTPUT}"
run: .github/scripts/conan/generate_matrix.py >> "${GITHUB_OUTPUT}"
upload-conan-deps:
name: Build ${{ matrix.compiler }}${{ matrix.sanitizer_ext }} ${{ matrix.build_type }}
@@ -66,11 +76,10 @@ jobs:
with:
disable_ccache: true
- name: Setup conan
- name: Setup conan on macOS
if: runner.os == 'macOS'
uses: ./.github/actions/setup_conan_macos
with:
conan_files_dir: docker/ci/conan/
shell: bash
run: ./.github/scripts/conan/init.sh
- name: Show conan profile
run: conan profile show --profile:all ${{ env.CONAN_PROFILE }}

View File

@@ -26,12 +26,12 @@ repos:
# Autoformat: YAML, JSON, Markdown, etc.
- repo: https://github.com/rbubley/mirrors-prettier
rev: 787fb9f542b140ba0b2aced38e6a3e68021647a3 # frozen: v3.5.3
rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
hooks:
- id: prettier
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: 586c3ea3f51230da42bab657c6a32e9e66c364f0 # frozen: v0.44.0
rev: 192ad822316c3a22fb3d3cc8aa6eafa0b8488360 # frozen: v0.45.0
hooks:
- id: markdownlint-fix
exclude: LICENSE.md
@@ -55,17 +55,32 @@ repos:
--ignore-words=pre-commit-hooks/codespell_ignore.txt,
]
# Running fix-local-includes before clang-format
# to ensure that the include order is correct.
# Running some C++ hooks before clang-format
# to ensure that the style is consistent.
- repo: local
hooks:
- id: json-in-cpp
name: Fix JSON style in C++
entry: pre-commit-hooks/json_in_cpp.py
types: [c++]
language: python
exclude: |
(?x)^(
tests/unit/etl/SubscriptionSourceTests.cpp|
tests/unit/web/ServerTests.cpp|
tests/unit/web/impl/ErrorHandlingTests.cpp|
tests/unit/web/ng/ServerTests.cpp|
tests/unit/web/ng/impl/ErrorHandlingTests.cpp
)$
- id: fix-local-includes
name: Fix Local Includes
entry: pre-commit-hooks/fix-local-includes.sh
types: [c++]
language: script
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: f9a52e87b6cdcb01b0a62b8611d9ba9f2dad0067 # frozen: v19.1.7
rev: 6b9072cd80691b1b48d80046d884409fb1d962d1 # frozen: v20.1.7
hooks:
- id: clang-format
args: [--style=file]

View File

@@ -8,7 +8,7 @@ if (lint)
endif ()
message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
else ()
find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-19" "clang-tidy" REQUIRED)
find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-20" "clang-tidy" REQUIRED)
endif ()
if (NOT _CLANG_TIDY_BIN)

View File

@@ -3,7 +3,7 @@
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1750263732.782",
"xxhash/0.8.2#7856c968c985b2981b707ee8f2413b2b%1750263730.908",
"xrpl/2.5.0-rc1#e5897e048ea5712d2c71561c507d949d%1750263725.455",
"xrpl/2.5.0#7880d1696f11fceb1d498570f1a184c8%1751035267.743",
"sqlite3/3.47.0#7a0904fd061f5f8a2366c294f9387830%1750263721.79",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1750263717.455",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1750263715.145",
@@ -20,6 +20,7 @@
"http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1750263668.751",
"gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1750263666.833",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1750263646.614",
"fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1746298708.362",
"fmt/10.1.1#021e170cf81db57da82b5f737b6906c1%1750263644.741",
"date/3.0.3#cf28fe9c0aab99fe12da08aa42df65e1%1750263643.099",
"cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1750263632.157",
@@ -33,7 +34,7 @@
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1750263732.782",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1750263698.841",
"protobuf/3.21.9#64ce20e1d9ea24f3d6c504015d5f6fa8%1750263690.822",
"cmake/3.31.6#ed0e6c1d49bd564ce6fed1a19653b86d%1750263636.055",
"cmake/3.31.7#57c3e118bcf267552c0ea3f8bee1e7d5%1749863707.208",
"b2/5.3.2#7b5fabfe7088ae933fb3e78302343ea0%1750263614.565"
],
"python_requires": [],

View File

@@ -26,11 +26,11 @@ class ClioConan(ConanFile):
requires = [
'boost/1.83.0',
'cassandra-cpp-driver/2.17.0',
'fmt/10.1.1',
'fmt/11.2.0',
'protobuf/3.21.12',
'grpc/1.50.1',
'openssl/1.1.1v',
'xrpl/2.5.0-rc1',
'xrpl/2.5.0',
'zlib/1.3.1',
'libbacktrace/cci.20210118'
]

View File

@@ -16,7 +16,7 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"]
USER root
WORKDIR /root
ARG LLVM_TOOLS_VERSION=19
ARG LLVM_TOOLS_VERSION=20
# Add repositories
RUN apt-get update \
@@ -45,6 +45,10 @@ RUN apt-get update \
zip \
&& pip3 install -q --upgrade --no-cache-dir pip \
&& pip3 install -q --no-cache-dir \
# TODO: Remove this once we switch to newer Ubuntu base image
# lxml 6.0.0 is not compatible with our image
'lxml<6.0.0' \
\
cmake==3.31.6 \
conan==2.17.0 \
gcovr \

View File

@@ -8,4 +8,4 @@ compiler.version=16
os=Linux
[conf]
tools.build:compiler_executables={'c': '/usr/bin/clang-16', 'cpp': '/usr/bin/clang++-16'}
tools.build:compiler_executables={"c": "/usr/bin/clang-16", "cpp": "/usr/bin/clang++-16"}

View File

@@ -8,4 +8,4 @@ compiler.version=12
os=Linux
[conf]
tools.build:compiler_executables={'c': '/usr/bin/gcc-12', 'cpp': '/usr/bin/g++-12'}
tools.build:compiler_executables={"c": "/usr/bin/gcc-12", "cpp": "/usr/bin/g++-12"}

View File

@@ -1,3 +1,2 @@
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
tools.info.package_id:confs = ["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]

View File

@@ -1,7 +1,8 @@
{% set compiler, sani = profile_name.split('.') %}
{% set sanitizer_opt_map = {'asan': 'address', 'tsan': 'thread', 'ubsan': 'undefined'} %}
{% set sanitizer_opt_map = {"asan": "address", "tsan": "thread", "ubsan": "undefined"} %}
{% set sanitizer = sanitizer_opt_map[sani] %}
{% set sanitizer_build_flags_str = "-fsanitize=" ~ sanitizer ~ " -g -O1 -fno-omit-frame-pointer" %}
{% set sanitizer_build_flags = sanitizer_build_flags_str.split(' ') %}
{% set sanitizer_link_flags_str = "-fsanitize=" ~ sanitizer %}
@@ -10,11 +11,13 @@
include({{ compiler }})
[options]
boost/*:extra_b2_flags = "cxxflags=\"{{ sanitizer_build_flags_str }}\" linkflags=\"{{ sanitizer_link_flags_str }}\""
boost/*:without_stacktrace = True
boost/*:extra_b2_flags="cxxflags=\"{{ sanitizer_build_flags_str }}\" linkflags=\"{{ sanitizer_link_flags_str }}\""
boost/*:without_stacktrace=True
[conf]
tools.build:cflags += {{ sanitizer_build_flags }}
tools.build:cxxflags += {{ sanitizer_build_flags }}
tools.build:exelinkflags += {{ sanitizer_link_flags }}
tools.build:sharedlinkflags += {{ sanitizer_link_flags }}
tools.build:cflags+={{ sanitizer_build_flags }}
tools.build:cxxflags+={{ sanitizer_build_flags }}
tools.build:exelinkflags+={{ sanitizer_link_flags }}
tools.build:sharedlinkflags+={{ sanitizer_link_flags }}
tools.info.package_id:confs+=["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]

View File

@@ -6,7 +6,7 @@
## Minimum Requirements
- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55, <2.0](https://conan.io/downloads.html)
- [Conan 2.17.0](https://conan.io/downloads.html)
- [CMake 3.20, <4.0](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html): needed for code coverage generation
- [**Optional**] [CCache](https://ccache.dev/): speeds up compilation if you are going to compile Clio often
@@ -19,10 +19,21 @@
### Conan Configuration
Clio requires `compiler.cppstd=20` in your Conan profile (`~/.conan2/profiles/default`).
By default, Conan uses `~/.conan2` as its home folder.
You can change it by using `$CONAN_HOME` env variable.
[More info about Conan home](https://docs.conan.io/2/reference/environment.html#conan-home).
> [!NOTE]
> Although Clio is built using C++23, it's required to set `compiler.cppstd=20` for the time being as some of Clio's dependencies are not yet capable of building under C++23.
> [!TIP]
> To setup Conan automatically, you can run `.github/scripts/conan/init.sh`.
> This will delete Conan home directory (if it exists), set up profiles and add Artifactory remote.
The instruction below assumes that `$CONAN_HOME` is not set.
#### Profiles
The default profile is the file in `~/.conan2/profiles/default`.
Here are some examples of possible profiles:
**Mac apple-clang 16 example**:
@@ -53,13 +64,19 @@ compiler.version=12
os=Linux
[conf]
tools.build:compiler_executables={'c': '/usr/bin/gcc-12', 'cpp': '/usr/bin/g++-12'}
tools.build:compiler_executables={"c": "/usr/bin/gcc-12", "cpp": "/usr/bin/g++-12"}
```
Add the following to the `~/.conan2/global.conf` file:
> [!NOTE]
> Although Clio is built using C++23, it's required to set `compiler.cppstd=20` in your profile for the time being as some of Clio's dependencies are not yet capable of building under C++23.
#### global.conf file
To increase the speed of downloading and uploading packages, add the following to the `~/.conan2/global.conf` file:
```text
tools.info.package_id:confs = ["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
```
#### Artifactory
@@ -70,16 +87,7 @@ Make sure artifactory is setup with Conan.
conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev
```
Now you should be able to download the prebuilt `xrpl` package on some platforms.
> [!NOTE]
> You may need to edit the `~/.conan2/remotes.json` file to ensure that this newly added artifactory is listed last. Otherwise, you could see compilation errors when building the project with gcc version 13 (or newer).
Remove old packages you may have cached interactively.
```sh
conan remove xrpl
```
Now you should be able to download the prebuilt dependencies (including `xrpl` package) on supported platforms.
#### Conan lockfile
@@ -102,6 +110,7 @@ Navigate to Clio's root directory and run:
```sh
mkdir build && cd build
# You can also specify profile explicitly by adding `--profile:all <PROFILE_NAME>`
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True'
# You can also add -GNinja to use Ninja build system instead of Make
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
@@ -213,18 +222,16 @@ Sometimes, during development, you need to build against a custom version of `li
## Using `clang-tidy` for static analysis
The minimum [clang-tidy](https://clang.llvm.org/extra/clang-tidy/) version required is 19.0.
Clang-tidy can be run by CMake when building the project.
To achieve this, you just need to provide the option `-o '&:lint=True'` for the `conan install` command:
```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:lint=True'
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:lint=True' --profile:all clang
```
By default CMake will try to find `clang-tidy` automatically in your system.
To force CMake to use your desired binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the `clang-tidy` binary. For example:
```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@19/bin/clang-tidy
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm/bin/clang-tidy
```

View File

@@ -5,7 +5,6 @@
Clio needs access to a `rippled` server in order to work. The following configurations are required for Clio and `rippled` to communicate:
1. In the Clio config file, provide the following:
- The IP of the `rippled` server
- The port on which `rippled` is accepting unencrypted WebSocket connections
@@ -13,7 +12,6 @@ Clio needs access to a `rippled` server in order to work. The following configur
- The port on which `rippled` is handling gRPC requests
2. In the `rippled` config file, you need to open:
- A port to accept unencrypted WebSocket connections
- A port to handle gRPC requests, with the IP(s) of Clio specified in the `secure_gateway` entry

76
pre-commit-hooks/json_in_cpp.py Executable file
View File

@@ -0,0 +1,76 @@
#!/usr/bin/env python3
import argparse
import re
from pathlib import Path
def fix_json_style(cpp_content: str) -> str:
    """Normalize raw JSON string literals embedded in C++ source text.

    Upper-cases the ``R"json(...)json"`` delimiter to ``R"JSON(...)JSON"``
    and re-spaces each JSON payload: no space before ``:``/``,``, exactly
    one space after them.
    """
    normalized = cpp_content.replace('R"json(', 'R"JSON(').replace(')json"', ')JSON"')

    # Textual substitutions, applied sequentially to each JSON payload.
    substitutions = [
        (" :", ":"),
        (" ,", ","),
        (" null", "null"),
        (':"', ': "'),
        (',"', ', "'),
        ('":{', '": {'),
        ('":[', '": ['),
        ('":true', '": true'),
        ('":false', '": false'),
        ('":null', '": null'),
    ]
    substitutions.extend((f'":{digit}', f'": {digit}') for digit in range(10))

    def restyle(match):
        payload = match.group(1)
        for old, new in substitutions:
            payload = payload.replace(old, new)
        return f'R"JSON({payload})JSON"'

    # DOTALL lets a single literal span multiple lines.
    return re.sub(r'R"JSON\((.*?)\)JSON"', restyle, normalized, flags=re.DOTALL)
def process_file(file_path: Path, dry_run: bool) -> bool:
    """Re-style the JSON literals in one file.

    Returns True when the file already conformed (nothing to do) and False
    when changes were needed, so the pre-commit hook fails and the user
    re-stages the rewritten file. With ``dry_run`` the file is not modified.
    """
    original = file_path.read_text(encoding="utf-8")
    updated = fix_json_style(original)
    if updated == original:
        return True
    print(f"Processing file: {file_path}")
    if dry_run:
        print("Dry run: changes won't be written to the file.")
    else:
        print("Writing changes to file.")
        file_path.write_text(updated, encoding="utf-8")
    return False
def main():
    """CLI entry point: fix the JSON style in the given C++ files.

    Exits with status 1 when any file needed changes (or, with --dry-run,
    would need changes), matching pre-commit hook conventions.
    """
    parser = argparse.ArgumentParser(
        description="Fix JSON style in C++ files",
    )
    parser.add_argument(
        "--dry-run",
        default=False,
        action="store_true",
        help="Don't modify files, just print what would be changed",
    )
    parser.add_argument(
        "files",
        nargs="*",
        help="Specific files to process",
    )
    args = parser.parse_args()

    success = True
    for file in args.files:
        # Keep process_file() on the left of `and`: the previous form
        # `success and process_file(...)` short-circuited, silently
        # skipping every remaining file once one file needed changes.
        success = process_file(Path(file), dry_run=args.dry_run) and success

    if not success:
        print("Errors occurred while processing files.")
        exit(1)


if __name__ == "__main__":
    main()

View File

@@ -60,17 +60,17 @@ CliArgs::parse(int argc, char const* argv[])
po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
po::notify(parsed);
if (parsed.count("help") != 0u) {
if (parsed.contains("help")) {
std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n" << description;
return Action{Action::Exit{EXIT_SUCCESS}};
}
if (parsed.count("version") != 0u) {
if (parsed.contains("version")) {
std::cout << util::build::getClioFullVersionString() << '\n';
return Action{Action::Exit{EXIT_SUCCESS}};
}
if (parsed.count("config-description") != 0u) {
if (parsed.contains("config-description")) {
std::filesystem::path const filePath = parsed["config-description"].as<std::string>();
auto const res = util::config::ClioConfigDescription::generateConfigDescriptionToFile(filePath);
@@ -83,18 +83,17 @@ CliArgs::parse(int argc, char const* argv[])
auto configPath = parsed["conf"].as<std::string>();
if (parsed.count("migrate") != 0u) {
if (parsed.contains("migrate")) {
auto const opt = parsed["migrate"].as<std::string>();
if (opt == "status")
return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::status()}};
return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::migration(opt)}};
}
if (parsed.count("verify") != 0u)
if (parsed.contains("verify"))
return Action{Action::VerifyConfig{.configPath = std::move(configPath)}};
return Action{Action::Run{.configPath = std::move(configPath), .useNgWebServer = parsed.count("ng-web-server") != 0}
};
return Action{Action::Run{.configPath = std::move(configPath), .useNgWebServer = parsed.contains("ng-web-server")}};
}
} // namespace app

View File

@@ -78,17 +78,20 @@ WritingAmendmentKey::WritingAmendmentKey(std::string amendmentName) : AmendmentK
} // namespace impl
AmendmentKey::operator std::string const&() const
AmendmentKey::
operator std::string const&() const
{
return name;
}
AmendmentKey::operator std::string_view() const
AmendmentKey::
operator std::string_view() const
{
return name;
}
AmendmentKey::operator ripple::uint256() const
AmendmentKey::
operator ripple::uint256() const
{
return Amendment::getAmendmentId(name);
}

View File

@@ -49,35 +49,45 @@ durationInMillisecondsSince(std::chrono::steady_clock::time_point const startTim
using namespace util::prometheus;
BackendCounters::BackendCounters()
: tooBusyCounter_(PrometheusService::counterInt(
"backend_too_busy_total_number",
Labels(),
"The total number of times the backend was too busy to process a request"
))
, writeSyncCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({Label{"operation", "write_sync"}}),
"The total number of times the backend had to write synchronously"
))
, writeSyncRetryCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({Label{"operation", "write_sync_retry"}}),
"The total number of times the backend had to retry a synchronous write"
))
: tooBusyCounter_(
PrometheusService::counterInt(
"backend_too_busy_total_number",
Labels(),
"The total number of times the backend was too busy to process a request"
)
)
, writeSyncCounter_(
PrometheusService::counterInt(
"backend_operations_total_number",
Labels({Label{"operation", "write_sync"}}),
"The total number of times the backend had to write synchronously"
)
)
, writeSyncRetryCounter_(
PrometheusService::counterInt(
"backend_operations_total_number",
Labels({Label{"operation", "write_sync_retry"}}),
"The total number of times the backend had to retry a synchronous write"
)
)
, asyncWriteCounters_{"write_async"}
, asyncReadCounters_{"read_async"}
, readDurationHistogram_(PrometheusService::histogramInt(
"backend_duration_milliseconds_histogram",
Labels({Label{"operation", "read"}}),
kHISTOGRAM_BUCKETS,
"The duration of backend read operations including retries"
))
, writeDurationHistogram_(PrometheusService::histogramInt(
"backend_duration_milliseconds_histogram",
Labels({Label{"operation", "write"}}),
kHISTOGRAM_BUCKETS,
"The duration of backend write operations including retries"
))
, readDurationHistogram_(
PrometheusService::histogramInt(
"backend_duration_milliseconds_histogram",
Labels({Label{"operation", "read"}}),
kHISTOGRAM_BUCKETS,
"The duration of backend read operations including retries"
)
)
, writeDurationHistogram_(
PrometheusService::histogramInt(
"backend_duration_milliseconds_histogram",
Labels({Label{"operation", "write"}}),
kHISTOGRAM_BUCKETS,
"The duration of backend write operations including retries"
)
)
{
}
@@ -170,26 +180,34 @@ BackendCounters::report() const
BackendCounters::AsyncOperationCounters::AsyncOperationCounters(std::string name)
: name_(std::move(name))
, pendingCounter_(PrometheusService::gaugeInt(
"backend_operations_current_number",
Labels({{"operation", name_}, {"status", "pending"}}),
"The current number of pending " + name_ + " operations"
))
, completedCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "completed"}}),
"The total number of completed " + name_ + " operations"
))
, retryCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "retry"}}),
"The total number of retried " + name_ + " operations"
))
, errorCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "error"}}),
"The total number of errored " + name_ + " operations"
))
, pendingCounter_(
PrometheusService::gaugeInt(
"backend_operations_current_number",
Labels({{"operation", name_}, {"status", "pending"}}),
"The current number of pending " + name_ + " operations"
)
)
, completedCounter_(
PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "completed"}}),
"The total number of completed " + name_ + " operations"
)
)
, retryCounter_(
PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "retry"}}),
"The total number of retried " + name_ + " operations"
)
)
, errorCounter_(
PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "error"}}),
"The total number of errored " + name_ + " operations"
)
)
{
}

View File

@@ -234,8 +234,12 @@ public:
* @return A vector of ripple::uint256 representing the account roots
*/
virtual std::vector<ripple::uint256>
fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield)
const = 0;
fetchAccountRoots(
std::uint32_t number,
std::uint32_t pageSize,
std::uint32_t seq,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Updates the range of sequences that are stored in the DB.
@@ -459,8 +463,11 @@ public:
* @return The sequence in unit32_t on success; nullopt otherwise
*/
virtual std::optional<std::uint32_t>
doFetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield)
const = 0;
doFetchLedgerObjectSeq(
ripple::uint256 const& key,
std::uint32_t sequence,
boost::asio::yield_context yield
) const = 0;
/**
* @brief The database-specific implementation for fetching ledger objects.

View File

@@ -40,7 +40,7 @@
#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <cassandra.h>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
@@ -361,8 +361,10 @@ public:
}
std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const override
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto start = std::chrono::system_clock::now();
auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
@@ -392,8 +394,11 @@ public:
}
std::optional<NFT>
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const override
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
if (not res)
@@ -554,10 +559,9 @@ public:
selectNFTStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs),
std::cend(nftIDs),
std::back_inserter(selectNFTStatements),
[&](auto const& nftID) { return schema_->selectNFT.bind(nftID, ledgerSequence); }
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
return schema_->selectNFT.bind(nftID, ledgerSequence);
}
);
auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
@@ -566,10 +570,9 @@ public:
selectNFTURIStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs),
std::cend(nftIDs),
std::back_inserter(selectNFTURIStatements),
[&](auto const& nftID) { return schema_->selectNFTURI.bind(nftID, ledgerSequence); }
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
return schema_->selectNFTURI.bind(nftID, ledgerSequence);
}
);
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
@@ -626,8 +629,11 @@ public:
}
std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
const override
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
@@ -645,8 +651,11 @@ public:
}
std::optional<std::uint32_t>
doFetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
const override
doFetchLedgerObjectSeq(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
@@ -680,8 +689,11 @@ public:
}
std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const override
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
if (auto const result = res->template get<ripple::uint256>(); result) {
@@ -714,10 +726,9 @@ public:
auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
// TODO: seems like a job for "hash IN (list of hashes)" instead?
std::transform(
std::cbegin(hashes),
std::cend(hashes),
std::back_inserter(statements),
[this](auto const& hash) { return schema_->selectTransaction.bind(hash); }
std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
return schema_->selectTransaction.bind(hash);
}
);
auto const entries = executor_.readEach(yield, statements);
@@ -761,18 +772,14 @@ public:
// TODO: seems like a job for "key IN (list of keys)" instead?
std::transform(
std::cbegin(keys),
std::cend(keys),
std::back_inserter(statements),
[this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }
std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
return schema_->selectObject.bind(key, sequence);
}
);
auto const entries = executor_.readEach(yield, statements);
std::transform(
std::cbegin(entries),
std::cend(entries),
std::back_inserter(results),
[](auto const& res) -> Blob {
std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
if (auto const maybeValue = res.template get<Blob>(); maybeValue)
return *maybeValue;
@@ -785,8 +792,12 @@ public:
}
std::vector<ripple::uint256>
fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield)
const override
fetchAccountRoots(
std::uint32_t number,
std::uint32_t pageSize,
std::uint32_t seq,
boost::asio::yield_context yield
) const override
{
std::vector<ripple::uint256> liveAccounts;
std::optional<ripple::AccountID> lastItem;

File diff suppressed because it is too large Load Diff

View File

@@ -25,7 +25,7 @@
#include "util/log/Logger.hpp"
#include <cassandra.h>
#include <fmt/core.h>
#include <fmt/format.h>
#include <stdexcept>
#include <string>
@@ -45,7 +45,8 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
cass_cluster_set_token_aware_routing(*this, cass_true);
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc))
throw std::runtime_error(
fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc))
);
}

View File

@@ -45,11 +45,13 @@ public:
* @brief Create a new retry policy instance with the io_context provided
*/
ExponentialBackoffRetryPolicy(boost::asio::io_context& ioc)
: retry_(util::makeRetryExponentialBackoff(
std::chrono::milliseconds(1),
std::chrono::seconds(1),
boost::asio::make_strand(ioc)
))
: retry_(
util::makeRetryExponentialBackoff(
std::chrono::milliseconds(1),
std::chrono::seconds(1),
boost::asio::make_strand(ioc)
)
)
{
}

View File

@@ -28,7 +28,7 @@
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_io.hpp>
#include <cassandra.h>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/STAccount.h>

View File

@@ -171,9 +171,11 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
auto pipe = DataPipeType{numExtractors, startSequence};
for (auto i = 0u; i < numExtractors; ++i) {
extractors.push_back(std::make_unique<ExtractorType>(
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_
));
extractors.push_back(
std::make_unique<ExtractorType>(
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_
)
);
}
auto transformer =

View File

@@ -44,7 +44,7 @@
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <algorithm>
#include <chrono>
@@ -184,11 +184,14 @@ LoadBalancer::LoadBalancer(
LOG(log_.warn()) << "Failed to fetch ETL state from source = " << source->toString()
<< " Please check the configuration and network";
} else if (etlState_ && etlState_->networkID != stateOpt->networkID) {
checkOnETLFailure(fmt::format(
"ETL sources must be on the same network. Source network id = {} does not match others network id = {}",
stateOpt->networkID,
etlState_->networkID
));
checkOnETLFailure(
fmt::format(
"ETL sources must be on the same network. Source network id = {} does not match others network id "
"= {}",
stateOpt->networkID,
etlState_->networkID
)
);
} else {
etlState_ = stateOpt;
}
@@ -278,9 +281,8 @@ LoadBalancer::forwardToRippled(
if (forwardingCache_ and forwardingCache_->shouldCache(cmd)) {
bool servedFromCache = true;
auto updater =
[this, &request, &clientIp, &servedFromCache, isAdmin](boost::asio::yield_context yield
) -> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
auto updater = [this, &request, &clientIp, &servedFromCache, isAdmin](boost::asio::yield_context yield)
-> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
servedFromCache = false;
auto result = forwardToRippledImpl(request, clientIp, isAdmin, yield);
if (result.has_value()) {
@@ -294,10 +296,9 @@ LoadBalancer::forwardToRippled(
};
auto result = forwardingCache_->getOrUpdate(
yield,
cmd,
std::move(updater),
[](util::ResponseExpirationCache::EntryData const& entry) { return not entry.response.contains("error"); }
yield, cmd, std::move(updater), [](util::ResponseExpirationCache::EntryData const& entry) {
return not entry.response.contains("error");
}
);
if (servedFromCache) {
++forwardingCounters_.cacheHit.get();

View File

@@ -172,8 +172,10 @@ public:
* @return A std::vector<std::string> The ledger data
*/
std::vector<std::string>
loadInitialLedger(uint32_t sequence, std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2})
override;
loadInitialLedger(
uint32_t sequence,
std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2}
) override;
/**
* @brief Load the initial ledger, writing data to the queue.

View File

@@ -19,7 +19,7 @@
#include "data/DBHelpers.hpp"
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
@@ -138,7 +138,8 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
// There should always be a difference so the returned finalIDs
// iterator should never be end(). But better safe than sorry.
if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner) {
throw std::runtime_error(fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID()))
throw std::runtime_error(
fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID()))
);
}

View File

@@ -94,8 +94,8 @@ private:
double totalTime = 0.0;
auto currentSequence = startSequence_;
while (!shouldFinish(currentSequence) && networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence)
) {
while (!shouldFinish(currentSequence) &&
networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence)) {
auto [fetchResponse, time] = ::util::timed<std::chrono::duration<double>>([this, currentSequence]() {
return ledgerFetcher_.get().fetchDataAndDiff(currentSequence);
});

View File

@@ -28,7 +28,7 @@
#include <boost/json/object.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <chrono>
#include <exception>

View File

@@ -26,7 +26,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>

View File

@@ -209,47 +209,49 @@ public:
size_t numWrites = 0;
backend_->cache().setFull();
auto seconds = ::util::timed<std::chrono::seconds>([this, keys = std::move(edgeKeys), sequence, &numWrites](
) mutable {
for (auto& key : keys) {
LOG(log_.debug()) << "Writing edge key = " << ripple::strHex(key);
auto succ = backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence);
if (succ)
backend_->writeSuccessor(std::move(key), sequence, uint256ToString(succ->key));
}
ripple::uint256 prev = data::kFIRST_KEY;
while (auto cur = backend_->cache().getSuccessor(prev, sequence)) {
ASSERT(cur.has_value(), "Successor for key {} must exist", ripple::strHex(prev));
if (prev == data::kFIRST_KEY)
backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key));
if (isBookDir(cur->key, cur->blob)) {
auto base = getBookBase(cur->key);
// make sure the base is not an actual object
if (!backend_->cache().get(base, sequence)) {
auto succ = backend_->cache().getSuccessor(base, sequence);
ASSERT(succ.has_value(), "Book base {} must have a successor", ripple::strHex(base));
if (succ->key == cur->key) {
LOG(log_.debug()) << "Writing book successor = " << ripple::strHex(base) << " - "
<< ripple::strHex(cur->key);
backend_->writeSuccessor(uint256ToString(base), sequence, uint256ToString(cur->key));
}
}
++numWrites;
auto seconds =
::util::timed<std::chrono::seconds>([this, keys = std::move(edgeKeys), sequence, &numWrites]() mutable {
for (auto& key : keys) {
LOG(log_.debug()) << "Writing edge key = " << ripple::strHex(key);
auto succ = backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence);
if (succ)
backend_->writeSuccessor(std::move(key), sequence, uint256ToString(succ->key));
}
prev = cur->key;
static constexpr std::size_t kLOG_STRIDE = 100000;
if (numWrites % kLOG_STRIDE == 0 && numWrites != 0)
LOG(log_.info()) << "Wrote " << numWrites << " book successors";
}
ripple::uint256 prev = data::kFIRST_KEY;
while (auto cur = backend_->cache().getSuccessor(prev, sequence)) {
ASSERT(cur.has_value(), "Successor for key {} must exist", ripple::strHex(prev));
if (prev == data::kFIRST_KEY)
backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key));
backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(data::kLAST_KEY));
++numWrites;
});
if (isBookDir(cur->key, cur->blob)) {
auto base = getBookBase(cur->key);
// make sure the base is not an actual object
if (!backend_->cache().get(base, sequence)) {
auto succ = backend_->cache().getSuccessor(base, sequence);
ASSERT(succ.has_value(), "Book base {} must have a successor", ripple::strHex(base));
if (succ->key == cur->key) {
LOG(log_.debug()) << "Writing book successor = " << ripple::strHex(base) << " - "
<< ripple::strHex(cur->key);
backend_->writeSuccessor(
uint256ToString(base), sequence, uint256ToString(cur->key)
);
}
}
++numWrites;
}
prev = cur->key;
static constexpr std::size_t kLOG_STRIDE = 100000;
if (numWrites % kLOG_STRIDE == 0 && numWrites != 0)
LOG(log_.info()) << "Wrote " << numWrites << " book successors";
}
backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(data::kLAST_KEY));
++numWrites;
});
LOG(log_.info()) << "Looping through cache and submitting all writes took " << seconds
<< " seconds. numWrites = " << std::to_string(numWrites);

View File

@@ -249,8 +249,9 @@ public:
std::chrono::time_point<std::chrono::system_clock>
getLastPublish() const override
{
return std::chrono::time_point<std::chrono::system_clock>{std::chrono::seconds{lastPublishSeconds_.get().value()
}};
return std::chrono::time_point<std::chrono::system_clock>{
std::chrono::seconds{lastPublishSeconds_.get().value()}
};
}
/**

View File

@@ -40,7 +40,7 @@
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/protocol/jss.h>
#include <algorithm>
@@ -79,11 +79,13 @@ SubscriptionSource::SubscriptionSource(
, onConnect_(std::move(onConnect))
, onDisconnect_(std::move(onDisconnect))
, onLedgerClosed_(std::move(onLedgerClosed))
, lastMessageTimeSecondsSinceEpoch_(PrometheusService::gaugeInt(
"subscription_source_last_message_time",
util::prometheus::Labels({{"source", fmt::format("{}:{}", ip, wsPort)}}),
"Seconds since epoch of the last message received from rippled subscription streams"
))
, lastMessageTimeSecondsSinceEpoch_(
PrometheusService::gaugeInt(
"subscription_source_last_message_time",
util::prometheus::Labels({{"source", fmt::format("{}:{}", ip, wsPort)}}),
"Seconds since epoch of the last message received from rippled subscription streams"
)
)
{
wsConnectionBuilder_.addHeader({boost::beast::http::field::user_agent, "clio-client"})
.addHeader({"X-User", "clio-client"})
@@ -329,9 +331,13 @@ SubscriptionSource::setValidatedRange(std::string range)
pairs.emplace_back(sequence, sequence);
} else {
if (minAndMax.size() != 2) {
throw std::runtime_error(fmt::format(
"Error parsing range: {}.Min and max should be of size 2. Got size = {}", range, minAndMax.size()
));
throw std::runtime_error(
fmt::format(
"Error parsing range: {}.Min and max should be of size 2. Got size = {}",
range,
minAndMax.size()
)
);
}
uint32_t const min = std::stoll(minAndMax[0]);
uint32_t const max = std::stoll(minAndMax[1]);

View File

@@ -34,7 +34,7 @@
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/http/field.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <atomic>
#include <chrono>

View File

@@ -56,7 +56,6 @@
#include <boost/json/object.hpp>
#include <boost/signals2/connection.hpp>
#include <fmt/core.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <chrono>

View File

@@ -58,7 +58,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/json/object.hpp>
#include <boost/signals2/connection.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>

View File

@@ -45,7 +45,7 @@
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <algorithm>
#include <chrono>
@@ -184,11 +184,14 @@ LoadBalancer::LoadBalancer(
LOG(log_.warn()) << "Failed to fetch ETL state from source = " << source->toString()
<< " Please check the configuration and network";
} else if (etlState_ && etlState_->networkID != stateOpt->networkID) {
checkOnETLFailure(fmt::format(
"ETL sources must be on the same network. Source network id = {} does not match others network id = {}",
stateOpt->networkID,
etlState_->networkID
));
checkOnETLFailure(
fmt::format(
"ETL sources must be on the same network. Source network id = {} does not match others network id "
"= {}",
stateOpt->networkID,
etlState_->networkID
)
);
} else {
etlState_ = stateOpt;
}
@@ -284,9 +287,8 @@ LoadBalancer::forwardToRippled(
if (forwardingCache_ and forwardingCache_->shouldCache(cmd)) {
bool servedFromCache = true;
auto updater =
[this, &request, &clientIp, &servedFromCache, isAdmin](boost::asio::yield_context yield
) -> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
auto updater = [this, &request, &clientIp, &servedFromCache, isAdmin](boost::asio::yield_context yield)
-> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
servedFromCache = false;
auto result = forwardToRippledImpl(request, clientIp, isAdmin, yield);
if (result.has_value()) {
@@ -300,10 +302,9 @@ LoadBalancer::forwardToRippled(
};
auto result = forwardingCache_->getOrUpdate(
yield,
cmd,
std::move(updater),
[](util::ResponseExpirationCache::EntryData const& entry) { return not entry.response.contains("error"); }
yield, cmd, std::move(updater), [](util::ResponseExpirationCache::EntryData const& entry) {
return not entry.response.contains("error");
}
);
if (servedFromCache) {
++forwardingCounters_.cacheHit.get();

View File

@@ -22,7 +22,7 @@
#include "util/Concepts.hpp"
#include <boost/json/object.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/proto/org/xrpl/rpc/v1/get_ledger.pb.h>

View File

@@ -28,7 +28,7 @@
#include <boost/json/object.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <chrono>
#include <exception>

View File

@@ -27,7 +27,7 @@
#include "web/Resolver.hpp"
#include <boost/asio/spawn.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>

View File

@@ -34,7 +34,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/strand.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/protocol/Fees.h>
#include <xrpl/protocol/LedgerHeader.h>
@@ -227,8 +227,9 @@ public:
std::chrono::time_point<std::chrono::system_clock>
getLastPublish() const override
{
return std::chrono::time_point<std::chrono::system_clock>{std::chrono::seconds{lastPublishSeconds_.get().value()
}};
return std::chrono::time_point<std::chrono::system_clock>{
std::chrono::seconds{lastPublishSeconds_.get().value()}
};
}
/**

View File

@@ -159,8 +159,10 @@ public:
* @param transactions The transactions in the current ledger.
*/
void
pubBookChanges(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions)
final;
pubBookChanges(
ripple::LedgerHeader const& lgrInfo,
std::vector<data::TransactionAndMetadata> const& transactions
) final;
/**
* @brief Subscribe to the proposed transactions feed.

View File

@@ -57,9 +57,7 @@ ProposedTransactionFeed::sub(ripple::AccountID const& account, SubscriberSharedP
{
auto const weakPtr = std::weak_ptr(subscriber);
auto const added = accountSignal_.connectTrackableSlot(
subscriber,
account,
[this, weakPtr](std::shared_ptr<std::string> const& msg) {
subscriber, account, [this, weakPtr](std::shared_ptr<std::string> const& msg) {
if (auto connectionPtr = weakPtr.lock()) {
// Check if this connection already sent
if (notified_.contains(connectionPtr.get()))

View File

@@ -31,7 +31,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/asio/strand.hpp>
#include <boost/json/object.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Book.h>
#include <xrpl/protocol/LedgerHeader.h>

View File

@@ -280,7 +280,7 @@ TransactionFeed::pub(
data->getFieldAmount(ripple::sfTakerPays).issue(),
(*data)[~ripple::sfDomainID]
};
if (affectedBooks.find(book) == affectedBooks.end()) {
if (!affectedBooks.contains(book)) {
affectedBooks.insert(book);
}
}

View File

@@ -33,7 +33,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/asio/strand.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Book.h>
#include <xrpl/protocol/LedgerHeader.h>

View File

@@ -23,7 +23,7 @@
#include "util/prometheus/Label.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <fmt/core.h>
#include <fmt/format.h>
#include <string>

View File

@@ -60,7 +60,6 @@ Most indexes are based on either ledger states or transactions. We provide the `
If you need to do a full scan against another table, you can follow the steps below:
- Describe the table which needs a full scan in a struct. It has to satisfy the `TableSpec` (cassandra/Spec.hpp) concept, containing these static members:
- Tuple type `Row`: the type of each field in a row. The order of types should match what the database will return in a row. Key types should come first, followed by other field types sorted in alphabetical order.
- `kPARTITION_KEY`: the name of the partition key of the table.
- `kTABLE_NAME`

View File

@@ -24,7 +24,7 @@
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include <fmt/core.h>
#include <fmt/format.h>
#include <functional>
#include <string>
@@ -63,16 +63,18 @@ public:
std::string const& key
)
{
return handler.prepare(fmt::format(
R"(
return handler.prepare(
fmt::format(
R"(
SELECT *
FROM {}
WHERE TOKEN({}) >= ? AND TOKEN({}) <= ?
)",
data::cassandra::qualifiedTableName<SettingsProviderType>(settingsProvider_.get(), tableName),
key,
key
));
data::cassandra::qualifiedTableName<SettingsProviderType>(settingsProvider_.get(), tableName),
key,
key
)
);
}
/**
@@ -84,14 +86,16 @@ public:
data::cassandra::PreparedStatement const&
getPreparedInsertMigratedMigrator(data::cassandra::Handle const& handler)
{
static auto kPREPARED = handler.prepare(fmt::format(
R"(
static auto kPREPARED = handler.prepare(
fmt::format(
R"(
INSERT INTO {}
(migrator_name, status)
VALUES (?, ?)
)",
data::cassandra::qualifiedTableName<SettingsProviderType>(settingsProvider_.get(), "migrator_status")
));
data::cassandra::qualifiedTableName<SettingsProviderType>(settingsProvider_.get(), "migrator_status")
)
);
return kPREPARED;
}
};

View File

@@ -51,6 +51,7 @@ target_sources(
handlers/Subscribe.cpp
handlers/TransactionEntry.cpp
handlers/Unsubscribe.cpp
handlers/VaultInfo.cpp
)
target_link_libraries(clio_rpc PRIVATE clio_util)

View File

@@ -25,7 +25,7 @@
#include "util/prometheus/Prometheus.hpp"
#include <boost/json/object.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/protocol/jss.h>
#include <chrono>
@@ -40,41 +40,55 @@ using util::prometheus::Label;
using util::prometheus::Labels;
Counters::MethodInfo::MethodInfo(std::string const& method)
: started(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "started"}, {"method", method}}},
fmt::format("Total number of started calls to the method {}", method)
))
, finished(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "finished"}, {"method", method}}},
fmt::format("Total number of finished calls to the method {}", method)
))
, failed(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "failed"}, {"method", method}}},
fmt::format("Total number of failed calls to the method {}", method)
))
, errored(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "errored"}, {"method", method}}},
fmt::format("Total number of errored calls to the method {}", method)
))
, forwarded(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "forwarded"}, {"method", method}}},
fmt::format("Total number of forwarded calls to the method {}", method)
))
, failedForward(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "failed_forward"}, {"method", method}}},
fmt::format("Total number of failed forwarded calls to the method {}", method)
))
, duration(PrometheusService::counterInt(
"rpc_method_duration_us",
Labels({util::prometheus::Label{"method", method}}),
fmt::format("Total duration of calls to the method {}", method)
))
: started(
PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "started"}, {"method", method}}},
fmt::format("Total number of started calls to the method {}", method)
)
)
, finished(
PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "finished"}, {"method", method}}},
fmt::format("Total number of finished calls to the method {}", method)
)
)
, failed(
PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "failed"}, {"method", method}}},
fmt::format("Total number of failed calls to the method {}", method)
)
)
, errored(
PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "errored"}, {"method", method}}},
fmt::format("Total number of errored calls to the method {}", method)
)
)
, forwarded(
PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "forwarded"}, {"method", method}}},
fmt::format("Total number of forwarded calls to the method {}", method)
)
)
, failedForward(
PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "failed_forward"}, {"method", method}}},
fmt::format("Total number of failed forwarded calls to the method {}", method)
)
)
, duration(
PrometheusService::counterInt(
"rpc_method_duration_us",
Labels({util::prometheus::Label{"method", method}}),
fmt::format("Total duration of calls to the method {}", method)
)
)
{
}
@@ -89,31 +103,41 @@ Counters::getMethodInfo(std::string const& method)
}
Counters::Counters(WorkQueue const& wq)
: tooBusyCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "too_busy"}}),
"Total number of too busy errors"
))
, notReadyCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "not_ready"}}),
"Total number of not ready replyes"
))
, badSyntaxCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "bad_syntax"}}),
"Total number of bad syntax replyes"
))
, unknownCommandCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "unknown_command"}}),
"Total number of unknown command replyes"
))
, internalErrorCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "internal_error"}}),
"Total number of internal errors"
))
: tooBusyCounter_(
PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "too_busy"}}),
"Total number of too busy errors"
)
)
, notReadyCounter_(
PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "not_ready"}}),
"Total number of not ready replyes"
)
)
, badSyntaxCounter_(
PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "bad_syntax"}}),
"Total number of bad syntax replyes"
)
)
, unknownCommandCounter_(
PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "unknown_command"}}),
"Total number of unknown command replyes"
)
)
, internalErrorCounter_(
PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "internal_error"}}),
"Total number of internal errors"
)
)
, workQueue_(std::cref(wq))
, startupTime_{std::chrono::system_clock::now()}
{

View File

@@ -89,7 +89,7 @@ getErrorInfo(ClioError code)
{.code = ClioError::RpcMalformedAuthorizedCredentials,
.error = "malformedAuthorizedCredentials",
.message = "Malformed authorized credentials."},
{.code = ClioError::RpcEntryNotFound, .error = "entryNotFound", .message = "Entry Not Found."},
// special system errors
{.code = ClioError::RpcInvalidApiVersion, .error = JS(invalid_API_version), .message = "Invalid API version."},
{.code = ClioError::RpcCommandIsMissing,

View File

@@ -43,6 +43,7 @@ enum class ClioError {
RpcFieldNotFoundTransaction = 5006,
RpcMalformedOracleDocumentId = 5007,
RpcMalformedAuthorizedCredentials = 5008,
RpcEntryNotFound = 5009,
// special system errors start with 6000
RpcInvalidApiVersion = 6000,

View File

@@ -17,7 +17,6 @@ See [tests/unit/rpc](https://github.com/XRPLF/clio/tree/develop/tests/unit/rpc)
Handlers need to fulfil the requirements specified by the `SomeHandler` concept (see `rpc/common/Concepts.hpp`):
- Expose types:
- `Input` - The POD struct which acts as input for the handler
- `Output` - The POD struct which acts as output of a valid handler invocation

View File

@@ -30,6 +30,7 @@ std::unordered_set<std::string_view> const&
handledRpcs()
{
static std::unordered_set<std::string_view> const kHANDLED_RPCS = {
// clang-format off
"account_channels",
"account_currencies",
"account_info",
@@ -64,7 +65,9 @@ handledRpcs()
"tx",
"subscribe",
"unsubscribe",
"vault_info",
"version",
// clang-format on
};
return kHANDLED_RPCS;
}

View File

@@ -37,7 +37,6 @@
#include <boost/iterator/transform_iterator.hpp>
#include <boost/json.hpp>
#include <boost/json/object.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/protocol/ErrorCodes.h>
@@ -159,9 +158,8 @@ public:
}
if (not ctx.isAdmin and responseCache_ and responseCache_->shouldCache(ctx.method)) {
auto updater =
[this, &ctx](boost::asio::yield_context
) -> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
auto updater = [this, &ctx](boost::asio::yield_context)
-> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
auto result = buildResponseImpl(ctx);
auto const extracted =

View File

@@ -45,7 +45,7 @@
#include <boost/json/value_to.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/lexical_cast/bad_lexical_cast.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/StringUtilities.h>
#include <xrpl/basics/base_uint.h>
@@ -1283,9 +1283,10 @@ postProcessOrderBook(
} else {
saTakerGetsFunded = saOwnerFundsLimit;
offerJson["taker_gets_funded"] = toBoostJson(saTakerGetsFunded.getJson(ripple::JsonOptions::none));
offerJson["taker_pays_funded"] =
toBoostJson(std::min(saTakerPays, ripple::multiply(saTakerGetsFunded, dirRate, saTakerPays.issue()))
.getJson(ripple::JsonOptions::none));
offerJson["taker_pays_funded"] = toBoostJson(
std::min(saTakerPays, ripple::multiply(saTakerGetsFunded, dirRate, saTakerPays.issue()))
.getJson(ripple::JsonOptions::none)
);
}
ripple::STAmount const saOwnerPays = (ripple::parityRate == offerRate)

View File

@@ -28,6 +28,7 @@
#include "data/BackendInterface.hpp"
#include "data/Types.hpp"
#include "rpc/Errors.hpp"
#include "rpc/JS.hpp"
#include "rpc/common/Types.hpp"
#include "util/JsonUtils.hpp"
#include "util/Taggable.hpp"
@@ -41,7 +42,8 @@
#include <boost/regex.hpp>
#include <boost/regex/v5/regex_fwd.hpp>
#include <boost/regex/v5/regex_match.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Number.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/AccountID.h>
@@ -50,9 +52,12 @@
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/Issue.h>
#include <xrpl/protocol/Keylet.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/MPTIssue.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/protocol/Rate.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STAmount.h>
#include <xrpl/protocol/STBase.h>
#include <xrpl/protocol/STLedgerEntry.h>
@@ -60,9 +65,11 @@
#include <xrpl/protocol/STTx.h>
#include <xrpl/protocol/SecretKey.h>
#include <xrpl/protocol/Seed.h>
#include <xrpl/protocol/Serializer.h>
#include <xrpl/protocol/TxMeta.h>
#include <xrpl/protocol/UintTypes.h>
#include <xrpl/protocol/XRPAmount.h>
#include <xrpl/protocol/jss.h>
#include <chrono>
#include <cstddef>

View File

@@ -46,7 +46,8 @@ WorkQueue::OneTimeCallable::operator()()
called_ = true;
}
}
WorkQueue::OneTimeCallable::operator bool() const
WorkQueue::OneTimeCallable::
operator bool() const
{
return func_.operator bool();
}

View File

@@ -24,7 +24,7 @@
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <optional>
#include <string>

View File

@@ -25,7 +25,7 @@
#include "rpc/common/Types.hpp"
#include <boost/json/value.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <cstddef>
#include <functional>
@@ -107,8 +107,8 @@ public:
template <SomeRequirement... Requirements>
explicit IfType(Requirements&&... requirements)
: processor_(
[... r = std::forward<Requirements>(requirements
)](boost::json::value& j, std::string_view key) -> MaybeError {
[... r = std::forward<Requirements>(requirements)](boost::json::value& j, std::string_view key)
-> MaybeError {
std::optional<Status> firstFailure = std::nullopt;
// the check logic is the same as fieldspec

View File

@@ -28,7 +28,7 @@
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/StringUtilities.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/AccountID.h>

View File

@@ -26,7 +26,7 @@
#include <boost/json/array.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/ErrorCodes.h>
@@ -457,7 +457,10 @@ public:
checkIsU32Numeric(std::string_view sv);
template <class HexType>
requires(std::is_same_v<HexType, ripple::uint160> || std::is_same_v<HexType, ripple::uint192> || std::is_same_v<HexType, ripple::uint256>)
requires(
std::is_same_v<HexType, ripple::uint160> || std::is_same_v<HexType, ripple::uint192> ||
std::is_same_v<HexType, ripple::uint256>
)
MaybeError
makeHexStringValidator(boost::json::value const& value, std::string_view key)
{

View File

@@ -23,7 +23,7 @@
#include "util/log/Logger.hpp"
#include <boost/json/object.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <cstdint>
#include <expected>

View File

@@ -60,6 +60,7 @@
#include "rpc/handlers/TransactionEntry.hpp"
#include "rpc/handlers/Tx.hpp"
#include "rpc/handlers/Unsubscribe.hpp"
#include "rpc/handlers/VaultInfo.hpp"
#include "rpc/handlers/VersionHandler.hpp"
#include "util/config/ConfigDefinition.hpp"
@@ -114,6 +115,7 @@ ProductionHandlerProvider::ProductionHandlerProvider(
{"tx", {.handler = TxHandler{backend, etl}}},
{"subscribe", {.handler = SubscribeHandler{backend, amendmentCenter, subscriptionManager}}},
{"unsubscribe", {.handler = UnsubscribeHandler{subscriptionManager}}},
{"vault_info", {.handler = VaultInfoHandler{backend}}},
{"version", {.handler = VersionHandler{config}}},
}
{

View File

@@ -79,7 +79,32 @@ GatewayBalancesHandler::process(GatewayBalancesHandler::Input input, Context con
auto output = GatewayBalancesHandler::Output{};
auto addEscrow = [&](ripple::SLE const& sle) {
if (sle.getType() == ripple::ltESCROW) {
auto const& escrow = sle.getFieldAmount(ripple::sfAmount);
auto& lockedBalance = output.locked[escrow.getCurrency()];
if (lockedBalance == beast::zero) {
// This is needed to set the currency code correctly
lockedBalance = escrow;
} else {
try {
lockedBalance += escrow;
} catch (std::runtime_error const&) {
// Presumably the exception was caused by overflow.
// On overflow return the largest valid STAmount.
// Very large sums of STAmount are approximations
// anyway.
lockedBalance = ripple::STAmount(
lockedBalance.issue(), ripple::STAmount::cMaxValue, ripple::STAmount::cMaxOffset
);
}
}
}
};
auto const addToResponse = [&](ripple::SLE const sle) {
addEscrow(sle);
if (sle.getType() == ripple::ltRIPPLE_STATE) {
ripple::STAmount balance = sle.getFieldAmount(ripple::sfBalance);
auto const lowLimit = sle.getFieldAmount(ripple::sfLowLimit);
@@ -194,6 +219,14 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, GatewayBalancesH
if (auto balances = toJson(output.assets); !balances.empty())
obj[JS(assets)] = balances;
if (!output.locked.empty()) {
boost::json::object lockedObj;
for (auto const& [currency, amount] : output.locked) {
lockedObj[ripple::to_string(currency)] = amount.getText();
}
obj[JS(locked)] = std::move(lockedObj);
}
obj[JS(account)] = output.accountID;
obj[JS(ledger_index)] = output.ledgerIndex;
obj[JS(ledger_hash)] = output.ledgerHash;

View File

@@ -73,6 +73,7 @@ public:
std::map<ripple::AccountID, std::vector<ripple::STAmount>> hotBalances;
std::map<ripple::AccountID, std::vector<ripple::STAmount>> assets;
std::map<ripple::AccountID, std::vector<ripple::STAmount>> frozenBalances;
std::map<ripple::Currency, ripple::STAmount> locked;
// validated should be sent via framework
bool validated = true;
};

View File

@@ -108,9 +108,11 @@ GetAggregatePriceHandler::process(GetAggregatePriceHandler::Input input, Context
auto const scale =
iter->isFieldPresent(ripple::sfScale) ? -static_cast<int>(iter->getFieldU8(ripple::sfScale)) : 0;
timestampPricesBiMap.insert(TimestampPricesBiMap::value_type(
node.getFieldU32(ripple::sfLastUpdateTime), ripple::STAmount{ripple::noIssue(), price, scale}
));
timestampPricesBiMap.insert(
TimestampPricesBiMap::value_type(
node.getFieldU32(ripple::sfLastUpdateTime), ripple::STAmount{ripple::noIssue(), price, scale}
)
);
return true;
}
return false;
@@ -263,12 +265,14 @@ tag_invoke(boost::json::value_to_tag<GetAggregatePriceHandler::Input>, boost::js
}
for (auto const& oracle : jsonObject.at(JS(oracles)).as_array()) {
input.oracles.push_back(GetAggregatePriceHandler::Oracle{
.documentId = boost::json::value_to<std::uint64_t>(oracle.as_object().at(JS(oracle_document_id))),
.account = *util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(oracle.as_object().at(JS(account)))
)
});
input.oracles.push_back(
GetAggregatePriceHandler::Oracle{
.documentId = boost::json::value_to<std::uint64_t>(oracle.as_object().at(JS(oracle_document_id))),
.account = *util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(oracle.as_object().at(JS(account)))
)
}
);
}
input.baseAsset = boost::json::value_to<std::string>(jv.at(JS(base_asset)));
input.quoteAsset = boost::json::value_to<std::string>(jv.at(JS(quote_asset)));

View File

@@ -75,9 +75,9 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx)
key = expectedkey.value();
} else if (input.offer) {
auto const id =
util::parseBase58Wrapper<ripple::AccountID>(boost::json::value_to<std::string>(input.offer->at(JS(account)))
);
auto const id = util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(input.offer->at(JS(account)))
);
key = ripple::keylet::offer(*id, boost::json::value_to<std::uint32_t>(input.offer->at(JS(seq)))).key;
} else if (input.rippleStateAccount) {
auto const id1 = util::parseBase58Wrapper<ripple::AccountID>(
@@ -91,9 +91,9 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx)
key = ripple::keylet::line(*id1, *id2, currency).key;
} else if (input.escrow) {
auto const id =
util::parseBase58Wrapper<ripple::AccountID>(boost::json::value_to<std::string>(input.escrow->at(JS(owner)))
);
auto const id = util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(input.escrow->at(JS(owner)))
);
key = ripple::keylet::escrow(*id, input.escrow->at(JS(seq)).as_int64()).key;
} else if (input.depositPreauth) {
auto const owner = util::parseBase58Wrapper<ripple::AccountID>(
@@ -124,9 +124,9 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx)
key = ripple::keylet::depositPreauth(owner.value(), authCreds).key;
}
} else if (input.ticket) {
auto const id =
util::parseBase58Wrapper<ripple::AccountID>(boost::json::value_to<std::string>(input.ticket->at(JS(account))
));
auto const id = util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(input.ticket->at(JS(account)))
);
key = ripple::getTicketIndex(*id, input.ticket->at(JS(ticket_seq)).as_int64());
} else if (input.amm) {
@@ -136,9 +136,9 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx)
if (ripple::isXRP(currency)) {
return ripple::xrpIssue();
}
auto const issuer =
util::parseBase58Wrapper<ripple::AccountID>(boost::json::value_to<std::string>(assetJson.at(JS(issuer)))
);
auto const issuer = util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(assetJson.at(JS(issuer)))
);
return ripple::Issue{currency, *issuer};
};
@@ -174,9 +174,9 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx)
} else if (input.mptoken) {
auto const holder =
ripple::parseBase58<ripple::AccountID>(boost::json::value_to<std::string>(input.mptoken->at(JS(account))));
auto const mptIssuanceID =
ripple::uint192{std::string_view(boost::json::value_to<std::string>(input.mptoken->at(JS(mpt_issuance_id))))
};
auto const mptIssuanceID = ripple::uint192{
std::string_view(boost::json::value_to<std::string>(input.mptoken->at(JS(mpt_issuance_id))))
};
key = ripple::keylet::mptoken(mptIssuanceID, *holder).key;
} else if (input.permissionedDomain) {
auto const account = ripple::parseBase58<ripple::AccountID>(
@@ -184,12 +184,17 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx)
);
auto const seq = input.permissionedDomain->at(JS(seq)).as_int64();
key = ripple::keylet::permissionedDomain(*account, seq).key;
} else if (input.vault) {
auto const account =
ripple::parseBase58<ripple::AccountID>(boost::json::value_to<std::string>(input.vault->at(JS(owner))));
auto const seq = input.vault->at(JS(seq)).as_int64();
key = ripple::keylet::vault(*account, seq).key;
} else if (input.delegate) {
auto const account =
ripple::parseBase58<ripple::AccountID>(boost::json::value_to<std::string>(input.delegate->at(JS(account))));
auto const authorize =
ripple::parseBase58<ripple::AccountID>(boost::json::value_to<std::string>(input.delegate->at(JS(authorize)))
);
auto const authorize = ripple::parseBase58<ripple::AccountID>(
boost::json::value_to<std::string>(input.delegate->at(JS(authorize)))
);
key = ripple::keylet::delegate(*account, *authorize).key;
} else {
// Must specify 1 of the following fields to indicate what type
@@ -214,13 +219,13 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx)
if (!ledgerObject || ledgerObject->empty()) {
if (not input.includeDeleted)
return Error{Status{"entryNotFound"}};
return Error{Status{ClioError::RpcEntryNotFound}};
auto const deletedSeq = sharedPtrBackend_->fetchLedgerObjectSeq(key, lgrInfo.seq, ctx.yield);
if (!deletedSeq)
return Error{Status{"entryNotFound"}};
return Error{Status{ClioError::RpcEntryNotFound}};
ledgerObject = sharedPtrBackend_->fetchLedgerObject(key, deletedSeq.value() - 1, ctx.yield);
if (!ledgerObject || ledgerObject->empty())
return Error{Status{"entryNotFound"}};
return Error{Status{ClioError::RpcEntryNotFound}};
output.deletedLedgerIndex = deletedSeq;
}
@@ -326,6 +331,7 @@ tag_invoke(boost::json::value_to_tag<LedgerEntryHandler::Input>, boost::json::va
{JS(credential), ripple::ltCREDENTIAL},
{JS(mptoken), ripple::ltMPTOKEN},
{JS(permissioned_domain), ripple::ltPERMISSIONED_DOMAIN},
{JS(vault), ripple::ltVAULT},
{JS(delegate), ripple::ltDELEGATE}
};
@@ -415,6 +421,8 @@ tag_invoke(boost::json::value_to_tag<LedgerEntryHandler::Input>, boost::json::va
input.mptoken = jv.at(JS(mptoken)).as_object();
} else if (jsonObject.contains(JS(permissioned_domain))) {
input.permissionedDomain = jv.at(JS(permissioned_domain)).as_object();
} else if (jsonObject.contains(JS(vault))) {
input.vault = jv.at(JS(vault)).as_object();
} else if (jsonObject.contains(JS(delegate))) {
input.delegate = jv.at(JS(delegate)).as_object();
}

View File

@@ -104,6 +104,7 @@ public:
std::optional<boost::json::object> amm;
std::optional<boost::json::object> mptoken;
std::optional<boost::json::object> permissionedDomain;
std::optional<boost::json::object> vault;
std::optional<ripple::STXChainBridge> bridge;
std::optional<std::string> bridgeAccount;
std::optional<uint32_t> chainClaimId;
@@ -145,12 +146,12 @@ public:
return Error{Status{RippledError::rpcINVALID_PARAMS, "malformedAccounts"}};
}
auto const id1 =
util::parseBase58Wrapper<ripple::AccountID>(boost::json::value_to<std::string>(value.as_array()[0])
);
auto const id2 =
util::parseBase58Wrapper<ripple::AccountID>(boost::json::value_to<std::string>(value.as_array()[1])
);
auto const id1 = util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(value.as_array()[0])
);
auto const id2 = util::parseBase58Wrapper<ripple::AccountID>(
boost::json::value_to<std::string>(value.as_array()[1])
);
if (!id1 || !id2)
return Error{Status{ClioError::RpcMalformedAddress, "malformedAddresses"}};
@@ -393,6 +394,23 @@ public:
},
},
}}},
{JS(vault),
meta::WithCustomError{
validation::Type<std::string, boost::json::object>{}, Status(ClioError::RpcMalformedRequest)
},
meta::IfType<std::string>{kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
meta::IfType<boost::json::object>{meta::Section{
{JS(seq),
meta::WithCustomError{validation::Required{}, Status(ClioError::RpcMalformedRequest)},
meta::WithCustomError{validation::Type<uint32_t>{}, Status(ClioError::RpcMalformedRequest)}},
{
JS(owner),
meta::WithCustomError{validation::Required{}, Status(ClioError::RpcMalformedRequest)},
meta::WithCustomError{
validation::CustomValidators::accountBase58Validator, Status(ClioError::RpcMalformedOwner)
},
},
}}},
{JS(delegate),
meta::WithCustomError{
validation::Type<std::string, boost::json::object>{}, Status(ClioError::RpcMalformedRequest)

View File

@@ -138,14 +138,7 @@ NFTOffersHandlerBase::iterateOfferDirectory(
}
auto result = traverseOwnedNodes(
*sharedPtrBackend_,
directory,
cursor,
startHint,
lgrInfo.seq,
reserve,
yield,
[&offers](ripple::SLE&& offer) {
*sharedPtrBackend_, directory, cursor, startHint, lgrInfo.seq, reserve, yield, [&offers](ripple::SLE&& offer) {
if (offer.getType() == ripple::ltNFTOKEN_OFFER) {
offers.push_back(std::move(offer));
return true;

View File

@@ -31,7 +31,7 @@
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Indexes.h>

View File

@@ -34,7 +34,7 @@
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/BuildInfo.h>

View File

@@ -140,8 +140,10 @@ private:
subscribeToAccounts(std::vector<std::string> const& accounts, feed::SubscriberSharedPtr const& session) const;
void
subscribeToAccountsProposed(std::vector<std::string> const& accounts, feed::SubscriberSharedPtr const& session)
const;
subscribeToAccountsProposed(
std::vector<std::string> const& accounts,
feed::SubscriberSharedPtr const& session
) const;
void
subscribeToBooks(

View File

@@ -31,7 +31,6 @@
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/jss.h>
#include <string>

View File

@@ -37,7 +37,7 @@
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/basics/strHex.h>

View File

@@ -129,8 +129,10 @@ UnsubscribeHandler::unsubscribeFromStreams(
}
void
UnsubscribeHandler::unsubscribeFromAccounts(std::vector<std::string> accounts, feed::SubscriberSharedPtr const& session)
const
UnsubscribeHandler::unsubscribeFromAccounts(
std::vector<std::string> accounts,
feed::SubscriberSharedPtr const& session
) const
{
for (auto const& account : accounts) {
auto const accountID = accountFromStringStrict(account);
@@ -150,8 +152,10 @@ UnsubscribeHandler::unsubscribeFromProposedAccounts(
}
}
void
UnsubscribeHandler::unsubscribeFromBooks(std::vector<OrderBook> const& books, feed::SubscriberSharedPtr const& session)
const
UnsubscribeHandler::unsubscribeFromBooks(
std::vector<OrderBook> const& books,
feed::SubscriberSharedPtr const& session
) const
{
for (auto const& orderBook : books) {
subscriptions_->unsubBook(orderBook.book, session);

View File

@@ -106,8 +106,10 @@ private:
unsubscribeFromAccounts(std::vector<std::string> accounts, feed::SubscriberSharedPtr const& session) const;
void
unsubscribeFromProposedAccounts(std::vector<std::string> accountsProposed, feed::SubscriberSharedPtr const& session)
const;
unsubscribeFromProposedAccounts(
std::vector<std::string> accountsProposed,
feed::SubscriberSharedPtr const& session
) const;
void
unsubscribeFromBooks(std::vector<OrderBook> const& books, feed::SubscriberSharedPtr const& session) const;

View File

@@ -0,0 +1,190 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "rpc/handlers/VaultInfo.hpp"
#include "data/BackendInterface.hpp"
#include "rpc/Errors.hpp"
#include "rpc/JS.hpp"
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/Keylet.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STBase.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/Serializer.h>
#include <xrpl/protocol/jss.h>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
namespace rpc {
namespace {
/**
 * @brief Checks that the request addresses the vault in exactly one way:
 * either by `vaultID` alone, or by the `owner` + `tnxSequence` pair together.
 * Any other combination is considered malformed.
 *
 * @param input The input object containing optional fields for the vault request.
 * @return Returns true if the input is valid, false otherwise.
 */
bool
validate(VaultInfoHandler::Input const& input)
{
    if (input.vaultID.has_value()) {
        // vault_id must stand alone — owner/seq must not accompany it
        return not input.owner.has_value() and not input.tnxSequence.has_value();
    }

    // Without a vault_id, both the owner account and the creating
    // transaction's sequence number are required to derive the keylet.
    return input.owner.has_value() and input.tnxSequence.has_value();
}
} // namespace
// Stores a shared handle to the backend; all ledger-object reads in process() go through it.
VaultInfoHandler::VaultInfoHandler(std::shared_ptr<BackendInterface> const& sharedPtrBackend)
    : sharedPtrBackend_{sharedPtrBackend}
{
}
// Resolves the vault ledger entry (by vault_id, or by owner + seq), fetches its
// associated MPT share issuance, and returns both as one JSON object where the
// issuance is embedded under the "shares" field — mirroring rippled's response shape.
VaultInfoHandler::Result
VaultInfoHandler::process(VaultInfoHandler::Input input, Context const& ctx) const
{
    // vault info input must either have owner and sequence, or vault_id only.
    if (not validate(input))
        return Error{ClioError::RpcMalformedRequest};

    auto const range = sharedPtrBackend_->fetchLedgerRange();
    ASSERT(range.has_value(), "VaultInfo's ledger range must be available");

    // Resolve the requested ledger (defaults to the latest validated one when
    // ledgerIndex is unset); errors propagate as-is to the caller.
    auto const expectedLgrInfo = getLedgerHeaderFromHashOrSeq(
        *sharedPtrBackend_, ctx.yield, std::nullopt, input.ledgerIndex, range->maxSequence
    );
    if (not expectedLgrInfo.has_value())
        return Error{expectedLgrInfo.error()};
    auto const& lgrInfo = *expectedLgrInfo;

    // Extract the vault keylet based on input
    auto const vaultKeylet = [&]() -> std::expected<ripple::Keylet, Status> {
        if (input.owner && input.tnxSequence) {
            auto const accountStr = *input.owner;
            // NOTE(review): *accountID below is dereferenced unchecked — this relies on
            // the spec's accountBase58Validator having rejected unparsable owners; confirm.
            auto const accountID = accountFromStringStrict(accountStr);
            // checks that account exists
            {
                auto const accountKeylet = ripple::keylet::account(*accountID);
                auto const accountLedgerObject =
                    sharedPtrBackend_->fetchLedgerObject(accountKeylet.key, lgrInfo.seq, ctx.yield);
                if (!accountLedgerObject)
                    return std::unexpected{Status{ClioError::RpcEntryNotFound}};
            }
            return ripple::keylet::vault(*accountID, *input.tnxSequence);
        }
        // vault_id path: parse the hex index directly into a keylet
        ripple::uint256 nodeIndex;
        if (nodeIndex.parseHex(*input.vaultID))
            return ripple::keylet::vault(nodeIndex);
        return std::unexpected{Status{ClioError::RpcEntryNotFound}};
    }();
    if (not vaultKeylet.has_value())
        return Error{vaultKeylet.error()};

    // Fetch the vault object and its associated issuance ID
    auto const vaultLedgerObject =
        sharedPtrBackend_->fetchLedgerObject(vaultKeylet.value().key, lgrInfo.seq, ctx.yield);
    if (not vaultLedgerObject)
        return Error{Status{ClioError::RpcEntryNotFound, "vault object not found."}};
    ripple::STLedgerEntry const vaultSle{
        ripple::SerialIter{vaultLedgerObject->data(), vaultLedgerObject->size()}, vaultKeylet.value().key
    };

    // The vault's sfShareMPTID points at the MPT issuance that represents its shares.
    auto const issuanceKeylet = ripple::keylet::mptIssuance(vaultSle[ripple::sfShareMPTID]).key;
    auto const issuanceObject = sharedPtrBackend_->fetchLedgerObject(issuanceKeylet, lgrInfo.seq, ctx.yield);
    if (not issuanceObject)
        return Error{Status{ClioError::RpcEntryNotFound, "issuance object not found."}};
    ripple::STLedgerEntry const issuanceSle{
        ripple::SerialIter{issuanceObject->data(), issuanceObject->size()}, issuanceKeylet
    };

    // put issuance object into "shares" field of vault object
    // follows same logic as rippled:
    // https://github.com/XRPLF/rippled/pull/5224/files#diff-6cb544622c7942261f097d628f61f1c1fcf34a1bcfd954aedbada4238fc28f69R107
    Output response;
    response.vault = toBoostJson(vaultSle.getJson(ripple::JsonOptions::none));
    response.vault.as_object()[JS(shares)] = toBoostJson(issuanceSle.getJson(ripple::JsonOptions::none));
    response.ledgerIndex = lgrInfo.seq;
    return response;
}
/**
 * @brief Serializes VaultInfoHandler::Output into the JSON response body.
 * Produces the same three fields, in the same order, as the braced-init form.
 */
void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, VaultInfoHandler::Output const& output)
{
    boost::json::object obj;
    obj[JS(ledger_index)] = output.ledgerIndex;
    obj[JS(validated)] = output.validated;
    obj[JS(vault)] = output.vault;
    jv = std::move(obj);
}
/**
 * @brief Parses the request JSON into VaultInfoHandler::Input.
 * Optional fields (owner, seq, vault_id, ledger_index) are copied only when present;
 * a string ledger_index of "validated" is treated the same as absent.
 */
VaultInfoHandler::Input
tag_invoke(boost::json::value_to_tag<VaultInfoHandler::Input>, boost::json::value const& jv)
{
    VaultInfoHandler::Input input;
    auto const& jsonObject = jv.as_object();

    if (auto const it = jsonObject.find(JS(owner)); it != jsonObject.end())
        input.owner = it->value().as_string();

    if (auto const it = jsonObject.find(JS(seq)); it != jsonObject.end())
        input.tnxSequence = static_cast<uint32_t>(it->value().as_int64());

    if (auto const it = jsonObject.find(JS(vault_id)); it != jsonObject.end())
        input.vaultID = it->value().as_string();

    if (auto const it = jsonObject.find(JS(ledger_index)); it != jsonObject.end()) {
        auto const& ledgerIndex = it->value();
        if (not ledgerIndex.is_string()) {
            input.ledgerIndex = ledgerIndex.as_int64();
        } else if (ledgerIndex.as_string() != "validated") {
            input.ledgerIndex = std::stoi(ledgerIndex.as_string().c_str());
        }
    }
    return input;
}
} // namespace rpc

View File

@@ -0,0 +1,134 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/BackendInterface.hpp"
#include "rpc/Errors.hpp"
#include "rpc/JS.hpp"
#include "rpc/common/MetaProcessors.hpp"
#include "rpc/common/Specs.hpp"
#include "rpc/common/Types.hpp"
#include "rpc/common/Validators.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/jss.h>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
namespace rpc {
/**
* @brief The vault_info command retrieves information about a vault, currency, shares etc.
*/
class VaultInfoHandler {
    // Backend used for all ledger-object lookups; shared with the rest of the RPC layer.
    std::shared_ptr<BackendInterface> sharedPtrBackend_;

public:
    /**
     * @brief Construct a new VaultInfo object
     *
     * @param sharedPtrBackend The backend to use
     */
    // NOTE(review): single-argument ctor is not `explicit` — consider marking it so,
    // unless implicit conversion from the backend pointer is relied upon elsewhere.
    VaultInfoHandler(std::shared_ptr<BackendInterface> const& sharedPtrBackend);

    /**
     * @brief A struct to hold the input data for the command
     */
    struct Input {
        // Hex-encoded vault ledger-entry index; mutually exclusive with owner/tnxSequence
        std::optional<std::string> vaultID;
        // Base58 account of the vault's owner; must be paired with tnxSequence
        std::optional<std::string> owner;
        // Sequence of the transaction that created the vault (request field "seq")
        std::optional<uint32_t> tnxSequence;
        // Ledger to query; unset means the latest validated ledger
        std::optional<uint32_t> ledgerIndex;
    };

    /**
     * @brief A struct to hold the output data for the command
     */
    struct Output {
        // Vault ledger entry as JSON, with the share issuance embedded under "shares"
        boost::json::value vault;
        // Sequence of the ledger the data was read from
        uint32_t ledgerIndex{};
        // validated should be sent via framework
        bool validated = true;
    };

    using Result = HandlerReturnType<Output>;

    /**
     * @brief Returns the API specification for the command
     *
     * @param apiVersion The api version to return the spec for
     * @return The spec for the given apiVersion
     */
    static RpcSpecConstRef
    spec([[maybe_unused]] uint32_t apiVersion)
    {
        static auto const kRPC_SPEC = RpcSpec{
            {JS(vault_id),
             meta::WithCustomError{
                 validation::CustomValidators::uint256HexStringValidator, Status(ClioError::RpcMalformedRequest)
             }},
            {JS(owner),
             meta::WithCustomError{
                 validation::CustomValidators::accountBase58Validator,
                 Status(ClioError::RpcMalformedRequest, "OwnerNotHexString")
             }},
            {JS(seq), meta::WithCustomError{validation::Type<uint32_t>{}, Status(ClioError::RpcMalformedRequest)}},
            {JS(ledger_index), validation::CustomValidators::ledgerIndexValidator},
        };
        return kRPC_SPEC;
    }

    /**
     * @brief Process the VaultInfo command
     *
     * @param input The input data for the command
     * @param ctx The context of the request
     * @return The result of the operation
     */
    Result
    process(Input input, Context const& ctx) const;

private:
    /**
     * @brief Convert the Output to a JSON object
     *
     * @param jv The JSON object to convert to
     * @param output The output to convert
     */
    friend void
    tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output);

    /**
     * @brief Convert a JSON object to Input type
     *
     * @param jv The JSON object to convert
     * @return Input parsed from the JSON object
     */
    friend Input
    tag_invoke(boost::json::value_to_tag<Input>, boost::json::value const& jv);
};
} // namespace rpc

View File

@@ -29,7 +29,6 @@
#include <boost/stacktrace.hpp>
#include <boost/stacktrace/stacktrace.hpp>
#endif // CLIO_WITHOUT_STACKTRACE
#include <fmt/core.h>
#include <fmt/format.h>
#include <cstdlib>

View File

@@ -134,8 +134,7 @@ public:
return;
boost::asio::spawn(
yield_,
[signal = familySignal_, fn = std::move(fn)](boost::asio::yield_context yield) mutable {
yield_, [signal = familySignal_, fn = std::move(fn)](boost::asio::yield_context yield) mutable {
Coroutine coroutine(std::move(yield), std::move(signal));
fn(coroutine);
}

View File

@@ -36,7 +36,7 @@ LedgerTypes::getLedgerEntryTypeFromStr(std::string const& entryName)
return map;
}();
if (kTYPE_MAP.find(entryName) == kTYPE_MAP.end())
if (!kTYPE_MAP.contains(entryName))
return ripple::ltANY;
return kTYPE_MAP.at(entryName);

View File

@@ -21,7 +21,7 @@
#include "rpc/JS.hpp"
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/StringUtilities.h>
#include <xrpl/basics/strHex.h>
@@ -114,6 +114,7 @@ class LedgerTypes {
LedgerTypeAttribute::accountOwnedLedgerType(JS(did), ripple::ltDID),
LedgerTypeAttribute::accountOwnedLedgerType(JS(oracle), ripple::ltORACLE),
LedgerTypeAttribute::accountOwnedLedgerType(JS(credential), ripple::ltCREDENTIAL),
LedgerTypeAttribute::accountOwnedLedgerType(JS(vault), ripple::ltVAULT),
LedgerTypeAttribute::chainLedgerType(JS(nunl), ripple::ltNEGATIVE_UNL),
LedgerTypeAttribute::deletionBlockerLedgerType(JS(mpt_issuance), ripple::ltMPTOKEN_ISSUANCE),
LedgerTypeAttribute::deletionBlockerLedgerType(JS(mptoken), ripple::ltMPTOKEN),

View File

@@ -78,8 +78,7 @@ SignalsHandler::SignalsHandler(config::ClioConfigDefinition const& config, std::
<< " milliseconds.";
setHandler(impl::SignalsHandlerStatic::handleSecondSignal);
timer_.emplace(context_.scheduleAfter(
gracefulPeriod_,
[forceExitHandler = std::move(forceExitHandler)](auto&& stopToken, bool canceled) {
gracefulPeriod_, [forceExitHandler = std::move(forceExitHandler)](auto&& stopToken, bool canceled) {
// TODO: Update this after https://github.com/XRPLF/clio/issues/1380
if (not stopToken.isStopRequested() and not canceled) {
LOG(LogService::warn()) << "Force exit at the end of graceful period.";

View File

@@ -19,8 +19,9 @@
#include "util/TimeUtils.hpp"
#include <fmt/base.h>
#include <fmt/chrono.h>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/chrono.h>
#include <chrono>

View File

@@ -166,17 +166,16 @@ public:
static_assert(not std::is_same_v<RetType, std::any>);
auto const millis = std::chrono::duration_cast<std::chrono::milliseconds>(delay);
return AnyOperation<RetType>(pimpl_->scheduleAfter(
millis,
[fn = std::forward<decltype(fn)>(fn)](auto stopToken) -> std::any {
return AnyOperation<RetType>(
pimpl_->scheduleAfter(millis, [fn = std::forward<decltype(fn)>(fn)](auto stopToken) -> std::any {
if constexpr (std::is_void_v<RetType>) {
fn(std::move(stopToken));
return {};
} else {
return std::make_any<RetType>(fn(std::move(stopToken)));
}
}
));
})
);
}
/**
@@ -197,8 +196,7 @@ public:
auto const millis = std::chrono::duration_cast<std::chrono::milliseconds>(delay);
return AnyOperation<RetType>(pimpl_->scheduleAfter(
millis,
[fn = std::forward<decltype(fn)>(fn)](auto stopToken, auto cancelled) -> std::any {
millis, [fn = std::forward<decltype(fn)>(fn)](auto stopToken, auto cancelled) -> std::any {
if constexpr (std::is_void_v<RetType>) {
fn(std::move(stopToken), cancelled);
return {};
@@ -224,13 +222,10 @@ public:
auto const millis = std::chrono::duration_cast<std::chrono::milliseconds>(interval);
return AnyOperation<RetType>( //
pimpl_->executeRepeatedly(
millis,
[fn = std::forward<decltype(fn)>(fn)] -> std::any {
fn();
return {};
}
)
pimpl_->executeRepeatedly(millis, [fn = std::forward<decltype(fn)>(fn)] -> std::any {
fn();
return {};
})
);
}

View File

@@ -22,7 +22,7 @@
#include "util/async/Error.hpp"
#include "util/async/impl/ErasedOperation.hpp"
#include <fmt/core.h>
#include <fmt/format.h>
#include <fmt/std.h>
#include <any>

View File

@@ -146,13 +146,10 @@ public:
auto const millis = std::chrono::duration_cast<std::chrono::milliseconds>(interval);
return AnyOperation<RetType>( //
pimpl_->executeRepeatedly(
millis,
[fn = std::forward<decltype(fn)>(fn)] -> std::any {
fn();
return {};
}
)
pimpl_->executeRepeatedly(millis, [fn = std::forward<decltype(fn)>(fn)] -> std::any {
fn();
return {};
})
);
}

View File

@@ -170,7 +170,7 @@ template <typename T>
concept SomeStdDuration = requires {
// Thank you Ed Catmur for this trick.
// See https://stackoverflow.com/questions/74383254/concept-that-models-only-the-stdchrono-duration-types
[]<typename Rep, typename Period>( //
[]<typename Rep, typename Period>( //
std::type_identity<std::chrono::duration<Rep, Period>>
) {}(std::type_identity<std::decay_t<T>>());
};
@@ -180,7 +180,7 @@ concept SomeStdDuration = requires {
*/
template <typename T>
concept SomeStdOptional = requires {
[]<typename Type>( //
[]<typename Type>( //
std::type_identity<std::optional<Type>>
) {}(std::type_identity<std::decay_t<T>>());
};

View File

@@ -20,7 +20,7 @@
#pragma once
#include <fmt/core.h>
#include <fmt/format.h>
#include <fmt/std.h>
#include <string>

Some files were not shown because too many files have changed in this diff Show More