diff --git a/.github/actions/generate-version/action.yml b/.github/actions/generate-version/action.yml new file mode 100644 index 0000000000..6b84aac2f3 --- /dev/null +++ b/.github/actions/generate-version/action.yml @@ -0,0 +1,44 @@ +name: Generate build version number +description: "Generate build version number." + +outputs: + version: + description: "The generated build version number." + value: ${{ steps.version.outputs.version }} + +runs: + using: composite + steps: + # When a tag is pushed, the version is used as-is. + - name: Generate version for tag event + if: ${{ startsWith(github.ref, 'refs/tags/') }} + shell: bash + env: + VERSION: ${{ github.ref_name }} + run: echo "VERSION=${VERSION}" >> "${GITHUB_ENV}" + + # When a tag is not pushed, then the version (e.g. 1.2.3-b0) is extracted + # from the BuildInfo.cpp file and the shortened commit hash appended to it. + # We use a plus sign instead of a hyphen because Conan recipe versions do + # not support two hyphens. + - name: Generate version for non-tag event + if: ${{ !startsWith(github.ref, 'refs/tags/') }} + shell: bash + run: | + echo 'Extracting version from BuildInfo.cpp.' + VERSION="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" + if [[ -z "${VERSION}" ]]; then + echo 'Unable to extract version from BuildInfo.cpp.' + exit 1 + fi + + echo 'Appending shortened commit hash to version.' + SHA='${{ github.sha }}' + VERSION="${VERSION}+${SHA:0:7}" + + echo "VERSION=${VERSION}" >> "${GITHUB_ENV}" + + - name: Output version + id: version + shell: bash + run: echo "version=${VERSION}" >> "${GITHUB_OUTPUT}" diff --git a/.github/actions/setup-conan/action.yml b/.github/actions/setup-conan/action.yml index dedf53f109..9d834884d2 100644 --- a/.github/actions/setup-conan/action.yml +++ b/.github/actions/setup-conan/action.yml @@ -2,11 +2,11 @@ name: Setup Conan description: "Set up Conan configuration, profile, and remote." 
inputs: - conan_remote_name: + remote_name: description: "The name of the Conan remote to use." required: false default: xrplf - conan_remote_url: + remote_url: description: "The URL of the Conan endpoint to use." required: false default: https://conan.ripplex.io @@ -31,16 +31,16 @@ runs: conan config install conan/profiles/ -tf $(conan config home)/profiles/ echo 'Conan profile:' - conan profile show + conan profile show --profile ci - name: Set up Conan remote shell: bash env: - CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }} - CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }} + REMOTE_NAME: ${{ inputs.remote_name }} + REMOTE_URL: ${{ inputs.remote_url }} run: | - echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'." - conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}" + echo "Adding Conan remote '${REMOTE_NAME}' at '${REMOTE_URL}'." + conan remote add --index 0 --force "${REMOTE_NAME}" "${REMOTE_URL}" echo 'Listing Conan remotes.' conan remote list diff --git a/.github/scripts/levelization/results/ordering.txt b/.github/scripts/levelization/results/ordering.txt index c9c65fb0dd..8d17e1167f 100644 --- a/.github/scripts/levelization/results/ordering.txt +++ b/.github/scripts/levelization/results/ordering.txt @@ -104,6 +104,7 @@ test.overlay > xrpl.basics test.overlay > xrpld.app test.overlay > xrpld.overlay test.overlay > xrpld.peerfinder +test.overlay > xrpl.nodestore test.overlay > xrpl.protocol test.overlay > xrpl.shamap test.peerfinder > test.beast diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index f4e41c6b87..c2253dc194 100755 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -20,8 +20,8 @@ class Config: Generate a strategy matrix for GitHub Actions CI. 
On each PR commit we will build a selection of Debian, RHEL, Ubuntu, MacOS, and -Windows configurations, while upon merge into the develop, release, or master -branches, we will build all configurations, and test most of them. +Windows configurations, while upon merge into the develop or release branches, +we will build all configurations, and test most of them. We will further set additional CMake arguments as follows: - All builds will have the `tests`, `werr`, and `xrpld` options. diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 3aa48ac070..46f6b7500a 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -1,7 +1,8 @@ # This workflow runs all workflows to check, build and test the project on # various Linux flavors, as well as on MacOS and Windows, on every push to a # user branch. However, it will not run if the pull request is a draft unless it -# has the 'DraftRunCI' label. +# has the 'DraftRunCI' label. For commits to PRs that target a release branch, +# it also uploads the libxrpl recipe to the Conan remote. name: PR on: @@ -53,12 +54,12 @@ jobs: .github/scripts/rename/** .github/workflows/reusable-check-levelization.yml .github/workflows/reusable-check-rename.yml - .github/workflows/reusable-notify-clio.yml .github/workflows/on-pr.yml # Keep the paths below in sync with those in `on-trigger.yml`. 
.github/actions/build-deps/** .github/actions/build-test/** + .github/actions/generate-version/** .github/actions/setup-conan/** .github/scripts/strategy-matrix/** .github/workflows/reusable-build.yml @@ -66,6 +67,7 @@ jobs: .github/workflows/reusable-build-test.yml .github/workflows/reusable-strategy-matrix.yml .github/workflows/reusable-test.yml + .github/workflows/reusable-upload-recipe.yml .codecov.yml cmake/** conan/** @@ -121,22 +123,42 @@ jobs: secrets: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - notify-clio: + upload-recipe: needs: - should-run - build-test - if: ${{ needs.should-run.outputs.go == 'true' && (startsWith(github.base_ref, 'release') || github.base_ref == 'master') }} - uses: ./.github/workflows/reusable-notify-clio.yml + # Only run when committing to a PR that targets a release branch in the + # XRPLF repository. + if: ${{ github.repository_owner == 'XRPLF' && needs.should-run.outputs.go == 'true' && startsWith(github.base_ref, 'release') }} + uses: ./.github/workflows/reusable-upload-recipe.yml secrets: - clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }} - conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} - conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} + remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} + remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} + + notify-clio: + needs: upload-recipe + runs-on: ubuntu-latest + steps: + # Notify the Clio repository about the newly proposed release version, so + # it can be checked for compatibility before the release is actually made. 
+ - name: Notify Clio + env: + GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }} + PR_URL: ${{ github.event.pull_request.html_url }} + run: | + gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \ + -F "client_payload[ref]=${{ needs.upload-recipe.outputs.recipe_ref }}" \ + -F "client_payload[pr_url]=${PR_URL}" passed: if: failure() || cancelled() needs: - - build-test - check-levelization + - check-rename + - build-test + - upload-recipe + - notify-clio runs-on: ubuntu-latest steps: - name: Fail diff --git a/.github/workflows/on-tag.yml b/.github/workflows/on-tag.yml new file mode 100644 index 0000000000..c6361b4016 --- /dev/null +++ b/.github/workflows/on-tag.yml @@ -0,0 +1,25 @@ +# This workflow uploads the libxrpl recipe to the Conan remote when a versioned +# tag is pushed. +name: Tag + +on: + push: + tags: + - "v*" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + upload-recipe: + # Only run when a tag is pushed to the XRPLF repository. + if: ${{ github.repository_owner == 'XRPLF' }} + uses: ./.github/workflows/reusable-upload-recipe.yml + secrets: + remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} + remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index 2c63c2baa5..210670f5a1 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -1,9 +1,7 @@ -# This workflow runs all workflows to build the dependencies required for the -# project on various Linux flavors, as well as on MacOS and Windows, on a -# scheduled basis, on merge into the 'develop', 'release', or 'master' branches, -# or manually. 
The missing commits check is only run when the code is merged -# into the 'develop' or 'release' branches, and the documentation is built when -# the code is merged into the 'develop' branch. +# This workflow runs all workflows to build and test the code on various Linux +# flavors, as well as on MacOS and Windows, on a scheduled basis, on merge into +# the 'develop' or 'release*' branches, or when requested manually. Upon pushes +# to the develop branch it also uploads the libxrpl recipe to the Conan remote. name: Trigger on: @@ -11,7 +9,6 @@ on: branches: - "develop" - "release*" - - "master" paths: # These paths are unique to `on-trigger.yml`. - ".github/workflows/on-trigger.yml" @@ -19,6 +16,7 @@ on: # Keep the paths below in sync with those in `on-pr.yml`. - ".github/actions/build-deps/**" - ".github/actions/build-test/**" + - ".github/actions/generate-version/**" - ".github/actions/setup-conan/**" - ".github/scripts/strategy-matrix/**" - ".github/workflows/reusable-build.yml" @@ -26,6 +24,7 @@ on: - ".github/workflows/reusable-build-test.yml" - ".github/workflows/reusable-strategy-matrix.yml" - ".github/workflows/reusable-test.yml" + - ".github/workflows/reusable-upload-recipe.yml" - ".codecov.yml" - "cmake/**" - "conan/**" @@ -70,11 +69,20 @@ jobs: with: # Enable ccache only for events targeting the XRPLF repository, since # other accounts will not have access to our remote cache storage. - # However, we do not enable ccache for events targeting the master or a - # release branch, to protect against the rare case that the output - # produced by ccache is not identical to a regular compilation. - ccache_enabled: ${{ github.repository_owner == 'XRPLF' && !(github.base_ref == 'master' || startsWith(github.base_ref, 'release')) }} + # However, we do not enable ccache for events targeting a release branch, + # to protect against the rare case that the output produced by ccache is + # not identical to a regular compilation. 
+ ccache_enabled: ${{ github.repository_owner == 'XRPLF' && !startsWith(github.ref, 'refs/heads/release') }} os: ${{ matrix.os }} strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }} secrets: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + + upload-recipe: + needs: build-test + # Only run when pushing to the develop branch in the XRPLF repository. + if: ${{ github.repository_owner == 'XRPLF' && github.event_name == 'push' && github.ref == 'refs/heads/develop' }} + uses: ./.github/workflows/reusable-upload-recipe.yml + secrets: + remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} + remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 00754e5eae..6b8fd9955e 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -3,7 +3,9 @@ name: Run pre-commit hooks on: pull_request: push: - branches: [develop, release, master] + branches: + - "develop" + - "release*" workflow_dispatch: jobs: diff --git a/.github/workflows/reusable-build-test-config.yml b/.github/workflows/reusable-build-test-config.yml index accb615fb3..d78f2d29b5 100644 --- a/.github/workflows/reusable-build-test-config.yml +++ b/.github/workflows/reusable-build-test-config.yml @@ -131,6 +131,8 @@ jobs: subtract: ${{ inputs.nproc_subtract }} - name: Setup Conan + env: + SANITIZERS: ${{ inputs.sanitizers }} uses: ./.github/actions/setup-conan - name: Build dependencies diff --git a/.github/workflows/reusable-notify-clio.yml b/.github/workflows/reusable-notify-clio.yml deleted file mode 100644 index 0941d5f2e3..0000000000 --- a/.github/workflows/reusable-notify-clio.yml +++ /dev/null @@ -1,91 +0,0 @@ -# This workflow exports the built libxrpl package to the Conan remote on a -# a channel named after the pull request, and notifies the Clio repository about -# the new version so it can check for compatibility. 
-name: Notify Clio - -# This workflow can only be triggered by other workflows. -on: - workflow_call: - inputs: - conan_remote_name: - description: "The name of the Conan remote to use." - required: false - type: string - default: xrplf - conan_remote_url: - description: "The URL of the Conan endpoint to use." - required: false - type: string - default: https://conan.ripplex.io - secrets: - clio_notify_token: - description: "The GitHub token to notify Clio about new versions." - required: true - conan_remote_username: - description: "The username for logging into the Conan remote." - required: true - conan_remote_password: - description: "The password for logging into the Conan remote." - required: true - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-clio - cancel-in-progress: true - -defaults: - run: - shell: bash - -jobs: - upload: - if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} - runs-on: ubuntu-latest - container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158 - steps: - - name: Checkout repository - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - - name: Generate outputs - id: generate - env: - PR_NUMBER: ${{ github.event.pull_request.number }} - run: | - echo 'Generating user and channel.' - echo "user=clio" >> "${GITHUB_OUTPUT}" - echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}" - echo 'Extracting version.' 
- echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}" - - name: Calculate conan reference - id: conan_ref - run: | - echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}" - - name: Set up Conan - uses: ./.github/actions/setup-conan - with: - conan_remote_name: ${{ inputs.conan_remote_name }} - conan_remote_url: ${{ inputs.conan_remote_url }} - - name: Log into Conan remote - env: - CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }} - run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}" - - name: Upload package - env: - CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }} - run: | - conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} . - conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }} - outputs: - conan_ref: ${{ steps.conan_ref.outputs.conan_ref }} - - notify: - needs: upload - runs-on: ubuntu-latest - steps: - - name: Notify Clio - env: - GH_TOKEN: ${{ secrets.clio_notify_token }} - PR_URL: ${{ github.event.pull_request.html_url }} - run: | - gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \ - -F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \ - -F "client_payload[pr_url]=${PR_URL}" diff --git a/.github/workflows/reusable-upload-recipe.yml b/.github/workflows/reusable-upload-recipe.yml new file mode 100644 index 0000000000..79af516fb3 --- /dev/null +++ b/.github/workflows/reusable-upload-recipe.yml @@ -0,0 +1,97 @@ +# This workflow exports the built libxrpl package to the Conan remote. +name: Upload Conan recipe + +# This workflow can only be triggered by other workflows. 
+on: + workflow_call: + inputs: + remote_name: + description: "The name of the Conan remote to use." + required: false + type: string + default: xrplf + remote_url: + description: "The URL of the Conan endpoint to use." + required: false + type: string + default: https://conan.ripplex.io + + secrets: + remote_username: + description: "The username for logging into the Conan remote." + required: true + remote_password: + description: "The password for logging into the Conan remote." + required: true + + outputs: + recipe_ref: + description: "The Conan recipe reference ('name/version') that was uploaded." + value: ${{ jobs.upload.outputs.ref }} + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-upload-recipe + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + upload: + runs-on: ubuntu-latest + container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158 + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + + - name: Generate build version number + id: version + uses: ./.github/actions/generate-version + + - name: Set up Conan + uses: ./.github/actions/setup-conan + with: + remote_name: ${{ inputs.remote_name }} + remote_url: ${{ inputs.remote_url }} + + - name: Log into Conan remote + env: + REMOTE_NAME: ${{ inputs.remote_name }} + REMOTE_USERNAME: ${{ secrets.remote_username }} + REMOTE_PASSWORD: ${{ secrets.remote_password }} + run: conan remote login "${REMOTE_NAME}" "${REMOTE_USERNAME}" --password "${REMOTE_PASSWORD}" + + - name: Upload Conan recipe (version) + env: + REMOTE_NAME: ${{ inputs.remote_name }} + run: | + conan export . --version=${{ steps.version.outputs.version }} + conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/${{ steps.version.outputs.version }} + + - name: Upload Conan recipe (develop) + if: ${{ github.ref == 'refs/heads/develop' }} + env: + REMOTE_NAME: ${{ inputs.remote_name }} + run: | + conan export . 
--version=develop + conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/develop + + - name: Upload Conan recipe (rc) + if: ${{ startsWith(github.base_ref, 'release') }} + env: + REMOTE_NAME: ${{ inputs.remote_name }} + run: | + conan export . --version=rc + conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/rc + + - name: Upload Conan recipe (release) + if: ${{ startsWith(github.ref, 'refs/tags/') }} + env: + REMOTE_NAME: ${{ inputs.remote_name }} + run: | + conan export . --version=release + conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/release + + outputs: + ref: xrpl/${{ steps.version.outputs.version }} diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index 29ae95fce5..2dada1ef08 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -84,10 +84,12 @@ jobs: subtract: ${{ env.NPROC_SUBTRACT }} - name: Setup Conan + env: + SANITIZERS: ${{ matrix.sanitizers }} uses: ./.github/actions/setup-conan with: - conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} - conan_remote_url: ${{ env.CONAN_REMOTE_URL }} + remote_name: ${{ env.CONAN_REMOTE_NAME }} + remote_url: ${{ env.CONAN_REMOTE_URL }} - name: Build dependencies uses: ./.github/actions/build-deps @@ -98,6 +100,7 @@ jobs: # Set the verbosity to "quiet" for Windows to avoid an excessive # amount of logs. For other OSes, the "verbose" logs are more useful. log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }} + sanitizers: ${{ matrix.sanitizers }} - name: Log into Conan remote if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }} diff --git a/BUILD.md b/BUILD.md index b239b10be6..f90aa0c148 100644 --- a/BUILD.md +++ b/BUILD.md @@ -148,8 +148,8 @@ function extract_version { } # Define which recipes to export. 
-recipes=('ed25519' 'grpc' 'openssl' 'secp256k1' 'snappy' 'soci') -folders=('all' 'all' '3.x.x' 'all' 'all' 'all') +recipes=('ed25519' 'grpc' 'nudb' 'openssl' 'secp256k1' 'snappy' 'soci') +folders=('all' 'all' 'all' '3.x.x' 'all' 'all' 'all') # Selectively check out the recipes from our CCI fork. cd external diff --git a/CMakeLists.txt b/CMakeLists.txt index ee0484e79d..c24b27adb2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,7 +8,9 @@ if(POLICY CMP0077) endif() # Fix "unrecognized escape" issues when passing CMAKE_MODULE_PATH on Windows. -file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH) +if(DEFINED CMAKE_MODULE_PATH) + file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH) +endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") project(xrpl) diff --git a/SECURITY.md b/SECURITY.md index 18eec312ed..1be412ae2a 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -78,72 +78,61 @@ To report a qualifying bug, please send a detailed report to: | Email Address | bugs@ripple.com | | :-----------: | :-------------------------------------------------- | -| Short Key ID | `0xC57929BE` | -| Long Key ID | `0xCD49A0AFC57929BE` | -| Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` | +| Short Key ID | `0xA9F514E0` | +| Long Key ID | `0xD900855AA9F514E0` | +| Fingerprint | `B72C 0654 2F2A E250 2763 A268 D900 855A A9F5 14E0` | The full PGP key for this address, which is also available on several key servers (e.g. 
on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is: ``` -----BEGIN PGP PUBLIC KEY BLOCK----- -mQINBFUwGHYBEAC0wpGpBPkd8W1UdQjg9+cEFzeIEJRaoZoeuJD8mofwI5Ejnjdt -kCpUYEDal0ygkKobu8SzOoATcDl18iCrScX39VpTm96vISFZMhmOryYCIp4QLJNN -4HKc2ZdBj6W4igNi6vj5Qo6JMyGpLY2mz4CZskbt0TNuUxWrGood+UrCzpY8x7/N -a93fcvNw+prgCr0rCH3hAPmAFfsOBbtGzNnmq7xf3jg5r4Z4sDiNIF1X1y53DAfV -rWDx49IKsuCEJfPMp1MnBSvDvLaQ2hKXs+cOpx1BCZgHn3skouEUxxgqbtTzBLt1 -xXpmuijsaltWngPnGO7mOAzbpZSdBm82/Emrk9bPMuD0QaLQjWr7HkTSUs6ZsKt4 -7CLPdWqxyY/QVw9UaxeHEtWGQGMIQGgVJGh1fjtUr5O1sC9z9jXcQ0HuIHnRCTls -GP7hklJmfH5V4SyAJQ06/hLuEhUJ7dn+BlqCsT0tLmYTgZYNzNcLHcqBFMEZHvHw -9GENMx/tDXgajKql4bJnzuTK0iGU/YepanANLd1JHECJ4jzTtmKOus9SOGlB2/l1 -0t0ADDYAS3eqOdOcUvo9ElSLCI5vSVHhShSte/n2FMWU+kMUboTUisEG8CgQnrng -g2CvvQvqDkeOtZeqMcC7HdiZS0q3LJUWtwA/ViwxrVlBDCxiTUXCotyBWwARAQAB -tDBSaXBwbGUgTGFicyBCdWcgQm91bnR5IFByb2dyYW0gPGJ1Z3NAcmlwcGxlLmNv -bT6JAjcEEwEKACEFAlUwGHYCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ -zUmgr8V5Kb6R0g//SwY/mVJY59k87iL26/KayauSoOcz7xjcST26l4ZHVVX85gOY -HYZl8k0+m8X3zxeYm9a3QAoAml8sfoaFRFQP8ynnefRrLUPaZ2MjbJ0SACMwZNef -T6o7Mi8LBAaiNZdYVyIfX1oM6YXtqYkuJdav6ZCyvVYqc9OvMJPY2ZzJYuI/ZtvQ -/lTndxCeg9ALNX/iezOLGdfMpf4HuIFVwcPPlwGi+HDlB9/bggDEHC8z434SXVFc -aQatXAPcDkjMUweU7y0CZtYEj00HITd4pSX6MqGiHrxlDZTqinCOPs1Ieqp7qufs -MzlM6irLGucxj1+wa16ieyYvEtGaPIsksUKkywx0O7cf8N2qKg+eIkUk6O0Uc6eO -CszizmiXIXy4O6OiLlVHGKkXHMSW9Nwe9GE95O8G9WR8OZCEuDv+mHPAutO+IjdP -PDAAUvy+3XnkceO+HGWRpVvJZfFP2YH4A33InFL5yqlJmSoR/yVingGLxk55bZDM -+HYGR3VeMb8Xj1rf/02qERsZyccMCFdAvKDbTwmvglyHdVLu5sPmktxbBYiemfyJ -qxMxmYXCc9S0hWrWZW7edktBa9NpE58z1mx+hRIrDNbS2sDHrib9PULYCySyVYcF -P+PWEe1CAS5jqkR2ker5td2/pHNnJIycynBEs7l6zbc9fu+nktFJz0q2B+GJAhwE -EAEKAAYFAlUwGaQACgkQ+tiY1qQ2QkjMFw//f2hNY3BPNe+1qbhzumMDCnbTnGif -kLuAGl9OKt81VHG1f6RnaGiLpR696+6Ja45KzH15cQ5JJl5Bgs1YkR/noTGX8IAD -c70eNwiFu8JXTaaeeJrsmFkF9Tueufb364risYkvPP8tNUD3InBFEZT3WN7JKwix -coD4/BwekUwOZVDd/uCFEyhlhZsROxdKNisNo3VtAq2s+3tIBAmTrriFUl0K+ZC5 
-zgavcpnPN57zMtW9aK+VO3wXqAKYLYmtgxkVzSLUZt2M7JuwOaAdyuYWAneKZPCu -1AXkmyo+d84sd5mZaKOr5xArAFiNMWPUcZL4rkS1Fq4dKtGAqzzR7a7hWtA5o27T -6vynuxZ1n0PPh0er2O/zF4znIjm5RhTlfjp/VmhZdQfpulFEQ/dMxxGkQ9z5IYbX -mTlSDbCSb+FMsanRBJ7Drp5EmBIudVGY6SHI5Re1RQiEh7GoDfUMUwZO+TVDII5R -Ra7WyuimYleJgDo/+7HyfuIyGDaUCVj6pwVtYtYIdOI3tTw1R1Mr0V8yaNVnJghL -CHcEJQL+YHSmiMM3ySil3O6tm1By6lFz8bVe/rgG/5uklQrnjMR37jYboi1orCC4 -yeIoQeV0ItlxeTyBwYIV/o1DBNxDevTZvJabC93WiGLw2XFjpZ0q/9+zI2rJUZJh -qxmKP+D4e27lCI65Ag0EVTAYdgEQAMvttYNqeRNBRpSX8fk45WVIV8Fb21fWdwk6 -2SkZnJURbiC0LxQnOi7wrtii7DeFZtwM2kFHihS1VHekBnIKKZQSgGoKuFAQMGyu -a426H4ZsSmA9Ufd7kRbvdtEcp7/RTAanhrSL4lkBhaKJrXlxBJ27o3nd7/rh7r3a -OszbPY6DJ5bWClX3KooPTDl/RF2lHn+fweFk58UvuunHIyo4BWJUdilSXIjLun+P -Qaik4ZAsZVwNhdNz05d+vtai4AwbYoO7adboMLRkYaXSQwGytkm+fM6r7OpXHYuS -cR4zB/OK5hxCVEpWfiwN71N2NMvnEMaWd/9uhqxJzyvYgkVUXV9274TUe16pzXnW -ZLfmitjwc91e7mJBBfKNenDdhaLEIlDRwKTLj7k58f9srpMnyZFacntu5pUMNblB -cjXwWxz5ZaQikLnKYhIvrIEwtWPyjqOzNXNvYfZamve/LJ8HmWGCKao3QHoAIDvB -9XBxrDyTJDpxbog6Qu4SY8AdgVlan6c/PsLDc7EUegeYiNTzsOK+eq3G5/E92eIu -TsUXlciypFcRm1q8vLRr+HYYe2mJDo4GetB1zLkAFBcYJm/x9iJQbu0hn5NxJvZO -R0Y5nOJQdyi+muJzKYwhkuzaOlswzqVXkq/7+QCjg7QsycdcwDjiQh3OrsgXHrwl -M7gyafL9ABEBAAGJAh8EGAEKAAkFAlUwGHYCGwwACgkQzUmgr8V5Kb50BxAAhj9T -TwmNrgRldTHszj+Qc+v8RWqV6j+R+zc0cn5XlUa6XFaXI1OFFg71H4dhCPEiYeN0 -IrnocyMNvCol+eKIlPKbPTmoixjQ4udPTR1DC1Bx1MyW5FqOrsgBl5t0e1VwEViM -NspSStxu5Hsr6oWz2GD48lXZWJOgoL1RLs+uxjcyjySD/em2fOKASwchYmI+ezRv -plfhAFIMKTSCN2pgVTEOaaz13M0U+MoprThqF1LWzkGkkC7n/1V1f5tn83BWiagG -2N2Q4tHLfyouzMUKnX28kQ9sXfxwmYb2sA9FNIgxy+TdKU2ofLxivoWT8zS189z/ -Yj9fErmiMjns2FzEDX+bipAw55X4D/RsaFgC+2x2PDbxeQh6JalRA2Wjq32Ouubx -u+I4QhEDJIcVwt9x6LPDuos1F+M5QW0AiUhKrZJ17UrxOtaquh/nPUL9T3l2qPUn -1ChrZEEEhHO6vA8+jn0+cV9n5xEz30Str9iHnDQ5QyR5LyV4UBPgTdWyQzNVKA69 -KsSr9lbHEtQFRzGuBKwt6UlSFv9vPWWJkJit5XDKAlcKuGXj0J8OlltToocGElkF -+gEBZfoOWi/IBjRLrFW2cT3p36DTR5O1Ud/1DLnWRqgWNBLrbs2/KMKE6EnHttyD -7Tz8SQkuxltX/yBXMV3Ddy0t6nWV2SZEfuxJAQI= -=spg4 
+mQINBGkSZAQBEACprU199OhgdsOsygNjiQV4msuN3vDOUooehL+NwfsGfW79Tbqq +Q2u7uQ3NZjW+M2T4nsDwuhkr7pe7xSReR5W8ssaczvtUyxkvbMClilcgZ2OSCAuC +N9tzJsqOqkwBvXoNXkn//T2jnPz0ZU2wSF+NrEibq5FeuyGdoX3yXXBxq9pW9HzK +HkQll63QSl6BzVSGRQq+B6lGgaZGLwf3mzmIND9Z5VGLNK2jKynyz9z091whNG/M +kV+E7/r/bujHk7WIVId07G5/COTXmSr7kFnNEkd2Umw42dkgfiNKvlmJ9M7c1wLK +KbL9Eb4ADuW6rRc5k4s1e6GT8R4/VPliWbCl9SE32hXH8uTkqVIFZP2eyM5WRRHs +aKzitkQG9UK9gcb0kdgUkxOvvgPHAe5IuZlcHFzU4y0dBbU1VEFWVpiLU0q+IuNw +5BRemeHc59YNsngkmAZ+/9zouoShRusZmC8Wzotv75C2qVBcjijPvmjWAUz0Zunm +Lsr+O71vqHE73pERjD07wuD/ISjiYRYYE/bVrXtXLZijC7qAH4RE3nID+2ojcZyO +/2jMQvt7un56RsGH4UBHi3aBHi9bUoDGCXKiQY981cEuNaOxpou7Mh3x/ONzzSvk +sTV6nl1LOZHykN1JyKwaNbTSAiuyoN+7lOBqbV04DNYAHL88PrT21P83aQARAQAB +tB1SaXBwbGUgTGFicyA8YnVnc0ByaXBwbGUuY29tPokCTgQTAQgAOBYhBLcsBlQv +KuJQJ2OiaNkAhVqp9RTgBQJpEmQEAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheA +AAoJENkAhVqp9RTgBzgP/i7y+aDWl1maig1XMdyb+o0UGusumFSW4Hmj278wlKVv +usgLPihYgHE0PKrv6WRyKOMC1tQEcYYN93M+OeQ1vFhS2YyURq6RCMmh4zq/awXG +uZbG36OURB5NH8lGBOHiN/7O+nY0CgenBT2JWm+GW3nEOAVOVm4+r5GlpPlv+Dp1 +NPBThcKXFMnH73++NpSQoDzTfRYHPxhDAX3jkLi/moXfSanOLlR6l94XNNN0jBHW +Quao0rzf4WSXq9g6AS224xhAA5JyIcFl8TX7hzj5HaFn3VWo3COoDu4U7H+BM0fl +85yqiMQypp7EhN2gxpMMWaHY5TFM85U/bFXFYfEgihZ4/gt4uoIzsNI9jlX7mYvG +KFdDij+oTlRsuOxdIy60B3dKcwOH9nZZCz0SPsN/zlRWgKzK4gDKdGhFkU9OlvPu +94ZqscanoiWKDoZkF96+sjgfjkuHsDK7Lwc1Xi+T4drHG/3aVpkYabXox+lrKB/S +yxZjeqOIQzWPhnLgCaLyvsKo5hxKzL0w3eURu8F3IS7RgOOlljv4M+Me9sEVcdNV +aN3/tQwbaomSX1X5D5YXqhBwC3rU3wXwamsscRTGEpkV+JCX6KUqGP7nWmxCpAly +FL05XuOd5SVHJjXLeuje0JqLUpN514uL+bThWwDbDTdAdwW3oK/2WbXz7IfJRLBj +uQINBGkSZAQBEADdI3SL2F72qkrgFqXWE6HSRBu9bsAvTE5QrRPWk7ux6at537r4 +S4sIw2dOwLvbyIrDgKNq3LQ5wCK88NO/NeCOFm4AiCJSl3pJHXYnTDoUxTrrxx+o +vSRI4I3fHEql/MqzgiAb0YUezjgFdh3vYheMPp/309PFbOLhiFqEcx80Mx5h06UH +gDzu1qNj3Ec+31NLic5zwkrAkvFvD54d6bqYR3SEgMau6aYEewpGHbWBi2pLqSi2 +lQcAeOFixqGpTwDmAnYR8YtjBYepy0MojEAdTHcQQlOYSDk4q4elG+io2N8vECfU +rD6ORecN48GXdZINYWTAdslrUeanmBdgQrYkSpce8TSghgT9P01SNaXxmyaehVUO 
+lqI4pcg5G2oojAE8ncNS3TwDtt7daTaTC3bAdr4PXDVAzNAiewjMNZPB7xidkDGQ +Y4W1LxTMXyJVWxehYOH7tsbBRKninlfRnLgYzmtIbNRAAvNcsxU6ihv3AV0WFknN +YbSzotEv1Xq/5wk309x8zCDe+sP0cQicvbXafXmUzPAZzeqFg+VLFn7F9MP1WGlW +B1u7VIvBF1Mp9Nd3EAGBAoLRdRu+0dVWIjPTQuPIuD9cCatJA0wVaKUrjYbBMl88 +a12LixNVGeSFS9N7ADHx0/o7GNT6l88YbaLP6zggUHpUD/bR+cDN7vllIQARAQAB +iQI2BBgBCAAgFiEEtywGVC8q4lAnY6Jo2QCFWqn1FOAFAmkSZAQCGwwACgkQ2QCF +Wqn1FOAfAA/8CYq4p0p4bobY20CKEMsZrkBTFJyPDqzFwMeTjgpzqbD7Y3Qq5QCK +OBbvY02GWdiIsNOzKdBxiuam2xYP9WHZj4y7/uWEvT0qlPVmDFu+HXjoJ43oxwFd +CUp2gMuQ4cSL3X94VRJ3BkVL+tgBm8CNY0vnTLLOO3kum/R69VsGJS1JSGUWjNM+ +4qwS3mz+73xJu1HmERyN2RZF/DGIZI2PyONQQ6aH85G1Dd2ohu2/DBAkQAMBrPbj +FrbDaBLyFhODxU3kTWqnfLlaElSm2EGdIU2yx7n4BggEa//NZRMm5kyeo4vzhtlQ +YIVUMLAOLZvnEqDnsLKp+22FzNR/O+htBQC4lPywl53oYSALdhz1IQlcAC1ru5KR +XPzhIXV6IIzkcx9xNkEclZxmsuy5ERXyKEmLbIHAlzFmnrldlt2ZgXDtzaorLmxj +klKibxd5tF50qOpOivz+oPtFo7n+HmFa1nlVAMxlDCUdM0pEVeYDKI5zfVwalyhZ +NnjpakdZSXMwgc7NP/hH9buF35hKDp7EckT2y3JNYwHsDdy1icXN2q40XZw5tSIn +zkPWdu3OUY8PISohN6Pw4h0RH4ZmoX97E8sEfmdKaT58U4Hf2aAv5r9IWCSrAVqY +u5jvac29CzQR9Kal0A+8phHAXHNFD83SwzIC0syaT9ficAguwGH8X6Q= +=nGuD -----END PGP PUBLIC KEY BLOCK----- ``` diff --git a/cmake/CompilationEnv.cmake b/cmake/CompilationEnv.cmake index 345e4cdd62..2d97f94615 100644 --- a/cmake/CompilationEnv.cmake +++ b/cmake/CompilationEnv.cmake @@ -13,6 +13,7 @@ include_guard(GLOBAL) set(is_clang FALSE) set(is_gcc FALSE) set(is_msvc FALSE) +set(is_xcode FALSE) if(CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") # Clang or AppleClang set(is_clang TRUE) @@ -24,6 +25,11 @@ else() message(FATAL_ERROR "Unsupported C++ compiler: ${CMAKE_CXX_COMPILER_ID}") endif() +# Xcode generator detection +if(CMAKE_GENERATOR STREQUAL "Xcode") + set(is_xcode TRUE) +endif() + # -------------------------------------------------------------------- # Operating system detection diff --git a/cmake/XrplCore.cmake b/cmake/XrplCore.cmake index f5190c73ad..6e51384003 100644 --- a/cmake/XrplCore.cmake +++ b/cmake/XrplCore.cmake @@ -32,14 
+32,14 @@ target_protobuf_sources(xrpl.libpb xrpl/proto target_compile_options(xrpl.libpb PUBLIC - $<$:-wd4996> - $<$: + $<$:-wd4996> + $<$: --system-header-prefix="google/protobuf" -Wno-deprecated-dynamic-exception-spec > PRIVATE - $<$:-wd4065> - $<$>:-Wno-deprecated-declarations> + $<$:-wd4065> + $<$>:-Wno-deprecated-declarations> ) target_link_libraries(xrpl.libpb diff --git a/cmake/XrplInstall.cmake b/cmake/XrplInstall.cmake index 310436998d..0599a8268c 100644 --- a/cmake/XrplInstall.cmake +++ b/cmake/XrplInstall.cmake @@ -4,6 +4,12 @@ include(create_symbolic_link) +# If no suffix is defined for executables (e.g. Windows uses .exe but Linux +# and macOS use none), then explicitly set it to the empty string. +if(NOT DEFINED suffix) + set(suffix "") +endif() + install ( TARGETS common diff --git a/cmake/XrplInterface.cmake b/cmake/XrplInterface.cmake index 6e0203c099..f53b2dac26 100644 --- a/cmake/XrplInterface.cmake +++ b/cmake/XrplInterface.cmake @@ -4,6 +4,11 @@ include(CompilationEnv) +# Set defaults for optional variables to avoid uninitialized variable warnings +if(NOT DEFINED voidstar) + set(voidstar OFF) +endif() + add_library (opts INTERFACE) add_library (Xrpl::opts ALIAS opts) target_compile_definitions (opts @@ -52,7 +57,7 @@ add_library (xrpl_syslibs INTERFACE) add_library (Xrpl::syslibs ALIAS xrpl_syslibs) target_link_libraries (xrpl_syslibs INTERFACE - $<$: + $<$: legacy_stdio_definitions.lib Shlwapi kernel32 @@ -69,10 +74,10 @@ target_link_libraries (xrpl_syslibs odbccp32 crypt32 > - $<$>:dl> - $<$,$>>:rt>) + $<$>:dl> + $<$,$>>:rt>) -if (NOT MSVC) +if (NOT is_msvc) set (THREADS_PREFER_PTHREAD_FLAG ON) find_package (Threads) target_link_libraries (xrpl_syslibs INTERFACE Threads::Threads) diff --git a/cmake/XrplSanitizers.cmake b/cmake/XrplSanitizers.cmake index 050a5ef6f0..fc31e4a3ec 100644 --- a/cmake/XrplSanitizers.cmake +++ b/cmake/XrplSanitizers.cmake @@ -43,7 +43,10 @@ include(CompilationEnv) # Read environment variable -set(SANITIZERS 
$ENV{SANITIZERS}) +set(SANITIZERS "") +if(DEFINED ENV{SANITIZERS}) + set(SANITIZERS "$ENV{SANITIZERS}") +endif() # Set SANITIZERS_ENABLED flag for use in other modules if(SANITIZERS MATCHES "address|thread|undefinedbehavior") diff --git a/cmake/XrplSettings.cmake b/cmake/XrplSettings.cmake index 3e9e127fd4..c85341c6b8 100644 --- a/cmake/XrplSettings.cmake +++ b/cmake/XrplSettings.cmake @@ -4,10 +4,11 @@ include(CompilationEnv) -if("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true") - set(is_ci TRUE) -else() - set(is_ci FALSE) +set(is_ci FALSE) +if(DEFINED ENV{CI}) + if("$ENV{CI}" STREQUAL "true") + set(is_ci TRUE) + endif() endif() get_directory_property(has_parent PARENT_DIRECTORY) diff --git a/cmake/deps/Boost.cmake b/cmake/deps/Boost.cmake index b73698efd8..49025ae342 100644 --- a/cmake/deps/Boost.cmake +++ b/cmake/deps/Boost.cmake @@ -30,7 +30,6 @@ target_link_libraries(xrpl_boost Boost::process Boost::program_options Boost::regex - Boost::system Boost::thread) if(Boost_COMPILER) target_link_libraries(xrpl_boost INTERFACE Boost::disable_autolinking) diff --git a/conan.lock b/conan.lock index f3fa1e5f6e..64def2e916 100644 --- a/conan.lock +++ b/conan.lock @@ -11,7 +11,7 @@ "re2/20230301#ca3b241baec15bd31ea9187150e0b333%1765850148.103", "protobuf/6.32.1#f481fd276fc23a33b85a3ed1e898b693%1765850161.038", "openssl/3.5.4#1b986e61b38fdfda3b40bebc1b234393%1768312656.257", - "nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1765850143.957", + "nudb/2.0.9#0432758a24204da08fee953ec9ea03cb%1769436073.32", "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914", "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1765842973.492", "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03", @@ -23,7 +23,7 @@ "date/3.0.4#862e11e80030356b53c2c38599ceb32b%1765850143.772", "c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1765850144.336", "bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1765850143.837", - 
"boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1765850172.862", + "boost/1.90.0#d5e8defe7355494953be18524a7f135b%1765955095.179", "abseil/20250127.0#99262a368bd01c0ccca8790dfced9719%1766517936.993" ], "build_requires": [ @@ -42,18 +42,22 @@ ], "python_requires": [], "overrides": { + "boost/1.90.0#d5e8defe7355494953be18524a7f135b": [ + null, + "boost/1.90.0" + ], "protobuf/5.27.0": [ "protobuf/6.32.1" ], "lz4/1.9.4": [ "lz4/1.10.0" ], - "boost/1.83.0": [ - "boost/1.88.0" - ], "sqlite3/3.44.2": [ "sqlite3/3.49.1" ], + "boost/1.83.0": [ + "boost/1.90.0" + ], "lz4/[>=1.9.4 <2]": [ "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504" ] diff --git a/conanfile.py b/conanfile.py index 8501909ce3..84ef1e2aa7 100644 --- a/conanfile.py +++ b/conanfile.py @@ -131,7 +131,7 @@ class Xrpl(ConanFile): transitive_headers_opt = ( {"transitive_headers": True} if conan_version.split(".")[0] == "2" else {} ) - self.requires("boost/1.88.0", force=True, **transitive_headers_opt) + self.requires("boost/1.90.0", force=True, **transitive_headers_opt) self.requires("date/3.0.4", **transitive_headers_opt) self.requires("lz4/1.10.0", force=True) self.requires("protobuf/6.32.1", force=True) diff --git a/include/xrpl/basics/Number.h b/include/xrpl/basics/Number.h index d1ef749784..2f467fb036 100644 --- a/include/xrpl/basics/Number.h +++ b/include/xrpl/basics/Number.h @@ -4,6 +4,7 @@ #include #include +#include #include #include #include diff --git a/include/xrpl/protocol/STObject.h b/include/xrpl/protocol/STObject.h index c6ca133e89..d282e9bf01 100644 --- a/include/xrpl/protocol/STObject.h +++ b/include/xrpl/protocol/STObject.h @@ -397,6 +397,9 @@ public: void delField(int index); + SOEStyle + getStyle(SField const& field) const; + bool hasMatchingEntry(STBase const&); diff --git a/src/libxrpl/beast/core/CurrentThreadName.cpp b/src/libxrpl/beast/core/CurrentThreadName.cpp index e8f7b629a7..34e8603afa 100644 --- a/src/libxrpl/beast/core/CurrentThreadName.cpp +++ 
b/src/libxrpl/beast/core/CurrentThreadName.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -95,6 +96,11 @@ setCurrentThreadNameImpl(std::string_view name) std::cerr << "WARNING: Thread name \"" << name << "\" (length " << name.size() << ") exceeds maximum of " << maxThreadNameLength << " characters on Linux.\n"; + + XRPL_ASSERT( + false, + "beast::detail::setCurrentThreadNameImpl : Thread name exceeds " + "maximum length for Linux"); } #endif } diff --git a/src/libxrpl/core/detail/Job.cpp b/src/libxrpl/core/detail/Job.cpp index 3968c3ede4..358441010b 100644 --- a/src/libxrpl/core/detail/Job.cpp +++ b/src/libxrpl/core/detail/Job.cpp @@ -41,7 +41,7 @@ Job::queue_time() const void Job::doJob() { - beast::setCurrentThreadName("doJob: " + mName); + beast::setCurrentThreadName("j:" + mName); m_loadEvent->start(); m_loadEvent->setName(mName); diff --git a/src/libxrpl/protocol/STAmount.cpp b/src/libxrpl/protocol/STAmount.cpp index ec60971e63..91ad029dda 100644 --- a/src/libxrpl/protocol/STAmount.cpp +++ b/src/libxrpl/protocol/STAmount.cpp @@ -100,6 +100,8 @@ areComparable(STAmount const& v1, STAmount const& v2) return false; } +static_assert(INITIAL_XRP.drops() == STAmount::cMaxNativeN); + STAmount::STAmount(SerialIter& sit, SField const& name) : STBase(name) { std::uint64_t value = sit.get64(); diff --git a/src/libxrpl/protocol/STObject.cpp b/src/libxrpl/protocol/STObject.cpp index 6007753ed1..a45eb4b7c7 100644 --- a/src/libxrpl/protocol/STObject.cpp +++ b/src/libxrpl/protocol/STObject.cpp @@ -580,6 +580,12 @@ STObject::delField(int index) v_.erase(v_.begin() + index); } +SOEStyle +STObject::getStyle(SField const& field) const +{ + return mType ? 
mType->style(field) : soeINVALID; +} + unsigned char STObject::getFieldU8(SField const& field) const { diff --git a/src/libxrpl/protocol/STTakesAsset.cpp b/src/libxrpl/protocol/STTakesAsset.cpp index d43e7b04a1..9167c6ace0 100644 --- a/src/libxrpl/protocol/STTakesAsset.cpp +++ b/src/libxrpl/protocol/STTakesAsset.cpp @@ -18,10 +18,28 @@ associateAsset(SLE& sle, Asset const& asset) // If the field is not set or present, skip it. if (type == STI_NOTPRESENT) continue; + // If the type doesn't downcast, then the flag shouldn't be on the // SField auto& ta = entry.downcast(); + auto const style = sle.getStyle(ta.getFName()); + XRPL_ASSERT_PARTS( + style != soeINVALID, + "xrpl::associateAsset", + "valid template element style"); + + XRPL_ASSERT_PARTS( + style != soeDEFAULT || !ta.isDefault(), + "xrpl::associateAsset", + "non-default value"); ta.associateAsset(asset); + + // associateAsset in derived classes may change the underlying + // value, but it won't know anything about how the value relates to + // the SLE. If the template element is soeDEFAULT, and the value + // changed to the default value, remove the field. 
+ if (style == soeDEFAULT && ta.isDefault()) + sle.makeFieldAbsent(field); } } } diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 41a4fc2b3b..d8bfa71d46 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -5782,6 +5782,263 @@ class Vault_test : public beast::unit_test::suite testCase(MPT, "MPT", owner, depositor, issuer); } + void + testAssetsMaximum() + { + testcase("Assets Maximum"); + + using namespace test::jtx; + + Env env{*this, testable_amendments() | featureSingleAssetVault}; + Account const owner{"owner"}; + Account const issuer{"issuer"}; + + Vault vault{env}; + env.fund(XRP(1'000'000), issuer, owner); + env.close(); + + auto const maxInt64 = + std::to_string(std::numeric_limits::max()); + BEAST_EXPECT(maxInt64 == "9223372036854775807"); + + // Naming things is hard + auto const maxInt64Plus1 = std::to_string( + static_cast( + std::numeric_limits::max()) + + 1); + BEAST_EXPECT(maxInt64Plus1 == "9223372036854775808"); + + auto const initialXRP = to_string(INITIAL_XRP); + BEAST_EXPECT(initialXRP == "100000000000000000"); + + auto const initialXRPPlus1 = to_string(INITIAL_XRP + 1); + BEAST_EXPECT(initialXRPPlus1 == "100000000000000001"); + + { + testcase("Assets Maximum: XRP"); + + PrettyAsset const xrpAsset = xrpIssue(); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = xrpAsset}); + tx[sfData] = "4D65746144617461"; + + tx[sfAssetsMaximum] = maxInt64; + env(tx, ter(tefEXCEPTION), THISLINE); + env.close(); + + tx[sfAssetsMaximum] = initialXRPPlus1; + env(tx, ter(tefEXCEPTION), THISLINE); + env.close(); + + tx[sfAssetsMaximum] = initialXRP; + env(tx, THISLINE); + env.close(); + + tx[sfAssetsMaximum] = maxInt64Plus1; + env(tx, ter(tefEXCEPTION), THISLINE); + env.close(); + + // This value will be rounded + auto const insertAt = maxInt64Plus1.size() - 3; + auto const decimalTest = maxInt64Plus1.substr(0, insertAt) + "." 
+ + maxInt64Plus1.substr(insertAt); // (max int64+1) / 1000 + BEAST_EXPECT(decimalTest == "9223372036854775.808"); + tx[sfAssetsMaximum] = decimalTest; + auto const newKeylet = keylet::vault(owner.id(), env.seq(owner)); + env(tx, THISLINE); + env.close(); + + auto const vaultSle = env.le(newKeylet); + if (!BEAST_EXPECT(vaultSle)) + return; + + BEAST_EXPECT(vaultSle->at(sfAssetsMaximum) == 9223372036854776); + } + + { + testcase("Assets Maximum: MPT"); + + PrettyAsset const mptAsset = [&]() { + MPTTester mptt{env, issuer, mptInitNoFund}; + mptt.create( + {.flags = + tfMPTCanClawback | tfMPTCanTransfer | tfMPTCanLock}); + env.close(); + PrettyAsset const mptAsset = mptt["MPT"]; + mptt.authorize({.account = owner}); + env.close(); + return mptAsset; + }(); + + env(pay(issuer, owner, mptAsset(100'000)), THISLINE); + env.close(); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = mptAsset}); + tx[sfData] = "4D65746144617461"; + + tx[sfAssetsMaximum] = maxInt64; + env(tx, THISLINE); + env.close(); + + tx[sfAssetsMaximum] = initialXRPPlus1; + env(tx, THISLINE); + env.close(); + + tx[sfAssetsMaximum] = initialXRP; + env(tx, THISLINE); + env.close(); + + tx[sfAssetsMaximum] = maxInt64Plus1; + env(tx, ter(tefEXCEPTION), THISLINE); + env.close(); + + // This value will be rounded + auto const insertAt = maxInt64Plus1.size() - 1; + auto const decimalTest = maxInt64Plus1.substr(0, insertAt) + "." 
+ + maxInt64Plus1.substr(insertAt); // (max int64+1) / 10 + BEAST_EXPECT(decimalTest == "922337203685477580.8"); + tx[sfAssetsMaximum] = decimalTest; + auto const newKeylet = keylet::vault(owner.id(), env.seq(owner)); + env(tx, THISLINE); + env.close(); + + auto const vaultSle = env.le(newKeylet); + if (!BEAST_EXPECT(vaultSle)) + return; + + BEAST_EXPECT(vaultSle->at(sfAssetsMaximum) == 922337203685477581); + } + + { + testcase("Assets Maximum: IOU"); + + // Almost anything goes with IOUs + PrettyAsset iouAsset = issuer["IOU"]; + env.trust(iouAsset(1000), owner); + env(pay(issuer, owner, iouAsset(200))); + env.close(); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = iouAsset}); + tx[sfData] = "4D65746144617461"; + + tx[sfAssetsMaximum] = maxInt64; + env(tx, THISLINE); + env.close(); + + tx[sfAssetsMaximum] = initialXRPPlus1; + env(tx, THISLINE); + env.close(); + + tx[sfAssetsMaximum] = initialXRP; + env(tx, THISLINE); + env.close(); + + tx[sfAssetsMaximum] = maxInt64Plus1; + env(tx, THISLINE); + env.close(); + + tx[sfAssetsMaximum] = "1000000000000000e80"; + env.close(); + + tx[sfAssetsMaximum] = "1000000000000000e-96"; + env.close(); + + // These values will be rounded to 15 significant digits + { + auto const insertAt = maxInt64Plus1.size() - 1; + auto const decimalTest = maxInt64Plus1.substr(0, insertAt) + + "." 
+ maxInt64Plus1.substr(insertAt); // (max int64+1) / 10 + BEAST_EXPECT(decimalTest == "922337203685477580.8"); + tx[sfAssetsMaximum] = decimalTest; + auto const newKeylet = + keylet::vault(owner.id(), env.seq(owner)); + env(tx, THISLINE); + env.close(); + + auto const vaultSle = env.le(newKeylet); + if (!BEAST_EXPECT(vaultSle)) + return; + + BEAST_EXPECT( + (vaultSle->at(sfAssetsMaximum) == + Number{9223372036854776, 2, Number::normalized{}})); + } + { + tx[sfAssetsMaximum] = + "9223372036854775807e40"; // max int64 * 10^40 + auto const newKeylet = + keylet::vault(owner.id(), env.seq(owner)); + env(tx, THISLINE); + env.close(); + + auto const vaultSle = env.le(newKeylet); + if (!BEAST_EXPECT(vaultSle)) + return; + + BEAST_EXPECT( + (vaultSle->at(sfAssetsMaximum) == + Number{9223372036854776, 43, Number::normalized{}})); + } + { + tx[sfAssetsMaximum] = + "9223372036854775807e-40"; // max int64 * 10^-40 + auto const newKeylet = + keylet::vault(owner.id(), env.seq(owner)); + env(tx, THISLINE); + env.close(); + + auto const vaultSle = env.le(newKeylet); + if (!BEAST_EXPECT(vaultSle)) + return; + + BEAST_EXPECT( + (vaultSle->at(sfAssetsMaximum) == + Number{9223372036854776, -37, Number::normalized{}})); + } + { + tx[sfAssetsMaximum] = + "9223372036854775807e-100"; // max int64 * 10^-100 + auto const newKeylet = + keylet::vault(owner.id(), env.seq(owner)); + env(tx, THISLINE); + env.close(); + + // Field 'AssetsMaximum' may not be explicitly set to default. + auto const vaultSle = env.le(newKeylet); + if (!BEAST_EXPECT(vaultSle)) + return; + + BEAST_EXPECT(vaultSle->at(sfAssetsMaximum) == numZero); + } + + // What _can't_ IOUs do? + // 1. Exceed maximum exponent / offset + tx[sfAssetsMaximum] = "1000000000000000e81"; + env(tx, ter(tefEXCEPTION), THISLINE); + env.close(); + + // 2. 
Mantissa larger than uint64 max + try + { + tx[sfAssetsMaximum] = + "18446744073709551617e5"; // uint64 max + 1 + env(tx, THISLINE); + BEAST_EXPECT(false); + } + catch (parse_error const& e) + { + using namespace std::string_literals; + BEAST_EXPECT( + e.what() == + "invalidParamsField 'tx_json.AssetsMaximum' has invalid " + "data."s); + } + } + } + public: void run() override @@ -5802,6 +6059,7 @@ public: testDelegate(); testVaultClawbackBurnShares(); testVaultClawbackAssets(); + testAssetsMaximum(); } }; diff --git a/src/test/beast/beast_CurrentThreadName_test.cpp b/src/test/beast/beast_CurrentThreadName_test.cpp index dc12883a63..918d426979 100644 --- a/src/test/beast/beast_CurrentThreadName_test.cpp +++ b/src/test/beast/beast_CurrentThreadName_test.cpp @@ -88,20 +88,15 @@ public: BEAST_EXPECT(stateB == 2); } #if BOOST_OS_LINUX - // On Linux, verify that thread names longer than 15 characters - // are truncated to 15 characters (the 16th character is reserved for - // the null terminator). + // On Linux, verify that thread names within the 15 character limit + // are set correctly (the 16th character is reserved for the null + // terminator). 
{ testName( "123456789012345", - "123456789012345"); // 15 chars, no truncation - testName( - "1234567890123456", "123456789012345"); // 16 chars, truncated - testName( - "ThisIsAVeryLongThreadNameExceedingLimit", - "ThisIsAVeryLong"); // 39 chars, truncated + "123456789012345"); // 15 chars, maximum allowed testName("", ""); // empty name - testName("short", "short"); // short name, no truncation + testName("short", "short"); // short name } #endif } diff --git a/src/test/core/Coroutine_test.cpp b/src/test/core/Coroutine_test.cpp index 72a4c02434..761fd46915 100644 --- a/src/test/core/Coroutine_test.cpp +++ b/src/test/core/Coroutine_test.cpp @@ -56,7 +56,7 @@ public: gate g1, g2; std::shared_ptr c; env.app().getJobQueue().postCoro( - jtCLIENT, "Coroutine-Test", [&](auto const& cr) { + jtCLIENT, "CoroTest", [&](auto const& cr) { c = cr; g1.signal(); c->yield(); @@ -83,7 +83,7 @@ public: gate g; env.app().getJobQueue().postCoro( - jtCLIENT, "Coroutine-Test", [&](auto const& c) { + jtCLIENT, "CoroTest", [&](auto const& c) { c->post(); c->yield(); g.signal(); @@ -109,7 +109,7 @@ public: BEAST_EXPECT(*lv == -1); gate g; - jq.addJob(jtCLIENT, "LocalValue-Test", [&]() { + jq.addJob(jtCLIENT, "LocalValTest", [&]() { this->BEAST_EXPECT(*lv == -1); *lv = -2; this->BEAST_EXPECT(*lv == -2); @@ -120,7 +120,7 @@ public: for (int i = 0; i < N; ++i) { - jq.postCoro(jtCLIENT, "Coroutine-Test", [&, id = i](auto const& c) { + jq.postCoro(jtCLIENT, "CoroTest", [&, id = i](auto const& c) { a[id] = c; g.signal(); c->yield(); @@ -148,7 +148,7 @@ public: c->join(); } - jq.addJob(jtCLIENT, "LocalValue-Test", [&]() { + jq.addJob(jtCLIENT, "LocalValTest", [&]() { this->BEAST_EXPECT(*lv == -2); g.signal(); }); diff --git a/src/test/jtx/TestHelpers.h b/src/test/jtx/TestHelpers.h index 3096a902b1..8149c2d464 100644 --- a/src/test/jtx/TestHelpers.h +++ b/src/test/jtx/TestHelpers.h @@ -15,15 +15,8 @@ #include #include -#include - -#if (defined(__clang_major__) && __clang_major__ < 15) -#include 
-using source_location = std::experimental::source_location; -#else #include -using std::source_location; -#endif +#include namespace xrpl { namespace test { @@ -640,7 +633,7 @@ checkMetrics( std::size_t expectedPerLedger, std::uint64_t expectedMinFeeLevel = baseFeeLevel.fee(), std::uint64_t expectedMedFeeLevel = minEscalationFeeLevel.fee(), - source_location const location = source_location::current()) + std::source_location const location = std::source_location::current()) { int line = location.line(); char const* file = location.file_name(); diff --git a/src/test/overlay/TMGetObjectByHash_test.cpp b/src/test/overlay/TMGetObjectByHash_test.cpp new file mode 100644 index 0000000000..71a485416d --- /dev/null +++ b/src/test/overlay/TMGetObjectByHash_test.cpp @@ -0,0 +1,211 @@ +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace xrpl { +namespace test { + +using namespace jtx; + +/** + * Test for TMGetObjectByHash reply size limiting. + * + * This verifies the fix that limits TMGetObjectByHash replies to + * Tuning::hardMaxReplyNodes to prevent excessive memory usage and + * potential DoS attacks from peers requesting large numbers of objects. + */ +class TMGetObjectByHash_test : public beast::unit_test::suite +{ + using middle_type = boost::beast::tcp_stream; + using stream_type = boost::beast::ssl_stream; + using socket_type = boost::asio::ip::tcp::socket; + using shared_context = std::shared_ptr; + /** + * Test peer that captures sent messages for verification. 
+ */ + class PeerTest : public PeerImp + { + public: + PeerTest( + Application& app, + std::shared_ptr const& slot, + http_request_type&& request, + PublicKey const& publicKey, + ProtocolVersion protocol, + Resource::Consumer consumer, + std::unique_ptr&& stream_ptr, + OverlayImpl& overlay) + : PeerImp( + app, + id_++, + slot, + std::move(request), + publicKey, + protocol, + consumer, + std::move(stream_ptr), + overlay) + { + } + + ~PeerTest() = default; + + void + run() override + { + } + + void + send(std::shared_ptr const& m) override + { + lastSentMessage_ = m; + } + + std::shared_ptr + getLastSentMessage() const + { + return lastSentMessage_; + } + + static void + resetId() + { + id_ = 0; + } + + private: + inline static Peer::id_t id_ = 0; + std::shared_ptr lastSentMessage_; + }; + + shared_context context_{make_SSLContext("")}; + ProtocolVersion protocolVersion_{1, 7}; + + std::shared_ptr + createPeer(jtx::Env& env) + { + auto& overlay = dynamic_cast(env.app().overlay()); + boost::beast::http::request request; + auto stream_ptr = std::make_unique( + socket_type(env.app().getIOContext()), *context_); + + beast::IP::Endpoint local( + boost::asio::ip::make_address("172.1.1.1"), 51235); + beast::IP::Endpoint remote( + boost::asio::ip::make_address("172.1.1.2"), 51235); + + PublicKey key(std::get<0>(randomKeyPair(KeyType::ed25519))); + auto consumer = overlay.resourceManager().newInboundEndpoint(remote); + auto [slot, _] = overlay.peerFinder().new_inbound_slot(local, remote); + + auto peer = std::make_shared( + env.app(), + slot, + std::move(request), + key, + protocolVersion_, + consumer, + std::move(stream_ptr), + overlay); + + overlay.add_active(peer); + return peer; + } + + std::shared_ptr + createRequest(size_t const numObjects, Env& env) + { + // Store objects in the NodeStore that will be found during the query + auto& nodeStore = env.app().getNodeStore(); + + // Create and store objects + std::vector hashes; + hashes.reserve(numObjects); + for (int i = 0; 
i < numObjects; ++i) + { + uint256 hash(xrpl::sha512Half(i)); + hashes.push_back(hash); + + Blob data(100, static_cast(i % 256)); + nodeStore.store( + hotLEDGER, + std::move(data), + hash, + nodeStore.earliestLedgerSeq()); + } + + // Create a request with more objects than hardMaxReplyNodes + auto request = std::make_shared(); + request->set_type(protocol::TMGetObjectByHash_ObjectType_otLEDGER); + request->set_query(true); + + for (int i = 0; i < numObjects; ++i) + { + auto object = request->add_objects(); + object->set_hash(hashes[i].data(), hashes[i].size()); + object->set_ledgerseq(i); + } + return request; + } + + /** + * Test that reply is limited to hardMaxReplyNodes when more objects + * are requested than the limit allows. + */ + void + testReplyLimit(size_t const numObjects, int const expectedReplySize) + { + testcase("Reply Limit"); + + Env env(*this); + PeerTest::resetId(); + + auto peer = createPeer(env); + + auto request = createRequest(numObjects, env); + // Call the onMessage handler + peer->onMessage(request); + + // Verify that a reply was sent + auto sentMessage = peer->getLastSentMessage(); + BEAST_EXPECT(sentMessage != nullptr); + + // Parse the reply message + auto const& buffer = + sentMessage->getBuffer(compression::Compressed::Off); + + BEAST_EXPECT(buffer.size() > 6); + // Skip the message header (6 bytes: 4 for size, 2 for type) + protocol::TMGetObjectByHash reply; + BEAST_EXPECT( + reply.ParseFromArray(buffer.data() + 6, buffer.size() - 6) == true); + + // Verify the reply is limited to expectedReplySize + BEAST_EXPECT(reply.objects_size() == expectedReplySize); + } + + void + run() override + { + int const limit = static_cast(Tuning::hardMaxReplyNodes); + testReplyLimit(limit + 1, limit); + testReplyLimit(limit, limit); + testReplyLimit(limit - 1, limit - 1); + } +}; + +BEAST_DEFINE_TESTSUITE(TMGetObjectByHash, overlay, xrpl); + +} // namespace test +} // namespace xrpl diff --git a/src/test/rpc/LedgerEntry_test.cpp 
b/src/test/rpc/LedgerEntry_test.cpp index 15c65437d8..1133ee8208 100644 --- a/src/test/rpc/LedgerEntry_test.cpp +++ b/src/test/rpc/LedgerEntry_test.cpp @@ -5,6 +5,8 @@ #include #include +#include + #include #include #include @@ -12,13 +14,7 @@ #include #include -#if (defined(__clang_major__) && __clang_major__ < 15) -#include -using source_location = std::experimental::source_location; -#else #include -using std::source_location; -#endif namespace xrpl { namespace test { @@ -30,6 +26,7 @@ enum class FieldType { CurrencyField, HashField, HashOrObjectField, + FixedHashField, IssueField, ObjectField, StringField, @@ -86,6 +83,7 @@ getTypeName(FieldType typeID) case FieldType::CurrencyField: return "Currency"; case FieldType::HashField: + case FieldType::FixedHashField: return "hex string"; case FieldType::HashOrObjectField: return "hex string or object"; @@ -110,7 +108,7 @@ class LedgerEntry_test : public beast::unit_test::suite Json::Value const& jv, std::string const& err, std::string const& msg, - source_location const location = source_location::current()) + std::source_location const location = std::source_location::current()) { if (BEAST_EXPECT(jv.isMember(jss::status))) BEAST_EXPECTS( @@ -202,6 +200,7 @@ class LedgerEntry_test : public beast::unit_test::suite static auto const& badBlobValues = remove({3, 7, 8, 16}); static auto const& badCurrencyValues = remove({14}); static auto const& badHashValues = remove({2, 3, 7, 8, 16}); + static auto const& badFixedHashValues = remove({1, 2, 3, 4, 7, 8, 16}); static auto const& badIndexValues = remove({12, 16, 18, 19}); static auto const& badUInt32Values = remove({2, 3}); static auto const& badUInt64Values = remove({2, 3}); @@ -222,6 +221,8 @@ class LedgerEntry_test : public beast::unit_test::suite return badHashValues; case FieldType::HashOrObjectField: return badIndexValues; + case FieldType::FixedHashField: + return badFixedHashValues; case FieldType::IssueField: return badIssueValues; case FieldType::UInt32Field: @@ 
-290,7 +291,7 @@ class LedgerEntry_test : public beast::unit_test::suite FieldType const typeID, std::string const& expectedError, bool required = true, - source_location const location = source_location::current()) + std::source_location const location = std::source_location::current()) { forAllApiVersions([&, this](unsigned apiVersion) { if (required) @@ -343,7 +344,7 @@ class LedgerEntry_test : public beast::unit_test::suite FieldType typeID, std::string const& expectedError, bool required = true, - source_location const location = source_location::current()) + std::source_location const location = std::source_location::current()) { forAllApiVersions([&, this](unsigned apiVersion) { if (required) @@ -400,7 +401,7 @@ class LedgerEntry_test : public beast::unit_test::suite runLedgerEntryTest( test::jtx::Env& env, Json::StaticString const& parentField, - source_location const location = source_location::current()) + std::source_location const location = std::source_location::current()) { testMalformedField( env, @@ -424,7 +425,7 @@ class LedgerEntry_test : public beast::unit_test::suite test::jtx::Env& env, Json::StaticString const& parentField, std::vector const& subfields, - source_location const location = source_location::current()) + std::source_location const location = std::source_location::current()) { testMalformedField( env, @@ -715,7 +716,12 @@ class LedgerEntry_test : public beast::unit_test::suite } // negative tests - runLedgerEntryTest(env, jss::amendments); + testMalformedField( + env, + Json::Value{}, + jss::amendments, + FieldType::FixedHashField, + "malformedRequest"); } void @@ -1536,7 +1542,12 @@ class LedgerEntry_test : public beast::unit_test::suite } // negative tests - runLedgerEntryTest(env, jss::fee); + testMalformedField( + env, + Json::Value{}, + jss::fee, + FieldType::FixedHashField, + "malformedRequest"); } void @@ -1559,7 +1570,12 @@ class LedgerEntry_test : public beast::unit_test::suite } // negative tests - runLedgerEntryTest(env, 
jss::hashes); + testMalformedField( + env, + Json::Value{}, + jss::hashes, + FieldType::FixedHashField, + "malformedRequest"); } void @@ -1684,7 +1700,12 @@ class LedgerEntry_test : public beast::unit_test::suite } // negative tests - runLedgerEntryTest(env, jss::nunl); + testMalformedField( + env, + Json::Value{}, + jss::nunl, + FieldType::FixedHashField, + "malformedRequest"); } void @@ -2341,6 +2362,438 @@ class LedgerEntry_test : public beast::unit_test::suite } } + /// Test the ledger entry types that don't take parameters + void + testFixed() + { + using namespace test::jtx; + + Account const alice{"alice"}; + Account const bob{"bob"}; + + Env env{*this, envconfig([](auto cfg) { + cfg->START_UP = Config::FRESH; + return cfg; + })}; + + env.close(); + + /** Verifies that the RPC result has the expected data + * + * @param good: Indicates that the request should have succeeded + * and returned a ledger object of `expectedType` type. + * @param jv: The RPC result Json value + * @param expectedType: The type that the ledger object should + * have if "good". + * @param expectedError: Optional. The expected error if not + * good. Defaults to "entryNotFound". 
+ */ + auto checkResult = + [&](bool good, + Json::Value const& jv, + Json::StaticString const& expectedType, + std::optional const& expectedError = {}) { + if (good) + { + BEAST_EXPECTS( + jv.isObject() && jv.isMember(jss::result) && + !jv[jss::result].isMember(jss::error) && + jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::node].isMember( + sfLedgerEntryType.jsonName) && + jv[jss::result][jss::node] + [sfLedgerEntryType.jsonName] == expectedType, + to_string(jv)); + } + else + { + BEAST_EXPECTS( + jv.isObject() && jv.isMember(jss::result) && + jv[jss::result].isMember(jss::error) && + !jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::error] == + expectedError.value_or("entryNotFound"), + to_string(jv)); + } + }; + + /** Runs a series of tests for a given fixed-position ledger + * entry. + * + * @param field: The Json request field to use. + * @param expectedType: The type that the ledger object should + * have if "good". + * @param expectedKey: The keylet of the fixed object. + * @param good: Indicates whether the object is expected to + * exist. + */ + auto test = [&](Json::StaticString const& field, + Json::StaticString const& expectedType, + Keylet const& expectedKey, + bool good) { + testcase << expectedType.c_str() << (good ? 
"" : " not") + << " found"; + + auto const hexKey = strHex(expectedKey.key); + + { + // Test bad values + // "field":null + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[field] = Json::nullValue; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, expectedType, "malformedRequest"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + { + Json::Value params; + // "field":"string" + params[jss::ledger_index] = jss::validated; + params[field] = "arbitrary string"; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, expectedType, "malformedRequest"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + { + Json::Value params; + // "field":false + params[jss::ledger_index] = jss::validated; + params[field] = false; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, expectedType, "invalidParams"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + { + Json::Value params; + + // "field":[incorrect index hash] + auto const badKey = strHex(expectedKey.key + uint256{1}); + params[jss::ledger_index] = jss::validated; + params[field] = badKey; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, expectedType, "entryNotFound"); + BEAST_EXPECTS( + jv[jss::result][jss::index] == badKey, to_string(jv)); + } + + { + Json::Value params; + // "index":"field" using API 2 + params[jss::ledger_index] = jss::validated; + params[jss::index] = field; + params[jss::api_version] = 2; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, expectedType, "malformedRequest"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + std::string const pdIdx = [&]() { + { + Json::Value params; + // Test good values + // Use the "field":true notation + params[jss::ledger_index] = jss::validated; + params[field] = 
true; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + // Index will always be returned for valid parameters. + std::string const pdIdx = + jv[jss::result][jss::index].asString(); + BEAST_EXPECTS(hexKey == pdIdx, to_string(jv)); + checkResult(good, jv, expectedType); + + return pdIdx; + } + }(); + + { + Json::Value params; + // "field":"[index hash]" + params[jss::ledger_index] = jss::validated; + params[field] = hexKey; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(good, jv, expectedType); + BEAST_EXPECT(jv[jss::result][jss::index].asString() == hexKey); + } + + { + // Bad value + // Use the "index":"field" notation with API 2 + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::index] = field; + params[jss::api_version] = 2; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, expectedType, "malformedRequest"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + { + Json::Value params; + // Use the "index":"field" notation with API 3 + params[jss::ledger_index] = jss::validated; + params[jss::index] = field; + params[jss::api_version] = 3; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + // Index is correct either way + BEAST_EXPECT(jv[jss::result][jss::index].asString() == hexKey); + checkResult(good, jv, expectedType); + } + + { + Json::Value params; + // Use the "index":"[index hash]" notation + params[jss::ledger_index] = jss::validated; + params[jss::index] = pdIdx; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + // Index is correct either way + BEAST_EXPECT(jv[jss::result][jss::index].asString() == hexKey); + checkResult(good, jv, expectedType); + } + }; + + test(jss::amendments, jss::Amendments, keylet::amendments(), true); + test(jss::fee, jss::FeeSettings, keylet::fees(), true); + // There won't be an nunl + test(jss::nunl, jss::NegativeUNL, 
keylet::negativeUNL(), false); + // Can only get the short skip list this way + test(jss::hashes, jss::LedgerHashes, keylet::skip(), true); + } + + void + testHashes() + { + using namespace test::jtx; + + Account const alice{"alice"}; + Account const bob{"bob"}; + + Env env{*this, envconfig([](auto cfg) { + cfg->START_UP = Config::FRESH; + return cfg; + })}; + + env.close(); + + /** Verifies that the RPC result has the expected data + * + * @param good: Indicates that the request should have succeeded + * and returned a ledger object of `expectedType` type. + * @param jv: The RPC result Json value + * @param expectedCount: The number of Hashes expected in the + * object if "good". + * @param expectedError: Optional. The expected error if not + * good. Defaults to "entryNotFound". + */ + auto checkResult = + [&](bool good, + Json::Value const& jv, + int expectedCount, + std::optional const& expectedError = {}) { + if (good) + { + BEAST_EXPECTS( + jv.isObject() && jv.isMember(jss::result) && + !jv[jss::result].isMember(jss::error) && + jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::node].isMember( + sfLedgerEntryType.jsonName) && + jv[jss::result][jss::node] + [sfLedgerEntryType.jsonName] == jss::LedgerHashes, + to_string(jv)); + BEAST_EXPECTS( + jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::node].isMember("Hashes") && + jv[jss::result][jss::node]["Hashes"].size() == + expectedCount, + to_string(jv[jss::result][jss::node]["Hashes"].size())); + } + else + { + BEAST_EXPECTS( + jv.isObject() && jv.isMember(jss::result) && + jv[jss::result].isMember(jss::error) && + !jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::error] == + expectedError.value_or("entryNotFound"), + to_string(jv)); + } + }; + + /** Runs a series of tests for a given ledger index. + * + * @param ledger: The ledger index value of the "hashes" request + * parameter. May not necessarily be a number. + * @param expectedKey: The expected keylet of the object. 
+ * @param good: Indicates whether the object is expected to + * exist. + * @param expectedCount: The number of Hashes expected in the + * object if "good". + */ + auto test = [&](Json::Value ledger, + Keylet const& expectedKey, + bool good, + int expectedCount = 0) { + testcase << "LedgerHashes: seq: " << env.current()->header().seq + << " \"hashes\":" << to_string(ledger) + << (good ? "" : " not") << " found"; + + auto const hexKey = strHex(expectedKey.key); + + { + // Test bad values + // "hashes":null + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::hashes] = Json::nullValue; + auto jv = env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, 0, "malformedRequest"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + { + Json::Value params; + // "hashes":"non-uint string" + params[jss::ledger_index] = jss::validated; + params[jss::hashes] = "arbitrary string"; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, 0, "malformedRequest"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + { + Json::Value params; + // "hashes":"uint string" is invalid, too + params[jss::ledger_index] = jss::validated; + params[jss::hashes] = "10"; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, 0, "malformedRequest"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + { + Json::Value params; + // "hashes":false + params[jss::ledger_index] = jss::validated; + params[jss::hashes] = false; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, 0, "invalidParams"); + BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + { + Json::Value params; + // "hashes":-1 + params[jss::ledger_index] = jss::validated; + params[jss::hashes] = -1; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, 0, "internal"); + 
BEAST_EXPECT(!jv[jss::result].isMember(jss::index)); + } + + // "hashes":[incorrect index hash] + { + Json::Value params; + auto const badKey = strHex(expectedKey.key + uint256{1}); + params[jss::ledger_index] = jss::validated; + params[jss::hashes] = badKey; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(false, jv, 0, "entryNotFound"); + BEAST_EXPECT(jv[jss::result][jss::index] == badKey); + } + + { + Json::Value params; + // Test good values + // Use the "hashes":ledger notation + params[jss::ledger_index] = jss::validated; + params[jss::hashes] = ledger; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(good, jv, expectedCount); + // Index will always be returned for valid parameters. + std::string const pdIdx = + jv[jss::result][jss::index].asString(); + BEAST_EXPECTS(hexKey == pdIdx, strHex(pdIdx)); + } + + { + Json::Value params; + // "hashes":"[index hash]" + params[jss::ledger_index] = jss::validated; + params[jss::hashes] = hexKey; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(good, jv, expectedCount); + // Index is correct either way + BEAST_EXPECTS( + hexKey == jv[jss::result][jss::index].asString(), + strHex(jv[jss::result][jss::index].asString())); + } + + { + Json::Value params; + // Use the "index":"[index hash]" notation + params[jss::ledger_index] = jss::validated; + params[jss::index] = hexKey; + auto const jv = + env.rpc("json", "ledger_entry", to_string(params)); + checkResult(good, jv, expectedCount); + // Index is correct either way + BEAST_EXPECTS( + hexKey == jv[jss::result][jss::index].asString(), + strHex(jv[jss::result][jss::index].asString())); + } + }; + + // short skip list + test(true, keylet::skip(), true, 2); + // long skip list at index 0 + test(1, keylet::skip(1), false); + // long skip list at index 1 + test(1 << 17, keylet::skip(1 << 17), false); + + // Close more ledgers, but stop short of the flag ledger + for 
(auto i = env.current()->seq(); i <= 250; ++i) + env.close(); + + // short skip list + test(true, keylet::skip(), true, 249); + // long skip list at index 0 + test(1, keylet::skip(1), false); + // long skip list at index 1 + test(1 << 17, keylet::skip(1 << 17), false); + + // Close a flag ledger so the first "long" skip list is created + for (auto i = env.current()->seq(); i <= 260; ++i) + env.close(); + + // short skip list + test(true, keylet::skip(), true, 256); + // long skip list at index 0 + test(1, keylet::skip(1), true, 1); + // long skip list at index 1 + test(1 << 17, keylet::skip(1 << 17), false); + } + void testCLI() { @@ -2398,6 +2851,8 @@ public: testOracleLedgerEntry(); testMPT(); testPermissionedDomain(); + testFixed(); + testHashes(); testCLI(); } }; diff --git a/src/tests/libxrpl/CMakeLists.txt b/src/tests/libxrpl/CMakeLists.txt index 74dc184700..72d7d0fa92 100644 --- a/src/tests/libxrpl/CMakeLists.txt +++ b/src/tests/libxrpl/CMakeLists.txt @@ -6,9 +6,19 @@ find_package(GTest REQUIRED) # Custom target for all tests defined in this file add_custom_target(xrpl.tests) +# Test helpers +add_library(xrpl.helpers.test STATIC) +target_sources(xrpl.helpers.test PRIVATE + helpers/TestSink.cpp +) +target_include_directories(xrpl.helpers.test PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR} +) +target_link_libraries(xrpl.helpers.test PRIVATE xrpl.libxrpl) + # Common library dependencies for the rest of the tests. add_library(xrpl.imports.test INTERFACE) -target_link_libraries(xrpl.imports.test INTERFACE gtest::gtest xrpl.libxrpl) +target_link_libraries(xrpl.imports.test INTERFACE gtest::gtest xrpl.libxrpl xrpl.helpers.test) # One test for each module. 
xrpl_add_test(basics) diff --git a/src/tests/libxrpl/helpers/TestSink.cpp b/src/tests/libxrpl/helpers/TestSink.cpp new file mode 100644 index 0000000000..a71e9f7cc0 --- /dev/null +++ b/src/tests/libxrpl/helpers/TestSink.cpp @@ -0,0 +1,127 @@ +#include + +#include + +#include // for getenv + +#if BOOST_OS_WINDOWS +#include // for _isatty, _fileno +#include // for stdout +#else +#include // for isatty, STDOUT_FILENO +#endif + +#include + +namespace xrpl { + +TestSink::TestSink(beast::severities::Severity threshold) + : Sink(threshold, false) +{ +} + +void +TestSink::write(beast::severities::Severity level, std::string const& text) +{ + if (level < threshold()) + return; + writeAlways(level, text); +} + +void +TestSink::writeAlways( + beast::severities::Severity level, + std::string const& text) +{ + auto supportsColor = [] { + // 1. Check for "NO_COLOR" environment variable (Standard convention) + if (std::getenv("NO_COLOR") != nullptr) + { + return false; + } + + // 2. Check for "CLICOLOR_FORCE" (Force color) + if (std::getenv("CLICOLOR_FORCE") != nullptr) + { + return true; + } + + // 3. Platform-specific check to see if stdout is a terminal +#if BOOST_OS_WINDOWS + // Windows: Check if the output handle is a character device + // _fileno(stdout) is usually 1 + // _isatty returns non-zero if the handle is a character device, 0 + // otherwise. + return _isatty(_fileno(stdout)) != 0; +#else + // Linux/macOS: Check if file descriptor 1 (stdout) is a TTY + // STDOUT_FILENO is 1 + // isatty returns 1 if the file descriptor is a TTY, 0 otherwise. 
+ return isatty(STDOUT_FILENO) != 0; +#endif + }(); + + auto color = [level]() { + switch (level) + { + case beast::severities::kTrace: + return "\033[34m"; // blue + case beast::severities::kDebug: + return "\033[32m"; // green + case beast::severities::kInfo: + return "\033[36m"; // cyan + case beast::severities::kWarning: + return "\033[33m"; // yellow + case beast::severities::kError: + return "\033[31m"; // red + case beast::severities::kFatal: + default: + break; + } + return "\033[31m"; // red + }(); + + auto prefix = [level]() { + switch (level) + { + case beast::severities::kTrace: + return "TRC:"; + case beast::severities::kDebug: + return "DBG:"; + case beast::severities::kInfo: + return "INF:"; + case beast::severities::kWarning: + return "WRN:"; + case beast::severities::kError: + return "ERR:"; + case beast::severities::kFatal: + default: + break; + } + return "FTL:"; + }(); + + auto& stream = [level]() -> std::ostream& { + switch (level) + { + case beast::severities::kError: + case beast::severities::kFatal: + return std::cerr; + default: + return std::cout; + } + }(); + + constexpr auto reset = "\033[0m"; + + if (supportsColor) + { + stream << color << prefix << " " << text << reset << std::endl; + } + else + { + stream << prefix << " " << text << std::endl; + } +} + +} // namespace xrpl diff --git a/src/tests/libxrpl/helpers/TestSink.h b/src/tests/libxrpl/helpers/TestSink.h new file mode 100644 index 0000000000..fc3223b04b --- /dev/null +++ b/src/tests/libxrpl/helpers/TestSink.h @@ -0,0 +1,27 @@ +#ifndef XRPL_DEBUGSINK_H +#define XRPL_DEBUGSINK_H + +#include + +namespace xrpl { +class TestSink : public beast::Journal::Sink +{ +public: + static TestSink& + instance() + { + static TestSink sink{}; + return sink; + } + + TestSink(beast::severities::Severity threshold = beast::severities::kDebug); + + void + write(beast::severities::Severity level, std::string const& text) override; + + void + writeAlways(beast::severities::Severity level, std::string 
const& text) + override; +}; +} // namespace xrpl +#endif // XRPL_DEBUGSINK_H diff --git a/src/tests/libxrpl/net/HTTPClient.cpp b/src/tests/libxrpl/net/HTTPClient.cpp index cfd206edde..5ff0bfc336 100644 --- a/src/tests/libxrpl/net/HTTPClient.cpp +++ b/src/tests/libxrpl/net/HTTPClient.cpp @@ -2,15 +2,22 @@ #include #include +#include +#include #include +#include +#include #include #include #include #include +#include #include #include +#include +#include #include using namespace xrpl; @@ -24,16 +31,19 @@ private: boost::asio::io_context ioc_; boost::asio::ip::tcp::acceptor acceptor_; boost::asio::ip::tcp::endpoint endpoint_; - std::atomic running_{true}; + bool running_{true}; + bool finished_{false}; unsigned short port_; // Custom headers to return - std::map custom_headers_; - std::string response_body_; - unsigned int status_code_{200}; + std::map customHeaders_; + std::string responseBody_; + unsigned int statusCode_{200}; + + beast::Journal j_; public: - TestHTTPServer() : acceptor_(ioc_), port_(0) + TestHTTPServer() : acceptor_(ioc_), port_(0), j_(TestSink::instance()) { // Bind to any available port endpoint_ = {boost::asio::ip::tcp::v4(), 0}; @@ -45,12 +55,19 @@ public: // Get the actual port that was assigned port_ = acceptor_.local_endpoint().port(); - accept(); + // Start the accept coroutine + boost::asio::co_spawn(ioc_, accept(), boost::asio::detached); } + TestHTTPServer(TestHTTPServer&&) = delete; + TestHTTPServer& + operator=(TestHTTPServer&&) = delete; + ~TestHTTPServer() { - stop(); + XRPL_ASSERT( + finished(), + "xrpl::TestHTTPServer::~TestHTTPServer : accept future ready"); } boost::asio::io_context& @@ -68,22 +85,21 @@ public: void setHeader(std::string const& name, std::string const& value) { - custom_headers_[name] = value; + customHeaders_[name] = value; } void setResponseBody(std::string const& body) { - response_body_ = body; + responseBody_ = body; } void setStatusCode(unsigned int code) { - status_code_ = code; + statusCode_ = code; } 
-private: void stop() { @@ -91,78 +107,84 @@ private: acceptor_.close(); } - void - accept() + bool + finished() const { - if (!running_) - return; - - acceptor_.async_accept( - ioc_, - endpoint_, - [&](boost::system::error_code const& error, - boost::asio::ip::tcp::socket peer) { - if (!running_) - return; - - if (!error) - { - handleConnection(std::move(peer)); - } - }); + return finished_; } - void +private: + boost::asio::awaitable + accept() + { + while (running_) + { + try + { + auto socket = + co_await acceptor_.async_accept(boost::asio::use_awaitable); + + if (!running_) + break; + + // Handle this connection + co_await handleConnection(std::move(socket)); + } + catch (std::exception const& e) + { + // Accept or handle failed, stop accepting + JLOG(j_.debug()) << "Error: " << e.what(); + break; + } + } + + finished_ = true; + } + + boost::asio::awaitable handleConnection(boost::asio::ip::tcp::socket socket) { try { - // Read the HTTP request boost::beast::flat_buffer buffer; boost::beast::http::request req; - boost::beast::http::read(socket, buffer, req); + + // Read the HTTP request asynchronously + co_await boost::beast::http::async_read( + socket, buffer, req, boost::asio::use_awaitable); // Create response boost::beast::http::response res; res.version(req.version()); - res.result(status_code_); + res.result(statusCode_); res.set(boost::beast::http::field::server, "TestServer"); - // Add custom headers - for (auto const& [name, value] : custom_headers_) + // Set body and prepare payload first + res.body() = responseBody_; + res.prepare_payload(); + + // Override Content-Length with custom headers after + // prepare_payload. This allows us to test case-insensitive + // header parsing. 
+ for (auto const& [name, value] : customHeaders_) { res.set(name, value); } - // Set body and prepare payload first - res.body() = response_body_; - res.prepare_payload(); - - // Override Content-Length with custom headers after prepare_payload - // This allows us to test case-insensitive header parsing - for (auto const& [name, value] : custom_headers_) - { - if (boost::iequals(name, "Content-Length")) - { - res.erase(boost::beast::http::field::content_length); - res.set(name, value); - } - } - - // Send response - boost::beast::http::write(socket, res); + // Send response asynchronously + co_await boost::beast::http::async_write( + socket, res, boost::asio::use_awaitable); // Shutdown socket gracefully - boost::system::error_code ec; - socket.shutdown(boost::asio::ip::tcp::socket::shutdown_send, ec); + boost::system::error_code shutdownEc; + socket.shutdown( + boost::asio::ip::tcp::socket::shutdown_send, shutdownEc); } - catch (std::exception const&) + catch (std::exception const& e) { - // Connection handling errors are expected + // Error reading or writing, just close the connection + JLOG(j_.debug()) << "Connection error: " << e.what(); } - - if (running_) - accept(); } }; @@ -171,13 +193,13 @@ bool runHTTPTest( TestHTTPServer& server, std::string const& path, - std::atomic& completed, - std::atomic& result_status, - std::string& result_data, - boost::system::error_code& result_error) + bool& completed, + int& resultStatus, + std::string& resultData, + boost::system::error_code& resultError) { // Create a null journal for testing - beast::Journal j{beast::Journal::getNullSink()}; + beast::Journal j{TestSink::instance()}; // Initialize HTTPClient SSL context HTTPClient::initializeSSLContext("", "", false, j); @@ -193,9 +215,9 @@ runHTTPTest( [&](boost::system::error_code const& ec, int status, std::string const& data) -> bool { - result_error = ec; - result_status = status; - result_data = data; + resultError = ec; + resultStatus = status; + resultData = 
data; completed = true; return false; // don't retry }, @@ -203,13 +225,19 @@ runHTTPTest( // Run the IO context until completion auto start = std::chrono::steady_clock::now(); - while (!completed && - std::chrono::steady_clock::now() - start < std::chrono::seconds(10)) + while (server.ioc().run_one() != 0) { - if (server.ioc().run_one() == 0) + if (std::chrono::steady_clock::now() - start >= + std::chrono::seconds(10) || + server.finished()) { break; } + + if (completed) + { + server.stop(); + } } return completed; @@ -220,7 +248,7 @@ runHTTPTest( TEST(HTTPClient, case_insensitive_content_length) { // Test different cases of Content-Length header - std::vector header_cases = { + std::vector headerCases = { "Content-Length", // Standard case "content-length", // Lowercase - this tests the regex icase fix "CONTENT-LENGTH", // Uppercase @@ -228,53 +256,48 @@ TEST(HTTPClient, case_insensitive_content_length) "content-Length" // Mixed case 2 }; - for (auto const& header_name : header_cases) + for (auto const& headerName : headerCases) { TestHTTPServer server; - std::string test_body = "Hello World!"; - server.setResponseBody(test_body); - server.setHeader(header_name, std::to_string(test_body.size())); + std::string testBody = "Hello World!"; + server.setResponseBody(testBody); + server.setHeader(headerName, std::to_string(testBody.size())); - std::atomic completed{false}; - std::atomic result_status{0}; - std::string result_data; - boost::system::error_code result_error; + bool completed{false}; + int resultStatus{0}; + std::string resultData; + boost::system::error_code resultError; - bool test_completed = runHTTPTest( - server, - "/test", - completed, - result_status, - result_data, - result_error); + bool testCompleted = runHTTPTest( + server, "/test", completed, resultStatus, resultData, resultError); // Verify results - EXPECT_TRUE(test_completed); - EXPECT_FALSE(result_error); - EXPECT_EQ(result_status, 200); - EXPECT_EQ(result_data, test_body); + 
EXPECT_TRUE(testCompleted); + EXPECT_FALSE(resultError); + EXPECT_EQ(resultStatus, 200); + EXPECT_EQ(resultData, testBody); } } TEST(HTTPClient, basic_http_request) { TestHTTPServer server; - std::string test_body = "Test response body"; - server.setResponseBody(test_body); + std::string testBody = "Test response body"; + server.setResponseBody(testBody); server.setHeader("Content-Type", "text/plain"); - std::atomic completed{false}; - std::atomic result_status{0}; - std::string result_data; - boost::system::error_code result_error; + bool completed{false}; + int resultStatus{0}; + std::string resultData; + boost::system::error_code resultError; - bool test_completed = runHTTPTest( - server, "/basic", completed, result_status, result_data, result_error); + bool testCompleted = runHTTPTest( + server, "/basic", completed, resultStatus, resultData, resultError); - EXPECT_TRUE(test_completed); - EXPECT_FALSE(result_error); - EXPECT_EQ(result_status, 200); - EXPECT_EQ(result_data, test_body); + EXPECT_TRUE(testCompleted); + EXPECT_FALSE(resultError); + EXPECT_EQ(resultStatus, 200); + EXPECT_EQ(resultData, testBody); } TEST(HTTPClient, empty_response) @@ -283,45 +306,45 @@ TEST(HTTPClient, empty_response) server.setResponseBody(""); // Empty body server.setHeader("Content-Length", "0"); - std::atomic completed{false}; - std::atomic result_status{0}; - std::string result_data; - boost::system::error_code result_error; + bool completed{false}; + int resultStatus{0}; + std::string resultData; + boost::system::error_code resultError; - bool test_completed = runHTTPTest( - server, "/empty", completed, result_status, result_data, result_error); + bool testCompleted = runHTTPTest( + server, "/empty", completed, resultStatus, resultData, resultError); - EXPECT_TRUE(test_completed); - EXPECT_FALSE(result_error); - EXPECT_EQ(result_status, 200); - EXPECT_TRUE(result_data.empty()); + EXPECT_TRUE(testCompleted); + EXPECT_FALSE(resultError); + EXPECT_EQ(resultStatus, 200); + 
EXPECT_TRUE(resultData.empty()); } TEST(HTTPClient, different_status_codes) { - std::vector status_codes = {200, 404, 500}; + std::vector statusCodes = {200, 404, 500}; - for (auto status : status_codes) + for (auto status : statusCodes) { TestHTTPServer server; server.setStatusCode(status); server.setResponseBody("Status " + std::to_string(status)); - std::atomic completed{false}; - std::atomic result_status{0}; - std::string result_data; - boost::system::error_code result_error; + bool completed{false}; + int resultStatus{0}; + std::string resultData; + boost::system::error_code resultError; - bool test_completed = runHTTPTest( + bool testCompleted = runHTTPTest( server, "/status", completed, - result_status, - result_data, - result_error); + resultStatus, + resultData, + resultError); - EXPECT_TRUE(test_completed); - EXPECT_FALSE(result_error); - EXPECT_EQ(result_status, static_cast(status)); + EXPECT_TRUE(testCompleted); + EXPECT_FALSE(resultError); + EXPECT_EQ(resultStatus, static_cast(status)); } } diff --git a/src/xrpld/app/consensus/RCLConsensus.cpp b/src/xrpld/app/consensus/RCLConsensus.cpp index 99cc140056..654c2a8990 100644 --- a/src/xrpld/app/consensus/RCLConsensus.cpp +++ b/src/xrpld/app/consensus/RCLConsensus.cpp @@ -119,9 +119,7 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash) acquiringLedger_ = hash; app_.getJobQueue().addJob( - jtADVANCE, - "getConsensusLedger1", - [id = hash, &app = app_, this]() { + jtADVANCE, "GetConsL1", [id = hash, &app = app_, this]() { JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger1 started"; app.getInboundLedgers().acquireAsync( @@ -420,7 +418,7 @@ RCLConsensus::Adaptor::onAccept( { app_.getJobQueue().addJob( jtACCEPT, - "acceptLedger", + "AcceptLedger", [=, this, cj = std::move(consensusJson)]() mutable { // Note that no lock is held or acquired during this job. 
// This is because generic Consensus guarantees that once a ledger diff --git a/src/xrpld/app/consensus/RCLValidations.cpp b/src/xrpld/app/consensus/RCLValidations.cpp index eb10765ba3..6438334106 100644 --- a/src/xrpld/app/consensus/RCLValidations.cpp +++ b/src/xrpld/app/consensus/RCLValidations.cpp @@ -122,13 +122,11 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash) Application* pApp = &app_; - app_.getJobQueue().addJob( - jtADVANCE, "getConsensusLedger2", [pApp, hash, this]() { - JLOG(j_.debug()) - << "JOB advanceLedger getConsensusLedger2 started"; - pApp->getInboundLedgers().acquireAsync( - hash, 0, InboundLedger::Reason::CONSENSUS); - }); + app_.getJobQueue().addJob(jtADVANCE, "GetConsL2", [pApp, hash, this]() { + JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger2 started"; + pApp->getInboundLedgers().acquireAsync( + hash, 0, InboundLedger::Reason::CONSENSUS); + }); return std::nullopt; } diff --git a/src/xrpld/app/ledger/ConsensusTransSetSF.cpp b/src/xrpld/app/ledger/ConsensusTransSetSF.cpp index b52cee2927..6678ee6334 100644 --- a/src/xrpld/app/ledger/ConsensusTransSetSF.cpp +++ b/src/xrpld/app/ledger/ConsensusTransSetSF.cpp @@ -46,7 +46,7 @@ ConsensusTransSetSF::gotNode( "xrpl::ConsensusTransSetSF::gotNode : transaction hash " "match"); auto const pap = &app_; - app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() { + app_.getJobQueue().addJob(jtTRANSACTION, "TxsToTxn", [pap, stx]() { pap->getOPs().submitTransaction(stx); }); } diff --git a/src/xrpld/app/ledger/OrderBookDB.cpp b/src/xrpld/app/ledger/OrderBookDB.cpp index 47b04f3d2c..5963c00c5a 100644 --- a/src/xrpld/app/ledger/OrderBookDB.cpp +++ b/src/xrpld/app/ledger/OrderBookDB.cpp @@ -48,9 +48,9 @@ OrderBookDB::setup(std::shared_ptr const& ledger) update(ledger); else app_.getJobQueue().addJob( - jtUPDATE_PF, - "OrderBookDB::update: " + std::to_string(ledger->seq()), - [this, ledger]() { update(ledger); }); + jtUPDATE_PF, "OrderBookUpd", [this, ledger]() { + update(ledger); 
+ }); } } diff --git a/src/xrpld/app/ledger/detail/InboundLedger.cpp b/src/xrpld/app/ledger/detail/InboundLedger.cpp index 1f5e5cc7b6..710173cd17 100644 --- a/src/xrpld/app/ledger/detail/InboundLedger.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedger.cpp @@ -454,7 +454,7 @@ InboundLedger::done() // We hold the PeerSet lock, so must dispatch app_.getJobQueue().addJob( - jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() { + jtLEDGER_DATA, "AcqDone", [self = shared_from_this()]() { if (self->complete_ && !self->failed_) { self->app_.getLedgerMaster().checkAccept(self->getLedger()); diff --git a/src/xrpld/app/ledger/detail/InboundLedgers.cpp b/src/xrpld/app/ledger/detail/InboundLedgers.cpp index 445786eb63..fd2cde698e 100644 --- a/src/xrpld/app/ledger/detail/InboundLedgers.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedgers.cpp @@ -192,7 +192,7 @@ public: // dispatch if (ledger->gotData(std::weak_ptr(peer), packet)) app_.getJobQueue().addJob( - jtLEDGER_DATA, "processLedgerData", [ledger]() { + jtLEDGER_DATA, "ProcessLData", [ledger]() { ledger->runData(); }); @@ -207,7 +207,7 @@ public: if (packet->type() == protocol::liAS_NODE) { app_.getJobQueue().addJob( - jtLEDGER_DATA, "gotStaleData", [this, packet]() { + jtLEDGER_DATA, "GotStaleData", [this, packet]() { gotStaleData(packet); }); } diff --git a/src/xrpld/app/ledger/detail/LedgerDeltaAcquire.cpp b/src/xrpld/app/ledger/detail/LedgerDeltaAcquire.cpp index 5b642ba3db..6909c45328 100644 --- a/src/xrpld/app/ledger/detail/LedgerDeltaAcquire.cpp +++ b/src/xrpld/app/ledger/detail/LedgerDeltaAcquire.cpp @@ -21,7 +21,7 @@ LedgerDeltaAcquire::LedgerDeltaAcquire( ledgerHash, LedgerReplayParameters::SUB_TASK_TIMEOUT, {jtREPLAY_TASK, - "LedgerReplayDelta", + "LedReplDelta", LedgerReplayParameters::MAX_QUEUED_TASKS}, app.journal("LedgerReplayDelta")) , inboundLedgers_(inboundLedgers) @@ -225,7 +225,7 @@ LedgerDeltaAcquire::onLedgerBuilt( } app_.getJobQueue().addJob( jtREPLAY_TASK, - "onLedgerBuilt", + 
"OnLedBuilt", [=, ledger = this->fullLedger_, &app = this->app_]() { for (auto reason : reasons) { diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index d176e85645..65421c9bd8 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -1344,7 +1344,7 @@ LedgerMaster::tryAdvance() if (!mAdvanceThread && !mValidLedger.empty()) { mAdvanceThread = true; - app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() { + app_.getJobQueue().addJob(jtADVANCE, "AdvanceLedger", [this]() { std::unique_lock sl(m_mutex); XRPL_ASSERT( @@ -1482,7 +1482,7 @@ bool LedgerMaster::newPathRequest() { std::unique_lock ml(m_mutex); - mPathFindNewRequest = newPFWork("pf:newRequest", ml); + mPathFindNewRequest = newPFWork("PthFindNewReq", ml); return mPathFindNewRequest; } @@ -1503,7 +1503,7 @@ LedgerMaster::newOrderBookDB() std::unique_lock ml(m_mutex); mPathLedger.reset(); - return newPFWork("pf:newOBDB", ml); + return newPFWork("PthFindOBDB", ml); } /** A thread needs to be dispatched to handle pathfinding work of some kind. 
@@ -1841,7 +1841,7 @@ LedgerMaster::fetchForHistory( mFillInProgress = seq; } app_.getJobQueue().addJob( - jtADVANCE, "tryFill", [this, ledger]() { + jtADVANCE, "TryFill", [this, ledger]() { tryFill(ledger); }); } @@ -1980,7 +1980,7 @@ LedgerMaster::doAdvance(std::unique_lock& sl) } app_.getOPs().clearNeedNetworkLedger(); - progress = newPFWork("pf:newLedger", sl); + progress = newPFWork("PthFindNewLed", sl); } if (progress) mAdvanceWork = true; @@ -2011,7 +2011,7 @@ LedgerMaster::gotFetchPack(bool progress, std::uint32_t seq) { if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire)) { - app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() { + app_.getJobQueue().addJob(jtLEDGER_DATA, "GotFetchPack", [&]() { app_.getInboundLedgers().gotFetchPack(); mGotFetchPackThread.clear(std::memory_order_release); }); diff --git a/src/xrpld/app/ledger/detail/LedgerReplayTask.cpp b/src/xrpld/app/ledger/detail/LedgerReplayTask.cpp index cd174b098f..c31aba6c43 100644 --- a/src/xrpld/app/ledger/detail/LedgerReplayTask.cpp +++ b/src/xrpld/app/ledger/detail/LedgerReplayTask.cpp @@ -77,7 +77,7 @@ LedgerReplayTask::LedgerReplayTask( parameter.finishHash_, LedgerReplayParameters::TASK_TIMEOUT, {jtREPLAY_TASK, - "LedgerReplayTask", + "LedReplTask", LedgerReplayParameters::MAX_QUEUED_TASKS}, app.journal("LedgerReplayTask")) , inboundLedgers_(inboundLedgers) diff --git a/src/xrpld/app/ledger/detail/SkipListAcquire.cpp b/src/xrpld/app/ledger/detail/SkipListAcquire.cpp index 5f4b0dc339..09d0ba1ea6 100644 --- a/src/xrpld/app/ledger/detail/SkipListAcquire.cpp +++ b/src/xrpld/app/ledger/detail/SkipListAcquire.cpp @@ -16,7 +16,7 @@ SkipListAcquire::SkipListAcquire( ledgerHash, LedgerReplayParameters::SUB_TASK_TIMEOUT, {jtREPLAY_TASK, - "SkipListAcquire", + "SkipListAcq", LedgerReplayParameters::MAX_QUEUED_TASKS}, app.journal("LedgerReplaySkipList")) , inboundLedgers_(inboundLedgers) diff --git a/src/xrpld/app/ledger/detail/TransactionAcquire.cpp 
b/src/xrpld/app/ledger/detail/TransactionAcquire.cpp index 3cd0e84ef0..6b22c6ca8b 100644 --- a/src/xrpld/app/ledger/detail/TransactionAcquire.cpp +++ b/src/xrpld/app/ledger/detail/TransactionAcquire.cpp @@ -27,7 +27,7 @@ TransactionAcquire::TransactionAcquire( app, hash, TX_ACQUIRE_TIMEOUT, - {jtTXN_DATA, "TransactionAcquire", {}}, + {jtTXN_DATA, "TxAcq", {}}, app.journal("TransactionAcquire")) , mHaveRoot(false) , mPeerSet(std::move(peerSet)) @@ -60,7 +60,7 @@ TransactionAcquire::done() // just updates the consensus and related structures when we acquire // a transaction set. No need to update them if we're shutting down. app_.getJobQueue().addJob( - jtTXN_DATA, "completeAcquire", [pap, hash, map]() { + jtTXN_DATA, "ComplAcquire", [pap, hash, map]() { pap->getInboundTransactions().giveSet(hash, map, true); }); } diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index 15abff9b14..785b02dbb8 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -203,11 +203,7 @@ public: boost::asio::signal_set m_signals; - // Once we get C++20, we could use `std::atomic_flag` for `isTimeToStop` - // and eliminate the need for the condition variable and the mutex. 
- std::condition_variable stoppingCondition_; - mutable std::mutex stoppingMutex_; - std::atomic isTimeToStop = false; + std::atomic_flag isTimeToStop; std::atomic checkSigs_; @@ -1539,10 +1535,7 @@ ApplicationImp::run() getLoadManager().activateStallDetector(); } - { - std::unique_lock lk{stoppingMutex_}; - stoppingCondition_.wait(lk, [this] { return isTimeToStop.load(); }); - } + isTimeToStop.wait(false, std::memory_order_relaxed); JLOG(m_journal.debug()) << "Application stopping"; @@ -1629,14 +1622,14 @@ ApplicationImp::run() void ApplicationImp::signalStop(std::string msg) { - if (!isTimeToStop.exchange(true)) + if (!isTimeToStop.test_and_set(std::memory_order_acquire)) { if (msg.empty()) JLOG(m_journal.warn()) << "Server stopping"; else JLOG(m_journal.warn()) << "Server stopping: " << msg; - stoppingCondition_.notify_all(); + isTimeToStop.notify_all(); } } @@ -1655,7 +1648,7 @@ ApplicationImp::checkSigs(bool check) bool ApplicationImp::isStopping() const { - return isTimeToStop.load(); + return isTimeToStop.test(std::memory_order_relaxed); } int diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index 7de632817c..99335bcdbc 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -336,8 +336,7 @@ run(int argc, char** argv) { using namespace std; - beast::setCurrentThreadName( - "rippled: main " + BuildInfo::getVersionString()); + beast::setCurrentThreadName("main"); po::variables_map vm; diff --git a/src/xrpld/app/main/NodeStoreScheduler.cpp b/src/xrpld/app/main/NodeStoreScheduler.cpp index 221c1f098e..0e02a092f8 100644 --- a/src/xrpld/app/main/NodeStoreScheduler.cpp +++ b/src/xrpld/app/main/NodeStoreScheduler.cpp @@ -12,9 +12,8 @@ NodeStoreScheduler::scheduleTask(NodeStore::Task& task) if (jobQueue_.isStopped()) return; - if (!jobQueue_.addJob(jtWRITE, "NodeObject::store", [&task]() { - task.performScheduledTask(); - })) + if (!jobQueue_.addJob( + jtWRITE, "NObjStore", [&task]() { task.performScheduledTask(); })) { // 
Job not added, presumably because we're shutting down. // Recover by executing the task synchronously. diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 2422ec4ae6..696512b5f4 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -981,7 +981,7 @@ NetworkOPsImp::setHeartbeatTimer() heartbeatTimer_, mConsensus.parms().ledgerGRANULARITY, [this]() { - m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() { + m_job_queue.addJob(jtNETOP_TIMER, "NetHeart", [this]() { processHeartbeatTimer(); }); }, @@ -997,7 +997,7 @@ NetworkOPsImp::setClusterTimer() clusterTimer_, 10s, [this]() { - m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() { + m_job_queue.addJob(jtNETOP_CLUSTER, "NetCluster", [this]() { processClusterTimer(); }); }, @@ -1225,7 +1225,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr const& iTrans) auto tx = std::make_shared(trans, reason, app_); - m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() { + m_job_queue.addJob(jtTRANSACTION, "SubmitTxn", [this, tx]() { auto t = tx; processTransaction(t, false, false, FailHard::no); }); @@ -1323,7 +1323,7 @@ NetworkOPsImp::doTransactionAsync( if (mDispatchState == DispatchState::none) { if (m_job_queue.addJob( - jtBATCH, "transactionBatch", [this]() { transactionBatch(); })) + jtBATCH, "TxBatchAsync", [this]() { transactionBatch(); })) { mDispatchState = DispatchState::scheduled; } @@ -1370,7 +1370,7 @@ NetworkOPsImp::doTransactionSyncBatch( if (mTransactions.size()) { // More transactions need to be applied, but by another job. 
- if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() { + if (m_job_queue.addJob(jtBATCH, "TxBatchSync", [this]() { transactionBatch(); })) { @@ -3208,19 +3208,16 @@ NetworkOPsImp::reportFeeChange() if (f != mLastFeeSummary) { m_job_queue.addJob( - jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() { - pubServer(); - }); + jtCLIENT_FEE_CHANGE, "PubFee", [this]() { pubServer(); }); } } void NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase) { - m_job_queue.addJob( - jtCLIENT_CONSENSUS, - "reportConsensusStateChange->pubConsensus", - [this, phase]() { pubConsensus(phase); }); + m_job_queue.addJob(jtCLIENT_CONSENSUS, "PubCons", [this, phase]() { + pubConsensus(phase); + }); } inline void @@ -3728,7 +3725,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) app_.getJobQueue().addJob( jtCLIENT_ACCT_HIST, - "AccountHistoryTxStream", + "HistTxStream", [this, dbType = databaseType, subInfo]() { auto const& accountId = subInfo.index_->accountId_; auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_; diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 53237ed3ae..327a5a4f21 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -1158,7 +1158,7 @@ PeerImp::onMessage(std::shared_ptr const& m) fee_.update(Resource::feeModerateBurdenPeer, "oversize"); app_.getJobQueue().addJob( - jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() { + jtMANIFEST, "RcvManifests", [this, that = shared_from_this(), m]() { overlay_.onManifests(m, that); }); } @@ -1351,8 +1351,8 @@ PeerImp::handleTransaction( { // If we've never been in synch, there's nothing we can do // with a transaction - JLOG(p_journal_.debug()) << "Ignoring incoming transaction: " - << "Need network ledger"; + JLOG(p_journal_.debug()) + << "Ignoring incoming transaction: Need network ledger"; return; } @@ -1452,7 +1452,7 @@ PeerImp::handleTransaction( { 
app_.getJobQueue().addJob( jtTRANSACTION, - "recvTransaction->checkTransaction", + "RcvCheckTx", [weak = std::weak_ptr(shared_from_this()), flags, checkSignature, @@ -1555,7 +1555,7 @@ PeerImp::onMessage(std::shared_ptr const& m) // Queue a job to process the request std::weak_ptr weak = shared_from_this(); - app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() { + app_.getJobQueue().addJob(jtLEDGER_REQ, "RcvGetLedger", [weak, m]() { if (auto peer = weak.lock()) peer->processLedgerRequest(m); }); @@ -1575,29 +1575,27 @@ PeerImp::onMessage(std::shared_ptr const& m) fee_.update( Resource::feeModerateBurdenPeer, "received a proof path request"); std::weak_ptr weak = shared_from_this(); - app_.getJobQueue().addJob( - jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() { - if (auto peer = weak.lock()) + app_.getJobQueue().addJob(jtREPLAY_REQ, "RcvProofPReq", [weak, m]() { + if (auto peer = weak.lock()) + { + auto reply = + peer->ledgerReplayMsgHandler_.processProofPathRequest(m); + if (reply.has_error()) { - auto reply = - peer->ledgerReplayMsgHandler_.processProofPathRequest(m); - if (reply.has_error()) - { - if (reply.error() == protocol::TMReplyError::reBAD_REQUEST) - peer->charge( - Resource::feeMalformedRequest, - "proof_path_request"); - else - peer->charge( - Resource::feeRequestNoReply, "proof_path_request"); - } + if (reply.error() == protocol::TMReplyError::reBAD_REQUEST) + peer->charge( + Resource::feeMalformedRequest, "proof_path_request"); else - { - peer->send(std::make_shared( - reply, protocol::mtPROOF_PATH_RESPONSE)); - } + peer->charge( + Resource::feeRequestNoReply, "proof_path_request"); } - }); + else + { + peer->send(std::make_shared( + reply, protocol::mtPROOF_PATH_RESPONSE)); + } + } + }); } void @@ -1629,30 +1627,27 @@ PeerImp::onMessage(std::shared_ptr const& m) fee_.fee = Resource::feeModerateBurdenPeer; std::weak_ptr weak = shared_from_this(); - app_.getJobQueue().addJob( - jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() { - 
if (auto peer = weak.lock()) + app_.getJobQueue().addJob(jtREPLAY_REQ, "RcvReplDReq", [weak, m]() { + if (auto peer = weak.lock()) + { + auto reply = + peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m); + if (reply.has_error()) { - auto reply = - peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m); - if (reply.has_error()) - { - if (reply.error() == protocol::TMReplyError::reBAD_REQUEST) - peer->charge( - Resource::feeMalformedRequest, - "replay_delta_request"); - else - peer->charge( - Resource::feeRequestNoReply, - "replay_delta_request"); - } + if (reply.error() == protocol::TMReplyError::reBAD_REQUEST) + peer->charge( + Resource::feeMalformedRequest, "replay_delta_request"); else - { - peer->send(std::make_shared( - reply, protocol::mtREPLAY_DELTA_RESPONSE)); - } + peer->charge( + Resource::feeRequestNoReply, "replay_delta_request"); } - }); + else + { + peer->send(std::make_shared( + reply, protocol::mtREPLAY_DELTA_RESPONSE)); + } + } + }); } void @@ -1748,7 +1743,7 @@ PeerImp::onMessage(std::shared_ptr const& m) { std::weak_ptr weak{shared_from_this()}; app_.getJobQueue().addJob( - jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() { + jtTXN_DATA, "RcvPeerData", [weak, ledgerHash, m]() { if (auto peer = weak.lock()) { peer->app_.getInboundTransactions().gotData( @@ -1876,7 +1871,7 @@ PeerImp::onMessage(std::shared_ptr const& m) std::weak_ptr weak = shared_from_this(); app_.getJobQueue().addJob( isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut, - "recvPropose->checkPropose", + "checkPropose", [weak, isTrusted, m, proposal]() { if (auto peer = weak.lock()) peer->checkPropose(isTrusted, m, proposal); @@ -2490,18 +2485,7 @@ PeerImp::onMessage(std::shared_ptr const& m) } else if (isTrusted || !app_.getFeeTrack().isLoadedLocal()) { - std::string const name = [isTrusted, val]() { - std::string ret = - isTrusted ? 
"Trusted validation" : "Untrusted validation"; - -#ifdef DEBUG - ret += " " + - std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " + - to_string(val->getNodeID()); -#endif - - return ret; - }(); + std::string const name = isTrusted ? "ChkTrust" : "ChkUntrust"; std::weak_ptr weak = shared_from_this(); app_.getJobQueue().addJob( @@ -2561,11 +2545,10 @@ PeerImp::onMessage(std::shared_ptr const& m) } std::weak_ptr weak = shared_from_this(); - app_.getJobQueue().addJob( - jtREQUESTED_TXN, "doTransactions", [weak, m]() { - if (auto peer = weak.lock()) - peer->doTransactions(m); - }); + app_.getJobQueue().addJob(jtREQUESTED_TXN, "DoTxs", [weak, m]() { + if (auto peer = weak.lock()) + peer->doTransactions(m); + }); return; } @@ -2618,6 +2601,16 @@ PeerImp::onMessage(std::shared_ptr const& m) newObj.set_ledgerseq(obj.ledgerseq()); // VFALCO NOTE "seq" in the message is obsolete + + // Check if by adding this object, reply has reached its + // limit + if (reply.objects_size() >= Tuning::hardMaxReplyNodes) + { + fee_.update( + Resource::feeModerateBurdenPeer, + " Reply limit reached. 
Truncating reply."); + break; + } } } } @@ -2695,11 +2688,10 @@ PeerImp::onMessage(std::shared_ptr const& m) } std::weak_ptr weak = shared_from_this(); - app_.getJobQueue().addJob( - jtMISSING_TXN, "handleHaveTransactions", [weak, m]() { - if (auto peer = weak.lock()) - peer->handleHaveTransactions(m); - }); + app_.getJobQueue().addJob(jtMISSING_TXN, "HandleHaveTxs", [weak, m]() { + if (auto peer = weak.lock()) + peer->handleHaveTransactions(m); + }); } void diff --git a/src/xrpld/rpc/detail/RPCSub.cpp b/src/xrpld/rpc/detail/RPCSub.cpp index 616911fdfa..877b894885 100644 --- a/src/xrpld/rpc/detail/RPCSub.cpp +++ b/src/xrpld/rpc/detail/RPCSub.cpp @@ -72,7 +72,7 @@ public: JLOG(j_.info()) << "RPCCall::fromNetwork start"; mSending = m_jobQueue.addJob( - jtCLIENT_SUBSCRIBE, "RPCSub::sendThread", [this]() { + jtCLIENT_SUBSCRIBE, "RPCSubSendThr", [this]() { sendThread(); }); } diff --git a/src/xrpld/rpc/handlers/LedgerEntry.cpp b/src/xrpld/rpc/handlers/LedgerEntry.cpp index d9ae357b1a..17d8e5e7bc 100644 --- a/src/xrpld/rpc/handlers/LedgerEntry.cpp +++ b/src/xrpld/rpc/handlers/LedgerEntry.cpp @@ -18,6 +18,32 @@ namespace xrpl { +using FunctionType = std::function( + Json::Value const&, + Json::StaticString const, + unsigned const apiVersion)>; + +static Expected +parseFixed( + Keylet const& keylet, + Json::Value const& params, + Json::StaticString const& fieldName, + unsigned const apiVersion); + +// Helper function to return FunctionType for objects that have a fixed +// location. That is, they don't take parameters to compute the index. +// e.g. amendments, fees, negative UNL, etc. 
+static FunctionType +fixed(Keylet const& keylet) +{ + return [keylet]( + Json::Value const& params, + Json::StaticString const fieldName, + unsigned const apiVersion) -> Expected { + return parseFixed(keylet, params, fieldName, apiVersion); + }; +} + static Expected parseObjectID( Json::Value const& params, @@ -33,13 +59,33 @@ parseObjectID( } static Expected -parseIndex(Json::Value const& params, Json::StaticString const fieldName) +parseIndex( + Json::Value const& params, + Json::StaticString const fieldName, + unsigned const apiVersion) { + if (apiVersion > 2u && params.isString()) + { + std::string const index = params.asString(); + if (index == jss::amendments.c_str()) + return keylet::amendments().key; + if (index == jss::fee.c_str()) + return keylet::fees().key; + if (index == jss::nunl) + return keylet::negativeUNL().key; + if (index == jss::hashes) + // Note this only finds the "short" skip list. Use "hashes":index to + // get the long list. + return keylet::skip().key; + } return parseObjectID(params, fieldName, "hex string"); } static Expected -parseAccountRoot(Json::Value const& params, Json::StaticString const fieldName) +parseAccountRoot( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (auto const account = LedgerEntryHelpers::parse(params)) { @@ -50,14 +96,13 @@ parseAccountRoot(Json::Value const& params, Json::StaticString const fieldName) "malformedAddress", fieldName, "AccountID"); } -static Expected -parseAmendments(Json::Value const& params, Json::StaticString const fieldName) -{ - return parseObjectID(params, fieldName, "hex string"); -} +auto const parseAmendments = fixed(keylet::amendments()); static Expected -parseAMM(Json::Value const& params, Json::StaticString const fieldName) +parseAMM( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -85,7 +130,10 @@ parseAMM(Json::Value 
const& params, Json::StaticString const fieldName) } static Expected -parseBridge(Json::Value const& params, Json::StaticString const fieldName) +parseBridge( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isMember(jss::bridge)) { @@ -116,13 +164,19 @@ parseBridge(Json::Value const& params, Json::StaticString const fieldName) } static Expected -parseCheck(Json::Value const& params, Json::StaticString const fieldName) +parseCheck( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { return parseObjectID(params, fieldName, "hex string"); } static Expected -parseCredential(Json::Value const& cred, Json::StaticString const fieldName) +parseCredential( + Json::Value const& cred, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!cred.isObject()) { @@ -153,7 +207,10 @@ parseCredential(Json::Value const& cred, Json::StaticString const fieldName) } static Expected -parseDelegate(Json::Value const& params, Json::StaticString const fieldName) +parseDelegate( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -244,7 +301,10 @@ parseAuthorizeCredentials(Json::Value const& jv) } static Expected -parseDepositPreauth(Json::Value const& dp, Json::StaticString const fieldName) +parseDepositPreauth( + Json::Value const& dp, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!dp.isObject()) { @@ -297,7 +357,10 @@ parseDepositPreauth(Json::Value const& dp, Json::StaticString const fieldName) } static Expected -parseDID(Json::Value const& params, Json::StaticString const fieldName) +parseDID( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { auto const account = LedgerEntryHelpers::parse(params); if (!account) @@ 
-312,7 +375,8 @@ parseDID(Json::Value const& params, Json::StaticString const fieldName) static Expected parseDirectoryNode( Json::Value const& params, - Json::StaticString const fieldName) + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -365,7 +429,10 @@ parseDirectoryNode( } static Expected -parseEscrow(Json::Value const& params, Json::StaticString const fieldName) +parseEscrow( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -384,20 +451,53 @@ parseEscrow(Json::Value const& params, Json::StaticString const fieldName) return keylet::escrow(*id, *seq).key; } +auto const parseFeeSettings = fixed(keylet::fees()); + static Expected -parseFeeSettings(Json::Value const& params, Json::StaticString const fieldName) +parseFixed( + Keylet const& keylet, + Json::Value const& params, + Json::StaticString const& fieldName, + [[maybe_unused]] unsigned const apiVersion) { - return parseObjectID(params, fieldName, "hex string"); + if (!params.isBool()) + { + return parseObjectID(params, fieldName, "hex string"); + } + if (!params.asBool()) + { + return LedgerEntryHelpers::invalidFieldError( + "invalidParams", fieldName, "true"); + } + + return keylet.key; } static Expected -parseLedgerHashes(Json::Value const& params, Json::StaticString const fieldName) +parseLedgerHashes( + Json::Value const& params, + Json::StaticString const fieldName, + unsigned const apiVersion) { - return parseObjectID(params, fieldName, "hex string"); + if (params.isUInt() || params.isInt()) + { + // If the index doesn't parse as a UInt, throw + auto const index = params.asUInt(); + + // Return the "long" skip list for the given ledger index. + auto const keylet = keylet::skip(index); + return keylet.key; + } + // Return the key in `params` or the "short" skip list, which contains + // hashes since the last flag ledger. 
+ return parseFixed(keylet::skip(), params, fieldName, apiVersion); } static Expected -parseLoanBroker(Json::Value const& params, Json::StaticString const fieldName) +parseLoanBroker( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -417,7 +517,10 @@ parseLoanBroker(Json::Value const& params, Json::StaticString const fieldName) } static Expected -parseLoan(Json::Value const& params, Json::StaticString const fieldName) +parseLoan( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -437,7 +540,10 @@ parseLoan(Json::Value const& params, Json::StaticString const fieldName) } static Expected -parseMPToken(Json::Value const& params, Json::StaticString const fieldName) +parseMPToken( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -460,7 +566,8 @@ parseMPToken(Json::Value const& params, Json::StaticString const fieldName) static Expected parseMPTokenIssuance( Json::Value const& params, - Json::StaticString const fieldName) + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { auto const mptIssuanceID = LedgerEntryHelpers::parse(params); if (!mptIssuanceID) @@ -471,25 +578,30 @@ parseMPTokenIssuance( } static Expected -parseNFTokenOffer(Json::Value const& params, Json::StaticString const fieldName) +parseNFTokenOffer( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { return parseObjectID(params, fieldName, "hex string"); } static Expected -parseNFTokenPage(Json::Value const& params, Json::StaticString const fieldName) +parseNFTokenPage( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { return parseObjectID(params, fieldName, "hex 
string"); } -static Expected -parseNegativeUNL(Json::Value const& params, Json::StaticString const fieldName) -{ - return parseObjectID(params, fieldName, "hex string"); -} +auto const parseNegativeUNL = fixed(keylet::negativeUNL()); static Expected -parseOffer(Json::Value const& params, Json::StaticString const fieldName) +parseOffer( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -510,7 +622,10 @@ parseOffer(Json::Value const& params, Json::StaticString const fieldName) } static Expected -parseOracle(Json::Value const& params, Json::StaticString const fieldName) +parseOracle( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -531,7 +646,10 @@ parseOracle(Json::Value const& params, Json::StaticString const fieldName) } static Expected -parsePayChannel(Json::Value const& params, Json::StaticString const fieldName) +parsePayChannel( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { return parseObjectID(params, fieldName, "hex string"); } @@ -539,7 +657,8 @@ parsePayChannel(Json::Value const& params, Json::StaticString const fieldName) static Expected parsePermissionedDomain( Json::Value const& pd, - Json::StaticString const fieldName) + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (pd.isString()) { @@ -568,7 +687,8 @@ parsePermissionedDomain( static Expected parseRippleState( Json::Value const& jvRippleState, - Json::StaticString const fieldName) + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { Currency uCurrency; @@ -618,13 +738,19 @@ parseRippleState( } static Expected -parseSignerList(Json::Value const& params, Json::StaticString const fieldName) +parseSignerList( + Json::Value const& params, + Json::StaticString const fieldName, + 
[[maybe_unused]] unsigned const apiVersion) { return parseObjectID(params, fieldName, "hex string"); } static Expected -parseTicket(Json::Value const& params, Json::StaticString const fieldName) +parseTicket( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -645,7 +771,10 @@ parseTicket(Json::Value const& params, Json::StaticString const fieldName) } static Expected -parseVault(Json::Value const& params, Json::StaticString const fieldName) +parseVault( + Json::Value const& params, + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!params.isObject()) { @@ -668,7 +797,8 @@ parseVault(Json::Value const& params, Json::StaticString const fieldName) static Expected parseXChainOwnedClaimID( Json::Value const& claim_id, - Json::StaticString const fieldName) + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!claim_id.isObject()) { @@ -693,7 +823,8 @@ parseXChainOwnedClaimID( static Expected parseXChainOwnedCreateAccountClaimID( Json::Value const& claim_id, - Json::StaticString const fieldName) + Json::StaticString const fieldName, + [[maybe_unused]] unsigned const apiVersion) { if (!claim_id.isObject()) { @@ -717,10 +848,6 @@ parseXChainOwnedCreateAccountClaimID( return keylet.key; } -using FunctionType = Expected (*)( - Json::Value const&, - Json::StaticString const); - struct LedgerEntry { Json::StaticString fieldName; @@ -753,7 +880,7 @@ doLedgerEntry(RPC::JsonContext& context) {jss::ripple_state, parseRippleState, ltRIPPLE_STATE}, }); - auto hasMoreThanOneMember = [&]() { + auto const hasMoreThanOneMember = [&]() { int count = 0; for (auto const& ledgerEntry : ledgerEntryParsers) @@ -797,8 +924,8 @@ doLedgerEntry(RPC::JsonContext& context) Json::Value const& params = ledgerEntry.fieldName == jss::bridge ? 
context.params : context.params[ledgerEntry.fieldName]; - auto const result = - ledgerEntry.parseFunction(params, ledgerEntry.fieldName); + auto const result = ledgerEntry.parseFunction( + params, ledgerEntry.fieldName, context.apiVersion); if (!result) return result.error(); @@ -829,9 +956,13 @@ doLedgerEntry(RPC::JsonContext& context) throw; } + // Return the computed index regardless of whether the node exists. + jvResult[jss::index] = to_string(uNodeIndex); + if (uNodeIndex.isZero()) { - return RPC::make_error(rpcENTRY_NOT_FOUND); + RPC::inject_error(rpcENTRY_NOT_FOUND, jvResult); + return jvResult; } auto const sleNode = lpLedger->read(keylet::unchecked(uNodeIndex)); @@ -843,12 +974,14 @@ doLedgerEntry(RPC::JsonContext& context) if (!sleNode) { // Not found. - return RPC::make_error(rpcENTRY_NOT_FOUND); + RPC::inject_error(rpcENTRY_NOT_FOUND, jvResult); + return jvResult; } if ((expectedType != ltANY) && (expectedType != sleNode->getType())) { - return RPC::make_error(rpcUNEXPECTED_LEDGER_TYPE); + RPC::inject_error(rpcUNEXPECTED_LEDGER_TYPE, jvResult); + return jvResult; } if (bNodeBinary) @@ -858,12 +991,10 @@ doLedgerEntry(RPC::JsonContext& context) sleNode->add(s); jvResult[jss::node_binary] = strHex(s.peekData()); - jvResult[jss::index] = to_string(uNodeIndex); } else { jvResult[jss::node] = sleNode->getJson(JsonOptions::none); - jvResult[jss::index] = to_string(uNodeIndex); } return jvResult;