Merge branch 'develop' into tapanito/lending-vault-invariant

Vito Tumas
2026-01-22 23:36:25 +01:00
committed by GitHub
37 changed files with 390 additions and 320 deletions

View File

@@ -0,0 +1,44 @@
name: Generate build version number
description: "Generate build version number."
outputs:
version:
description: "The generated build version number."
value: ${{ steps.version.outputs.version }}
runs:
using: composite
steps:
# When a tag is pushed, the version is used as-is.
- name: Generate version for tag event
if: ${{ github.ref_type == 'tag' }}
shell: bash
env:
VERSION: ${{ github.ref_name }}
run: echo "VERSION=${VERSION}" >> "${GITHUB_ENV}"
# When a tag is not pushed, the version (e.g. 1.2.3-b0) is extracted from the
# BuildInfo.cpp file and the shortened commit hash is appended to it.
# We use a plus sign instead of a hyphen because Conan recipe versions do
# not support two hyphens.
- name: Generate version for non-tag event
if: ${{ github.ref_type != 'tag' }}
shell: bash
run: |
echo 'Extracting version from BuildInfo.cpp.'
VERSION="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')"
if [[ -z "${VERSION}" ]]; then
echo 'Unable to extract version from BuildInfo.cpp.'
exit 1
fi
echo 'Appending shortened commit hash to version.'
SHA='${{ github.sha }}'
VERSION="${VERSION}+${SHA:0:7}"
echo "VERSION=${VERSION}" >> "${GITHUB_ENV}"
- name: Output version
id: version
shell: bash
run: echo "version=${VERSION}" >> "${GITHUB_OUTPUT}"

View File

@@ -2,11 +2,11 @@ name: Setup Conan
description: "Set up Conan configuration, profile, and remote."
inputs:
conan_remote_name:
remote_name:
description: "The name of the Conan remote to use."
required: false
default: xrplf
conan_remote_url:
remote_url:
description: "The URL of the Conan endpoint to use."
required: false
default: https://conan.ripplex.io
@@ -36,11 +36,11 @@ runs:
- name: Set up Conan remote
shell: bash
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
REMOTE_NAME: ${{ inputs.remote_name }}
REMOTE_URL: ${{ inputs.remote_url }}
run: |
echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"
echo "Adding Conan remote '${REMOTE_NAME}' at '${REMOTE_URL}'."
conan remote add --index 0 --force "${REMOTE_NAME}" "${REMOTE_URL}"
echo 'Listing Conan remotes.'
conan remote list

View File

@@ -1,7 +1,8 @@
# This workflow runs all workflows to check, build and test the project on
# various Linux flavors, as well as on macOS and Windows, on every push to a
# user branch. However, it will not run if the pull request is a draft unless it
# has the 'DraftRunCI' label.
# has the 'DraftRunCI' label. For commits to PRs that target a release branch,
# it also uploads the libxrpl recipe to the Conan remote.
name: PR
on:
@@ -53,12 +54,12 @@ jobs:
.github/scripts/rename/**
.github/workflows/reusable-check-levelization.yml
.github/workflows/reusable-check-rename.yml
.github/workflows/reusable-notify-clio.yml
.github/workflows/on-pr.yml
# Keep the paths below in sync with those in `on-trigger.yml`.
.github/actions/build-deps/**
.github/actions/build-test/**
.github/actions/generate-version/**
.github/actions/setup-conan/**
.github/scripts/strategy-matrix/**
.github/workflows/reusable-build.yml
@@ -66,6 +67,7 @@ jobs:
.github/workflows/reusable-build-test.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.github/workflows/reusable-upload-recipe.yml
.codecov.yml
cmake/**
conan/**
@@ -121,22 +123,42 @@ jobs:
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
notify-clio:
upload-recipe:
needs:
- should-run
- build-test
if: ${{ needs.should-run.outputs.go == 'true' && startsWith(github.ref, 'refs/heads/release') }}
uses: ./.github/workflows/reusable-notify-clio.yml
# Only run when committing to a PR that targets a release branch in the
# XRPLF repository.
if: ${{ github.repository_owner == 'XRPLF' && needs.should-run.outputs.go == 'true' && startsWith(github.ref, 'refs/heads/release') }}
uses: ./.github/workflows/reusable-upload-recipe.yml
secrets:
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
notify-clio:
needs: upload-recipe
runs-on: ubuntu-latest
steps:
# Notify the Clio repository about the newly proposed release version, so
# it can be checked for compatibility before the release is actually made.
- name: Notify Clio
env:
GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
PR_URL: ${{ github.event.pull_request.html_url }}
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[ref]=${{ needs.upload-recipe.outputs.recipe_ref }}" \
-F "client_payload[pr_url]=${PR_URL}"
passed:
if: failure() || cancelled()
needs:
- build-test
- check-levelization
- check-rename
- build-test
- upload-recipe
- notify-clio
runs-on: ubuntu-latest
steps:
- name: Fail
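The `notify-clio` job above is a single `gh api` repository dispatch. For ad-hoc testing, the same dispatch could be sent by hand; a sketch only, where the token, recipe reference, and PR URL are placeholders:

```
# Manually fire the same repository_dispatch that notify-clio sends (all values are placeholders).
export GH_TOKEN="<token authorized for xrplf/clio>"
gh api --method POST \
  -H "Accept: application/vnd.github+json" \
  -H "X-GitHub-Api-Version: 2022-11-28" \
  /repos/xrplf/clio/dispatches \
  -f "event_type=check_libxrpl" \
  -F "client_payload[ref]=xrpl/1.2.3-b0+a1b2c3d" \
  -F "client_payload[pr_url]=https://github.com/XRPLF/rippled/pull/0000"
```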

.github/workflows/on-tag.yml (new file)
View File

@@ -0,0 +1,25 @@
# This workflow uploads the libxrpl recipe to the Conan remote when a versioned
# tag is pushed.
name: Tag
on:
push:
tags:
- "v*"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
upload-recipe:
# Only run when a tag is pushed to the XRPLF repository.
if: ${{ github.repository_owner == 'XRPLF' }}
uses: ./.github/workflows/reusable-upload-recipe.yml
secrets:
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
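This workflow is triggered by tags matching `v*`, and the upload job only runs in the XRPLF repository; for example (the version is hypothetical):

```
# Create and push a versioned tag; the 'Tag' workflow then uploads the recipe for it.
git tag v1.2.3
git push origin v1.2.3
```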

View File

@@ -1,8 +1,7 @@
# This workflow runs all workflows to build and test the code on various Linux
# flavors, as well as on macOS and Windows, on a scheduled basis, on merge into
# the 'develop' or 'release*' branches, or when requested manually. Upon
# successful completion, it also uploads the built libxrpl package to the Conan
# remote.
# the 'develop' or 'release*' branches, or when requested manually. Upon pushes
# to the develop branch, it also uploads the libxrpl recipe to the Conan remote.
name: Trigger
on:
@@ -17,6 +16,7 @@ on:
# Keep the paths below in sync with those in `on-pr.yml`.
- ".github/actions/build-deps/**"
- ".github/actions/build-test/**"
- ".github/actions/generate-version/**"
- ".github/actions/setup-conan/**"
- ".github/scripts/strategy-matrix/**"
- ".github/workflows/reusable-build.yml"
@@ -24,6 +24,7 @@ on:
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".github/workflows/reusable-upload-recipe.yml"
- ".codecov.yml"
- "cmake/**"
- "conan/**"
@@ -76,3 +77,12 @@ jobs:
strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
upload-recipe:
needs: build-test
# Only run when pushing to the develop branch in the XRPLF repository.
if: ${{ github.repository_owner == 'XRPLF' && github.event_name == 'push' && github.ref == 'refs/heads/develop' }}
uses: ./.github/workflows/reusable-upload-recipe.yml
secrets:
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}

View File

@@ -1,91 +0,0 @@
# This workflow exports the built libxrpl package to the Conan remote on a
# a channel named after the pull request, and notifies the Clio repository about
# the new version so it can check for compatibility.
name: Notify Clio
# This workflow can only be triggered by other workflows.
on:
workflow_call:
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
type: string
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
type: string
default: https://conan.ripplex.io
secrets:
clio_notify_token:
description: "The GitHub token to notify Clio about new versions."
required: true
conan_remote_username:
description: "The username for logging into the Conan remote."
required: true
conan_remote_password:
description: "The password for logging into the Conan remote."
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-clio
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
upload:
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Generate outputs
id: generate
env:
PR_NUMBER: ${{ github.event.pull_request.number }}
run: |
echo 'Generating user and channel.'
echo "user=clio" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
echo 'Extracting version.'
echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
- name: Calculate conan reference
id: conan_ref
run: |
echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
- name: Set up Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
- name: Log into Conan remote
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
- name: Upload package
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: |
conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
outputs:
conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
notify:
needs: upload
runs-on: ubuntu-latest
steps:
- name: Notify Clio
env:
GH_TOKEN: ${{ secrets.clio_notify_token }}
PR_URL: ${{ github.event.pull_request.html_url }}
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-F "client_payload[pr_url]=${PR_URL}"

View File

@@ -0,0 +1,73 @@
# This workflow exports the libxrpl Conan recipe and uploads it to the Conan remote.
name: Upload Conan recipe
# This workflow can only be triggered by other workflows.
on:
workflow_call:
inputs:
remote_name:
description: "The name of the Conan remote to use."
required: false
type: string
default: xrplf
remote_url:
description: "The URL of the Conan endpoint to use."
required: false
type: string
default: https://conan.ripplex.io
secrets:
remote_username:
description: "The username for logging into the Conan remote."
required: true
remote_password:
description: "The password for logging into the Conan remote."
required: true
outputs:
recipe_ref:
description: "The Conan recipe reference ('name/version') that was uploaded."
value: ${{ jobs.upload.outputs.ref }}
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-upload-recipe
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
upload:
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Generate build version number
id: version
uses: ./.github/actions/generate-version
- name: Set up Conan
uses: ./.github/actions/setup-conan
with:
remote_name: ${{ inputs.remote_name }}
remote_url: ${{ inputs.remote_url }}
- name: Log into Conan remote
env:
REMOTE_NAME: ${{ inputs.remote_name }}
REMOTE_USERNAME: ${{ secrets.remote_username }}
REMOTE_PASSWORD: ${{ secrets.remote_password }}
run: conan remote login "${REMOTE_NAME}" "${REMOTE_USERNAME}" --password "${REMOTE_PASSWORD}"
- name: Upload Conan recipe
env:
REMOTE_NAME: ${{ inputs.remote_name }}
run: |
conan export . --version=${{ steps.version.outputs.version }}
conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/${{ steps.version.outputs.version }}
outputs:
ref: xrpl/${{ steps.version.outputs.version }}
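Once the recipe is on the remote, a downstream consumer (such as Clio's compatibility check) could resolve it with ordinary Conan 2 commands; a sketch, assuming the default `xrplf` remote and a placeholder version:

```
# Sketch: consume the uploaded recipe from the xrplf remote (version is a placeholder).
conan remote add --force xrplf https://conan.ripplex.io
conan search 'xrpl/*' --remote=xrplf
conan install --requires=xrpl/1.2.3-b0+a1b2c3d --remote=xrplf --build=missing
```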

View File

@@ -86,8 +86,8 @@ jobs:
- name: Setup Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
remote_name: ${{ env.CONAN_REMOTE_NAME }}
remote_url: ${{ env.CONAN_REMOTE_URL }}
- name: Build dependencies
uses: ./.github/actions/build-deps

View File

@@ -8,7 +8,9 @@ if(POLICY CMP0077)
endif()
# Fix "unrecognized escape" issues when passing CMAKE_MODULE_PATH on Windows.
file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH)
if(DEFINED CMAKE_MODULE_PATH)
file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH)
endif()
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
project(xrpl)

View File

@@ -78,72 +78,61 @@ To report a qualifying bug, please send a detailed report to:
| Email Address | bugs@ripple.com |
| :-----------: | :-------------------------------------------------- |
| Short Key ID | `0xC57929BE` |
| Long Key ID | `0xCD49A0AFC57929BE` |
| Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` |
| Short Key ID | `0xA9F514E0` |
| Long Key ID | `0xD900855AA9F514E0` |
| Fingerprint | `B72C 0654 2F2A E250 2763 A268 D900 855A A9F5 14E0` |
The full PGP key for this address, which is also available on several key servers (e.g. on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is:
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFUwGHYBEAC0wpGpBPkd8W1UdQjg9+cEFzeIEJRaoZoeuJD8mofwI5Ejnjdt
kCpUYEDal0ygkKobu8SzOoATcDl18iCrScX39VpTm96vISFZMhmOryYCIp4QLJNN
4HKc2ZdBj6W4igNi6vj5Qo6JMyGpLY2mz4CZskbt0TNuUxWrGood+UrCzpY8x7/N
a93fcvNw+prgCr0rCH3hAPmAFfsOBbtGzNnmq7xf3jg5r4Z4sDiNIF1X1y53DAfV
rWDx49IKsuCEJfPMp1MnBSvDvLaQ2hKXs+cOpx1BCZgHn3skouEUxxgqbtTzBLt1
xXpmuijsaltWngPnGO7mOAzbpZSdBm82/Emrk9bPMuD0QaLQjWr7HkTSUs6ZsKt4
7CLPdWqxyY/QVw9UaxeHEtWGQGMIQGgVJGh1fjtUr5O1sC9z9jXcQ0HuIHnRCTls
GP7hklJmfH5V4SyAJQ06/hLuEhUJ7dn+BlqCsT0tLmYTgZYNzNcLHcqBFMEZHvHw
9GENMx/tDXgajKql4bJnzuTK0iGU/YepanANLd1JHECJ4jzTtmKOus9SOGlB2/l1
0t0ADDYAS3eqOdOcUvo9ElSLCI5vSVHhShSte/n2FMWU+kMUboTUisEG8CgQnrng
g2CvvQvqDkeOtZeqMcC7HdiZS0q3LJUWtwA/ViwxrVlBDCxiTUXCotyBWwARAQAB
tDBSaXBwbGUgTGFicyBCdWcgQm91bnR5IFByb2dyYW0gPGJ1Z3NAcmlwcGxlLmNv
bT6JAjcEEwEKACEFAlUwGHYCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
zUmgr8V5Kb6R0g//SwY/mVJY59k87iL26/KayauSoOcz7xjcST26l4ZHVVX85gOY
HYZl8k0+m8X3zxeYm9a3QAoAml8sfoaFRFQP8ynnefRrLUPaZ2MjbJ0SACMwZNef
T6o7Mi8LBAaiNZdYVyIfX1oM6YXtqYkuJdav6ZCyvVYqc9OvMJPY2ZzJYuI/ZtvQ
/lTndxCeg9ALNX/iezOLGdfMpf4HuIFVwcPPlwGi+HDlB9/bggDEHC8z434SXVFc
aQatXAPcDkjMUweU7y0CZtYEj00HITd4pSX6MqGiHrxlDZTqinCOPs1Ieqp7qufs
MzlM6irLGucxj1+wa16ieyYvEtGaPIsksUKkywx0O7cf8N2qKg+eIkUk6O0Uc6eO
CszizmiXIXy4O6OiLlVHGKkXHMSW9Nwe9GE95O8G9WR8OZCEuDv+mHPAutO+IjdP
PDAAUvy+3XnkceO+HGWRpVvJZfFP2YH4A33InFL5yqlJmSoR/yVingGLxk55bZDM
+HYGR3VeMb8Xj1rf/02qERsZyccMCFdAvKDbTwmvglyHdVLu5sPmktxbBYiemfyJ
qxMxmYXCc9S0hWrWZW7edktBa9NpE58z1mx+hRIrDNbS2sDHrib9PULYCySyVYcF
P+PWEe1CAS5jqkR2ker5td2/pHNnJIycynBEs7l6zbc9fu+nktFJz0q2B+GJAhwE
EAEKAAYFAlUwGaQACgkQ+tiY1qQ2QkjMFw//f2hNY3BPNe+1qbhzumMDCnbTnGif
kLuAGl9OKt81VHG1f6RnaGiLpR696+6Ja45KzH15cQ5JJl5Bgs1YkR/noTGX8IAD
c70eNwiFu8JXTaaeeJrsmFkF9Tueufb364risYkvPP8tNUD3InBFEZT3WN7JKwix
coD4/BwekUwOZVDd/uCFEyhlhZsROxdKNisNo3VtAq2s+3tIBAmTrriFUl0K+ZC5
zgavcpnPN57zMtW9aK+VO3wXqAKYLYmtgxkVzSLUZt2M7JuwOaAdyuYWAneKZPCu
1AXkmyo+d84sd5mZaKOr5xArAFiNMWPUcZL4rkS1Fq4dKtGAqzzR7a7hWtA5o27T
6vynuxZ1n0PPh0er2O/zF4znIjm5RhTlfjp/VmhZdQfpulFEQ/dMxxGkQ9z5IYbX
mTlSDbCSb+FMsanRBJ7Drp5EmBIudVGY6SHI5Re1RQiEh7GoDfUMUwZO+TVDII5R
Ra7WyuimYleJgDo/+7HyfuIyGDaUCVj6pwVtYtYIdOI3tTw1R1Mr0V8yaNVnJghL
CHcEJQL+YHSmiMM3ySil3O6tm1By6lFz8bVe/rgG/5uklQrnjMR37jYboi1orCC4
yeIoQeV0ItlxeTyBwYIV/o1DBNxDevTZvJabC93WiGLw2XFjpZ0q/9+zI2rJUZJh
qxmKP+D4e27lCI65Ag0EVTAYdgEQAMvttYNqeRNBRpSX8fk45WVIV8Fb21fWdwk6
2SkZnJURbiC0LxQnOi7wrtii7DeFZtwM2kFHihS1VHekBnIKKZQSgGoKuFAQMGyu
a426H4ZsSmA9Ufd7kRbvdtEcp7/RTAanhrSL4lkBhaKJrXlxBJ27o3nd7/rh7r3a
OszbPY6DJ5bWClX3KooPTDl/RF2lHn+fweFk58UvuunHIyo4BWJUdilSXIjLun+P
Qaik4ZAsZVwNhdNz05d+vtai4AwbYoO7adboMLRkYaXSQwGytkm+fM6r7OpXHYuS
cR4zB/OK5hxCVEpWfiwN71N2NMvnEMaWd/9uhqxJzyvYgkVUXV9274TUe16pzXnW
ZLfmitjwc91e7mJBBfKNenDdhaLEIlDRwKTLj7k58f9srpMnyZFacntu5pUMNblB
cjXwWxz5ZaQikLnKYhIvrIEwtWPyjqOzNXNvYfZamve/LJ8HmWGCKao3QHoAIDvB
9XBxrDyTJDpxbog6Qu4SY8AdgVlan6c/PsLDc7EUegeYiNTzsOK+eq3G5/E92eIu
TsUXlciypFcRm1q8vLRr+HYYe2mJDo4GetB1zLkAFBcYJm/x9iJQbu0hn5NxJvZO
R0Y5nOJQdyi+muJzKYwhkuzaOlswzqVXkq/7+QCjg7QsycdcwDjiQh3OrsgXHrwl
M7gyafL9ABEBAAGJAh8EGAEKAAkFAlUwGHYCGwwACgkQzUmgr8V5Kb50BxAAhj9T
TwmNrgRldTHszj+Qc+v8RWqV6j+R+zc0cn5XlUa6XFaXI1OFFg71H4dhCPEiYeN0
IrnocyMNvCol+eKIlPKbPTmoixjQ4udPTR1DC1Bx1MyW5FqOrsgBl5t0e1VwEViM
NspSStxu5Hsr6oWz2GD48lXZWJOgoL1RLs+uxjcyjySD/em2fOKASwchYmI+ezRv
plfhAFIMKTSCN2pgVTEOaaz13M0U+MoprThqF1LWzkGkkC7n/1V1f5tn83BWiagG
2N2Q4tHLfyouzMUKnX28kQ9sXfxwmYb2sA9FNIgxy+TdKU2ofLxivoWT8zS189z/
Yj9fErmiMjns2FzEDX+bipAw55X4D/RsaFgC+2x2PDbxeQh6JalRA2Wjq32Ouubx
u+I4QhEDJIcVwt9x6LPDuos1F+M5QW0AiUhKrZJ17UrxOtaquh/nPUL9T3l2qPUn
1ChrZEEEhHO6vA8+jn0+cV9n5xEz30Str9iHnDQ5QyR5LyV4UBPgTdWyQzNVKA69
KsSr9lbHEtQFRzGuBKwt6UlSFv9vPWWJkJit5XDKAlcKuGXj0J8OlltToocGElkF
+gEBZfoOWi/IBjRLrFW2cT3p36DTR5O1Ud/1DLnWRqgWNBLrbs2/KMKE6EnHttyD
7Tz8SQkuxltX/yBXMV3Ddy0t6nWV2SZEfuxJAQI=
=spg4
mQINBGkSZAQBEACprU199OhgdsOsygNjiQV4msuN3vDOUooehL+NwfsGfW79Tbqq
Q2u7uQ3NZjW+M2T4nsDwuhkr7pe7xSReR5W8ssaczvtUyxkvbMClilcgZ2OSCAuC
N9tzJsqOqkwBvXoNXkn//T2jnPz0ZU2wSF+NrEibq5FeuyGdoX3yXXBxq9pW9HzK
HkQll63QSl6BzVSGRQq+B6lGgaZGLwf3mzmIND9Z5VGLNK2jKynyz9z091whNG/M
kV+E7/r/bujHk7WIVId07G5/COTXmSr7kFnNEkd2Umw42dkgfiNKvlmJ9M7c1wLK
KbL9Eb4ADuW6rRc5k4s1e6GT8R4/VPliWbCl9SE32hXH8uTkqVIFZP2eyM5WRRHs
aKzitkQG9UK9gcb0kdgUkxOvvgPHAe5IuZlcHFzU4y0dBbU1VEFWVpiLU0q+IuNw
5BRemeHc59YNsngkmAZ+/9zouoShRusZmC8Wzotv75C2qVBcjijPvmjWAUz0Zunm
Lsr+O71vqHE73pERjD07wuD/ISjiYRYYE/bVrXtXLZijC7qAH4RE3nID+2ojcZyO
/2jMQvt7un56RsGH4UBHi3aBHi9bUoDGCXKiQY981cEuNaOxpou7Mh3x/ONzzSvk
sTV6nl1LOZHykN1JyKwaNbTSAiuyoN+7lOBqbV04DNYAHL88PrT21P83aQARAQAB
tB1SaXBwbGUgTGFicyA8YnVnc0ByaXBwbGUuY29tPokCTgQTAQgAOBYhBLcsBlQv
KuJQJ2OiaNkAhVqp9RTgBQJpEmQEAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheA
AAoJENkAhVqp9RTgBzgP/i7y+aDWl1maig1XMdyb+o0UGusumFSW4Hmj278wlKVv
usgLPihYgHE0PKrv6WRyKOMC1tQEcYYN93M+OeQ1vFhS2YyURq6RCMmh4zq/awXG
uZbG36OURB5NH8lGBOHiN/7O+nY0CgenBT2JWm+GW3nEOAVOVm4+r5GlpPlv+Dp1
NPBThcKXFMnH73++NpSQoDzTfRYHPxhDAX3jkLi/moXfSanOLlR6l94XNNN0jBHW
Quao0rzf4WSXq9g6AS224xhAA5JyIcFl8TX7hzj5HaFn3VWo3COoDu4U7H+BM0fl
85yqiMQypp7EhN2gxpMMWaHY5TFM85U/bFXFYfEgihZ4/gt4uoIzsNI9jlX7mYvG
KFdDij+oTlRsuOxdIy60B3dKcwOH9nZZCz0SPsN/zlRWgKzK4gDKdGhFkU9OlvPu
94ZqscanoiWKDoZkF96+sjgfjkuHsDK7Lwc1Xi+T4drHG/3aVpkYabXox+lrKB/S
yxZjeqOIQzWPhnLgCaLyvsKo5hxKzL0w3eURu8F3IS7RgOOlljv4M+Me9sEVcdNV
aN3/tQwbaomSX1X5D5YXqhBwC3rU3wXwamsscRTGEpkV+JCX6KUqGP7nWmxCpAly
FL05XuOd5SVHJjXLeuje0JqLUpN514uL+bThWwDbDTdAdwW3oK/2WbXz7IfJRLBj
uQINBGkSZAQBEADdI3SL2F72qkrgFqXWE6HSRBu9bsAvTE5QrRPWk7ux6at537r4
S4sIw2dOwLvbyIrDgKNq3LQ5wCK88NO/NeCOFm4AiCJSl3pJHXYnTDoUxTrrxx+o
vSRI4I3fHEql/MqzgiAb0YUezjgFdh3vYheMPp/309PFbOLhiFqEcx80Mx5h06UH
gDzu1qNj3Ec+31NLic5zwkrAkvFvD54d6bqYR3SEgMau6aYEewpGHbWBi2pLqSi2
lQcAeOFixqGpTwDmAnYR8YtjBYepy0MojEAdTHcQQlOYSDk4q4elG+io2N8vECfU
rD6ORecN48GXdZINYWTAdslrUeanmBdgQrYkSpce8TSghgT9P01SNaXxmyaehVUO
lqI4pcg5G2oojAE8ncNS3TwDtt7daTaTC3bAdr4PXDVAzNAiewjMNZPB7xidkDGQ
Y4W1LxTMXyJVWxehYOH7tsbBRKninlfRnLgYzmtIbNRAAvNcsxU6ihv3AV0WFknN
YbSzotEv1Xq/5wk309x8zCDe+sP0cQicvbXafXmUzPAZzeqFg+VLFn7F9MP1WGlW
B1u7VIvBF1Mp9Nd3EAGBAoLRdRu+0dVWIjPTQuPIuD9cCatJA0wVaKUrjYbBMl88
a12LixNVGeSFS9N7ADHx0/o7GNT6l88YbaLP6zggUHpUD/bR+cDN7vllIQARAQAB
iQI2BBgBCAAgFiEEtywGVC8q4lAnY6Jo2QCFWqn1FOAFAmkSZAQCGwwACgkQ2QCF
Wqn1FOAfAA/8CYq4p0p4bobY20CKEMsZrkBTFJyPDqzFwMeTjgpzqbD7Y3Qq5QCK
OBbvY02GWdiIsNOzKdBxiuam2xYP9WHZj4y7/uWEvT0qlPVmDFu+HXjoJ43oxwFd
CUp2gMuQ4cSL3X94VRJ3BkVL+tgBm8CNY0vnTLLOO3kum/R69VsGJS1JSGUWjNM+
4qwS3mz+73xJu1HmERyN2RZF/DGIZI2PyONQQ6aH85G1Dd2ohu2/DBAkQAMBrPbj
FrbDaBLyFhODxU3kTWqnfLlaElSm2EGdIU2yx7n4BggEa//NZRMm5kyeo4vzhtlQ
YIVUMLAOLZvnEqDnsLKp+22FzNR/O+htBQC4lPywl53oYSALdhz1IQlcAC1ru5KR
XPzhIXV6IIzkcx9xNkEclZxmsuy5ERXyKEmLbIHAlzFmnrldlt2ZgXDtzaorLmxj
klKibxd5tF50qOpOivz+oPtFo7n+HmFa1nlVAMxlDCUdM0pEVeYDKI5zfVwalyhZ
NnjpakdZSXMwgc7NP/hH9buF35hKDp7EckT2y3JNYwHsDdy1icXN2q40XZw5tSIn
zkPWdu3OUY8PISohN6Pw4h0RH4ZmoX97E8sEfmdKaT58U4Hf2aAv5r9IWCSrAVqY
u5jvac29CzQR9Kal0A+8phHAXHNFD83SwzIC0syaT9ficAguwGH8X6Q=
=nGuD
-----END PGP PUBLIC KEY BLOCK-----
```

View File

@@ -13,6 +13,7 @@ include_guard(GLOBAL)
set(is_clang FALSE)
set(is_gcc FALSE)
set(is_msvc FALSE)
set(is_xcode FALSE)
if(CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") # Clang or AppleClang
set(is_clang TRUE)
@@ -24,6 +25,11 @@ else()
message(FATAL_ERROR "Unsupported C++ compiler: ${CMAKE_CXX_COMPILER_ID}")
endif()
# Xcode generator detection
if(CMAKE_GENERATOR STREQUAL "Xcode")
set(is_xcode TRUE)
endif()
# --------------------------------------------------------------------
# Operating system detection
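For context on the Xcode generator detection added above: `CMAKE_GENERATOR` equals `Xcode` only when the project is configured with the Xcode generator, so `is_xcode` stays FALSE for Makefile or Ninja builds. A quick illustration (build directory names are arbitrary):

```
# is_xcode becomes TRUE only for an Xcode-generator configure (macOS with Xcode installed):
cmake -S . -B build-xcode -G Xcode
# With the default generator (e.g. Ninja or Unix Makefiles), is_xcode remains FALSE:
cmake -S . -B build
```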

View File

@@ -32,14 +32,14 @@ target_protobuf_sources(xrpl.libpb xrpl/proto
target_compile_options(xrpl.libpb
PUBLIC
$<$<BOOL:${MSVC}>:-wd4996>
$<$<BOOL:${XCODE}>:
$<$<BOOL:${is_msvc}>:-wd4996>
$<$<BOOL:${is_xcode}>:
--system-header-prefix="google/protobuf"
-Wno-deprecated-dynamic-exception-spec
>
PRIVATE
$<$<BOOL:${MSVC}>:-wd4065>
$<$<NOT:$<BOOL:${MSVC}>>:-Wno-deprecated-declarations>
$<$<BOOL:${is_msvc}>:-wd4065>
$<$<NOT:$<BOOL:${is_msvc}>>:-Wno-deprecated-declarations>
)
target_link_libraries(xrpl.libpb

View File

@@ -4,6 +4,12 @@
include(create_symbolic_link)
# If no suffix is defined for executables (e.g. Windows uses .exe but Linux
# and macOS use none), then explicitly set it to the empty string.
if(NOT DEFINED suffix)
set(suffix "")
endif()
install (
TARGETS
common

View File

@@ -4,6 +4,11 @@
include(CompilationEnv)
# Set defaults for optional variables to avoid uninitialized variable warnings
if(NOT DEFINED voidstar)
set(voidstar OFF)
endif()
add_library (opts INTERFACE)
add_library (Xrpl::opts ALIAS opts)
target_compile_definitions (opts
@@ -52,7 +57,7 @@ add_library (xrpl_syslibs INTERFACE)
add_library (Xrpl::syslibs ALIAS xrpl_syslibs)
target_link_libraries (xrpl_syslibs
INTERFACE
$<$<BOOL:${MSVC}>:
$<$<BOOL:${is_msvc}>:
legacy_stdio_definitions.lib
Shlwapi
kernel32
@@ -69,10 +74,10 @@ target_link_libraries (xrpl_syslibs
odbccp32
crypt32
>
$<$<NOT:$<BOOL:${MSVC}>>:dl>
$<$<NOT:$<OR:$<BOOL:${MSVC}>,$<BOOL:${APPLE}>>>:rt>)
$<$<NOT:$<BOOL:${is_msvc}>>:dl>
$<$<NOT:$<OR:$<BOOL:${is_msvc}>,$<BOOL:${is_macos}>>>:rt>)
if (NOT MSVC)
if (NOT is_msvc)
set (THREADS_PREFER_PTHREAD_FLAG ON)
find_package (Threads)
target_link_libraries (xrpl_syslibs INTERFACE Threads::Threads)

View File

@@ -43,7 +43,10 @@
include(CompilationEnv)
# Read environment variable
set(SANITIZERS $ENV{SANITIZERS})
set(SANITIZERS "")
if(DEFINED ENV{SANITIZERS})
set(SANITIZERS "$ENV{SANITIZERS}")
endif()
# Set SANITIZERS_ENABLED flag for use in other modules
if(SANITIZERS MATCHES "address|thread|undefinedbehavior")
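Since this module only reads the `SANITIZERS` environment variable at configure time, enabling a sanitizer is a matter of exporting it before running CMake; a sketch, where the configure invocation itself is illustrative rather than the project's documented one:

```
# Configure with AddressSanitizer; only the SANITIZERS variable matters to this module.
SANITIZERS=address cmake -S . -B build-asan
# Leaving SANITIZERS unset (or empty) keeps SANITIZERS_ENABLED off.
cmake -S . -B build
```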

View File

@@ -4,10 +4,11 @@
include(CompilationEnv)
if("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true")
set(is_ci TRUE)
else()
set(is_ci FALSE)
set(is_ci FALSE)
if(DEFINED ENV{CI})
if("$ENV{CI}" STREQUAL "true")
set(is_ci TRUE)
endif()
endif()
get_directory_property(has_parent PARENT_DIRECTORY)

View File

@@ -4,6 +4,7 @@
#include <xrpl/beast/utility/instrumentation.h>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <ostream>

View File

@@ -1,4 +1,5 @@
#include <xrpl/beast/core/CurrentThreadName.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <string>
#include <string_view>
@@ -95,6 +96,11 @@ setCurrentThreadNameImpl(std::string_view name)
std::cerr << "WARNING: Thread name \"" << name << "\" (length "
<< name.size() << ") exceeds maximum of "
<< maxThreadNameLength << " characters on Linux.\n";
XRPL_ASSERT(
false,
"beast::detail::setCurrentThreadNameImpl : Thread name exceeds "
"maximum length for Linux");
}
#endif
}
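The warning and assert above exist because Linux caps a thread name at 15 characters plus the terminating NUL (the kernel's 16-byte comm field), which is also why the job and thread names elsewhere in this change are shortened. A quick way to observe the truncation from a shell, assuming a typical Linux kernel (note that this renames the current shell process):

```
# Write a 25-character name into the shell's own comm field...
printf 'ThisIsAVeryLongThreadName' > /proc/$$/comm
# ...and read it back: the kernel keeps only the first 15 characters.
cat /proc/$$/comm    # prints: ThisIsAVeryLong
```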

View File

@@ -41,7 +41,7 @@ Job::queue_time() const
void
Job::doJob()
{
beast::setCurrentThreadName("doJob: " + mName);
beast::setCurrentThreadName("j:" + mName);
m_loadEvent->start();
m_loadEvent->setName(mName);

View File

@@ -88,20 +88,15 @@ public:
BEAST_EXPECT(stateB == 2);
}
#if BOOST_OS_LINUX
// On Linux, verify that thread names longer than 15 characters
// are truncated to 15 characters (the 16th character is reserved for
// the null terminator).
// On Linux, verify that thread names within the 15 character limit
// are set correctly (the 16th character is reserved for the null
// terminator).
{
testName(
"123456789012345",
"123456789012345"); // 15 chars, no truncation
testName(
"1234567890123456", "123456789012345"); // 16 chars, truncated
testName(
"ThisIsAVeryLongThreadNameExceedingLimit",
"ThisIsAVeryLong"); // 39 chars, truncated
"123456789012345"); // 15 chars, maximum allowed
testName("", ""); // empty name
testName("short", "short"); // short name, no truncation
testName("short", "short"); // short name
}
#endif
}

View File

@@ -56,7 +56,7 @@ public:
gate g1, g2;
std::shared_ptr<JobQueue::Coro> c;
env.app().getJobQueue().postCoro(
jtCLIENT, "Coroutine-Test", [&](auto const& cr) {
jtCLIENT, "CoroTest", [&](auto const& cr) {
c = cr;
g1.signal();
c->yield();
@@ -83,7 +83,7 @@ public:
gate g;
env.app().getJobQueue().postCoro(
jtCLIENT, "Coroutine-Test", [&](auto const& c) {
jtCLIENT, "CoroTest", [&](auto const& c) {
c->post();
c->yield();
g.signal();
@@ -109,7 +109,7 @@ public:
BEAST_EXPECT(*lv == -1);
gate g;
jq.addJob(jtCLIENT, "LocalValue-Test", [&]() {
jq.addJob(jtCLIENT, "LocalValTest", [&]() {
this->BEAST_EXPECT(*lv == -1);
*lv = -2;
this->BEAST_EXPECT(*lv == -2);
@@ -120,7 +120,7 @@ public:
for (int i = 0; i < N; ++i)
{
jq.postCoro(jtCLIENT, "Coroutine-Test", [&, id = i](auto const& c) {
jq.postCoro(jtCLIENT, "CoroTest", [&, id = i](auto const& c) {
a[id] = c;
g.signal();
c->yield();
@@ -148,7 +148,7 @@ public:
c->join();
}
jq.addJob(jtCLIENT, "LocalValue-Test", [&]() {
jq.addJob(jtCLIENT, "LocalValTest", [&]() {
this->BEAST_EXPECT(*lv == -2);
g.signal();
});

View File

@@ -119,9 +119,7 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
acquiringLedger_ = hash;
app_.getJobQueue().addJob(
jtADVANCE,
"getConsensusLedger1",
[id = hash, &app = app_, this]() {
jtADVANCE, "GetConsL1", [id = hash, &app = app_, this]() {
JLOG(j_.debug())
<< "JOB advanceLedger getConsensusLedger1 started";
app.getInboundLedgers().acquireAsync(
@@ -420,7 +418,7 @@ RCLConsensus::Adaptor::onAccept(
{
app_.getJobQueue().addJob(
jtACCEPT,
"acceptLedger",
"AcceptLedger",
[=, this, cj = std::move(consensusJson)]() mutable {
// Note that no lock is held or acquired during this job.
// This is because generic Consensus guarantees that once a ledger

View File

@@ -122,13 +122,11 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
Application* pApp = &app_;
app_.getJobQueue().addJob(
jtADVANCE, "getConsensusLedger2", [pApp, hash, this]() {
JLOG(j_.debug())
<< "JOB advanceLedger getConsensusLedger2 started";
pApp->getInboundLedgers().acquireAsync(
hash, 0, InboundLedger::Reason::CONSENSUS);
});
app_.getJobQueue().addJob(jtADVANCE, "GetConsL2", [pApp, hash, this]() {
JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger2 started";
pApp->getInboundLedgers().acquireAsync(
hash, 0, InboundLedger::Reason::CONSENSUS);
});
return std::nullopt;
}

View File

@@ -46,7 +46,7 @@ ConsensusTransSetSF::gotNode(
"xrpl::ConsensusTransSetSF::gotNode : transaction hash "
"match");
auto const pap = &app_;
app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() {
app_.getJobQueue().addJob(jtTRANSACTION, "TxsToTxn", [pap, stx]() {
pap->getOPs().submitTransaction(stx);
});
}

View File

@@ -48,9 +48,9 @@ OrderBookDB::setup(std::shared_ptr<ReadView const> const& ledger)
update(ledger);
else
app_.getJobQueue().addJob(
jtUPDATE_PF,
"OrderBookDB::update: " + std::to_string(ledger->seq()),
[this, ledger]() { update(ledger); });
jtUPDATE_PF, "OrderBookUpd", [this, ledger]() {
update(ledger);
});
}
}

View File

@@ -454,7 +454,7 @@ InboundLedger::done()
// We hold the PeerSet lock, so must dispatch
app_.getJobQueue().addJob(
jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() {
jtLEDGER_DATA, "AcqDone", [self = shared_from_this()]() {
if (self->complete_ && !self->failed_)
{
self->app_.getLedgerMaster().checkAccept(self->getLedger());

View File

@@ -192,7 +192,7 @@ public:
// dispatch
if (ledger->gotData(std::weak_ptr<Peer>(peer), packet))
app_.getJobQueue().addJob(
jtLEDGER_DATA, "processLedgerData", [ledger]() {
jtLEDGER_DATA, "ProcessLData", [ledger]() {
ledger->runData();
});
@@ -207,7 +207,7 @@ public:
if (packet->type() == protocol::liAS_NODE)
{
app_.getJobQueue().addJob(
jtLEDGER_DATA, "gotStaleData", [this, packet]() {
jtLEDGER_DATA, "GotStaleData", [this, packet]() {
gotStaleData(packet);
});
}

View File

@@ -21,7 +21,7 @@ LedgerDeltaAcquire::LedgerDeltaAcquire(
ledgerHash,
LedgerReplayParameters::SUB_TASK_TIMEOUT,
{jtREPLAY_TASK,
"LedgerReplayDelta",
"LedReplDelta",
LedgerReplayParameters::MAX_QUEUED_TASKS},
app.journal("LedgerReplayDelta"))
, inboundLedgers_(inboundLedgers)
@@ -225,7 +225,7 @@ LedgerDeltaAcquire::onLedgerBuilt(
}
app_.getJobQueue().addJob(
jtREPLAY_TASK,
"onLedgerBuilt",
"OnLedBuilt",
[=, ledger = this->fullLedger_, &app = this->app_]() {
for (auto reason : reasons)
{

View File

@@ -1344,7 +1344,7 @@ LedgerMaster::tryAdvance()
if (!mAdvanceThread && !mValidLedger.empty())
{
mAdvanceThread = true;
app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
app_.getJobQueue().addJob(jtADVANCE, "AdvanceLedger", [this]() {
std::unique_lock sl(m_mutex);
XRPL_ASSERT(
@@ -1482,7 +1482,7 @@ bool
LedgerMaster::newPathRequest()
{
std::unique_lock ml(m_mutex);
mPathFindNewRequest = newPFWork("pf:newRequest", ml);
mPathFindNewRequest = newPFWork("PthFindNewReq", ml);
return mPathFindNewRequest;
}
@@ -1503,7 +1503,7 @@ LedgerMaster::newOrderBookDB()
std::unique_lock ml(m_mutex);
mPathLedger.reset();
return newPFWork("pf:newOBDB", ml);
return newPFWork("PthFindOBDB", ml);
}
/** A thread needs to be dispatched to handle pathfinding work of some kind.
@@ -1841,7 +1841,7 @@ LedgerMaster::fetchForHistory(
mFillInProgress = seq;
}
app_.getJobQueue().addJob(
jtADVANCE, "tryFill", [this, ledger]() {
jtADVANCE, "TryFill", [this, ledger]() {
tryFill(ledger);
});
}
@@ -1980,7 +1980,7 @@ LedgerMaster::doAdvance(std::unique_lock<std::recursive_mutex>& sl)
}
app_.getOPs().clearNeedNetworkLedger();
progress = newPFWork("pf:newLedger", sl);
progress = newPFWork("PthFindNewLed", sl);
}
if (progress)
mAdvanceWork = true;
@@ -2011,7 +2011,7 @@ LedgerMaster::gotFetchPack(bool progress, std::uint32_t seq)
{
if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire))
{
app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() {
app_.getJobQueue().addJob(jtLEDGER_DATA, "GotFetchPack", [&]() {
app_.getInboundLedgers().gotFetchPack();
mGotFetchPackThread.clear(std::memory_order_release);
});

View File

@@ -77,7 +77,7 @@ LedgerReplayTask::LedgerReplayTask(
parameter.finishHash_,
LedgerReplayParameters::TASK_TIMEOUT,
{jtREPLAY_TASK,
"LedgerReplayTask",
"LedReplTask",
LedgerReplayParameters::MAX_QUEUED_TASKS},
app.journal("LedgerReplayTask"))
, inboundLedgers_(inboundLedgers)

View File

@@ -16,7 +16,7 @@ SkipListAcquire::SkipListAcquire(
ledgerHash,
LedgerReplayParameters::SUB_TASK_TIMEOUT,
{jtREPLAY_TASK,
"SkipListAcquire",
"SkipListAcq",
LedgerReplayParameters::MAX_QUEUED_TASKS},
app.journal("LedgerReplaySkipList"))
, inboundLedgers_(inboundLedgers)

View File

@@ -27,7 +27,7 @@ TransactionAcquire::TransactionAcquire(
app,
hash,
TX_ACQUIRE_TIMEOUT,
{jtTXN_DATA, "TransactionAcquire", {}},
{jtTXN_DATA, "TxAcq", {}},
app.journal("TransactionAcquire"))
, mHaveRoot(false)
, mPeerSet(std::move(peerSet))
@@ -60,7 +60,7 @@ TransactionAcquire::done()
// just updates the consensus and related structures when we acquire
// a transaction set. No need to update them if we're shutting down.
app_.getJobQueue().addJob(
jtTXN_DATA, "completeAcquire", [pap, hash, map]() {
jtTXN_DATA, "ComplAcquire", [pap, hash, map]() {
pap->getInboundTransactions().giveSet(hash, map, true);
});
}

View File

@@ -331,8 +331,7 @@ run(int argc, char** argv)
{
using namespace std;
beast::setCurrentThreadName(
"rippled: main " + BuildInfo::getVersionString());
beast::setCurrentThreadName("main");
po::variables_map vm;

View File

@@ -12,9 +12,8 @@ NodeStoreScheduler::scheduleTask(NodeStore::Task& task)
if (jobQueue_.isStopped())
return;
if (!jobQueue_.addJob(jtWRITE, "NodeObject::store", [&task]() {
task.performScheduledTask();
}))
if (!jobQueue_.addJob(
jtWRITE, "NObjStore", [&task]() { task.performScheduledTask(); }))
{
// Job not added, presumably because we're shutting down.
// Recover by executing the task synchronously.

View File

@@ -981,7 +981,7 @@ NetworkOPsImp::setHeartbeatTimer()
heartbeatTimer_,
mConsensus.parms().ledgerGRANULARITY,
[this]() {
m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
m_job_queue.addJob(jtNETOP_TIMER, "NetHeart", [this]() {
processHeartbeatTimer();
});
},
@@ -997,7 +997,7 @@ NetworkOPsImp::setClusterTimer()
clusterTimer_,
10s,
[this]() {
m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
m_job_queue.addJob(jtNETOP_CLUSTER, "NetCluster", [this]() {
processClusterTimer();
});
},
@@ -1225,7 +1225,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
auto tx = std::make_shared<Transaction>(trans, reason, app_);
m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
m_job_queue.addJob(jtTRANSACTION, "SubmitTxn", [this, tx]() {
auto t = tx;
processTransaction(t, false, false, FailHard::no);
});
@@ -1323,7 +1323,7 @@ NetworkOPsImp::doTransactionAsync(
if (mDispatchState == DispatchState::none)
{
if (m_job_queue.addJob(
jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
jtBATCH, "TxBatchAsync", [this]() { transactionBatch(); }))
{
mDispatchState = DispatchState::scheduled;
}
@@ -1370,7 +1370,7 @@ NetworkOPsImp::doTransactionSyncBatch(
if (mTransactions.size())
{
// More transactions need to be applied, but by another job.
if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
if (m_job_queue.addJob(jtBATCH, "TxBatchSync", [this]() {
transactionBatch();
}))
{
@@ -3208,19 +3208,16 @@ NetworkOPsImp::reportFeeChange()
if (f != mLastFeeSummary)
{
m_job_queue.addJob(
jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
pubServer();
});
jtCLIENT_FEE_CHANGE, "PubFee", [this]() { pubServer(); });
}
}
void
NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase)
{
m_job_queue.addJob(
jtCLIENT_CONSENSUS,
"reportConsensusStateChange->pubConsensus",
[this, phase]() { pubConsensus(phase); });
m_job_queue.addJob(jtCLIENT_CONSENSUS, "PubCons", [this, phase]() {
pubConsensus(phase);
});
}
inline void
@@ -3728,7 +3725,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
app_.getJobQueue().addJob(
jtCLIENT_ACCT_HIST,
"AccountHistoryTxStream",
"HistTxStream",
[this, dbType = databaseType, subInfo]() {
auto const& accountId = subInfo.index_->accountId_;
auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;

View File

@@ -1158,7 +1158,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMManifests> const& m)
fee_.update(Resource::feeModerateBurdenPeer, "oversize");
app_.getJobQueue().addJob(
jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
jtMANIFEST, "RcvManifests", [this, that = shared_from_this(), m]() {
overlay_.onManifests(m, that);
});
}
@@ -1452,7 +1452,7 @@ PeerImp::handleTransaction(
{
app_.getJobQueue().addJob(
jtTRANSACTION,
"recvTransaction->checkTransaction",
"RcvCheckTx",
[weak = std::weak_ptr<PeerImp>(shared_from_this()),
flags,
checkSignature,
@@ -1555,7 +1555,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
// Queue a job to process the request
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
app_.getJobQueue().addJob(jtLEDGER_REQ, "RcvGetLedger", [weak, m]() {
if (auto peer = weak.lock())
peer->processLedgerRequest(m);
});
@@ -1575,29 +1575,27 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathRequest> const& m)
fee_.update(
Resource::feeModerateBurdenPeer, "received a proof path request");
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() {
if (auto peer = weak.lock())
app_.getJobQueue().addJob(jtREPLAY_REQ, "RcvProofPReq", [weak, m]() {
if (auto peer = weak.lock())
{
auto reply =
peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
if (reply.has_error())
{
auto reply =
peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
if (reply.has_error())
{
if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
peer->charge(
Resource::feeMalformedRequest,
"proof_path_request");
else
peer->charge(
Resource::feeRequestNoReply, "proof_path_request");
}
if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
peer->charge(
Resource::feeMalformedRequest, "proof_path_request");
else
{
peer->send(std::make_shared<Message>(
reply, protocol::mtPROOF_PATH_RESPONSE));
}
peer->charge(
Resource::feeRequestNoReply, "proof_path_request");
}
});
else
{
peer->send(std::make_shared<Message>(
reply, protocol::mtPROOF_PATH_RESPONSE));
}
}
});
}
void
@@ -1629,30 +1627,27 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaRequest> const& m)
fee_.fee = Resource::feeModerateBurdenPeer;
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() {
if (auto peer = weak.lock())
app_.getJobQueue().addJob(jtREPLAY_REQ, "RcvReplDReq", [weak, m]() {
if (auto peer = weak.lock())
{
auto reply =
peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
if (reply.has_error())
{
auto reply =
peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
if (reply.has_error())
{
if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
peer->charge(
Resource::feeMalformedRequest,
"replay_delta_request");
else
peer->charge(
Resource::feeRequestNoReply,
"replay_delta_request");
}
if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
peer->charge(
Resource::feeMalformedRequest, "replay_delta_request");
else
{
peer->send(std::make_shared<Message>(
reply, protocol::mtREPLAY_DELTA_RESPONSE));
}
peer->charge(
Resource::feeRequestNoReply, "replay_delta_request");
}
});
else
{
peer->send(std::make_shared<Message>(
reply, protocol::mtREPLAY_DELTA_RESPONSE));
}
}
});
}
void
@@ -1748,7 +1743,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
{
std::weak_ptr<PeerImp> weak{shared_from_this()};
app_.getJobQueue().addJob(
jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
jtTXN_DATA, "RcvPeerData", [weak, ledgerHash, m]() {
if (auto peer = weak.lock())
{
peer->app_.getInboundTransactions().gotData(
@@ -1876,7 +1871,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
"recvPropose->checkPropose",
"checkPropose",
[weak, isTrusted, m, proposal]() {
if (auto peer = weak.lock())
peer->checkPropose(isTrusted, m, proposal);
@@ -2490,18 +2485,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
}
else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
{
std::string const name = [isTrusted, val]() {
std::string ret =
isTrusted ? "Trusted validation" : "Untrusted validation";
#ifdef DEBUG
ret += " " +
std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " +
to_string(val->getNodeID());
#endif
return ret;
}();
std::string const name = isTrusted ? "ChkTrust" : "ChkUntrust";
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
@@ -2561,11 +2545,10 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
}
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
jtREQUESTED_TXN, "doTransactions", [weak, m]() {
if (auto peer = weak.lock())
peer->doTransactions(m);
});
app_.getJobQueue().addJob(jtREQUESTED_TXN, "DoTxs", [weak, m]() {
if (auto peer = weak.lock())
peer->doTransactions(m);
});
return;
}
@@ -2705,11 +2688,10 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
}
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
jtMISSING_TXN, "handleHaveTransactions", [weak, m]() {
if (auto peer = weak.lock())
peer->handleHaveTransactions(m);
});
app_.getJobQueue().addJob(jtMISSING_TXN, "HandleHaveTxs", [weak, m]() {
if (auto peer = weak.lock())
peer->handleHaveTransactions(m);
});
}
void

View File

@@ -72,7 +72,7 @@ public:
JLOG(j_.info()) << "RPCCall::fromNetwork start";
mSending = m_jobQueue.addJob(
jtCLIENT_SUBSCRIBE, "RPCSub::sendThread", [this]() {
jtCLIENT_SUBSCRIBE, "RPCSubSendThr", [this]() {
sendThread();
});
}