Mirror of https://github.com/XRPLF/rippled.git (synced 2025-12-06 17:27:55 +00:00)

Commit: Merge branch 'develop' into ripple/smart-escrow
.github/workflows/build-test.yml (34 changes)

@@ -101,6 +101,7 @@ jobs:
  echo 'CMake arguments: ${{ matrix.cmake_args }}'
  echo 'CMake target: ${{ matrix.cmake_target }}'
  echo 'Config name: ${{ matrix.config_name }}'
+
  - name: Clean workspace (MacOS)
  if: ${{ inputs.os == 'macos' }}
  run: |

@@ -111,18 +112,12 @@ jobs:
  exit 1
  fi
  find "${WORKSPACE}" -depth 1 | xargs rm -rfv

  - name: Checkout repository
  uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- - name: Set up Python (Windows)
- if: ${{ inputs.os == 'windows' }}
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
- with:
- python-version: 3.13
- - name: Install build tools (Windows)
- if: ${{ inputs.os == 'windows' }}
- run: |
- echo 'Installing build tools.'
- pip install wheel conan
+ - name: Prepare runner
+ uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
  - name: Check configuration (Windows)
  if: ${{ inputs.os == 'windows' }}
  run: |

@@ -134,11 +129,6 @@ jobs:

  echo 'Checking Conan version.'
  conan --version
- - name: Install build tools (MacOS)
- if: ${{ inputs.os == 'macos' }}
- run: |
- echo 'Installing build tools.'
- brew install --quiet cmake conan ninja coreutils
  - name: Check configuration (Linux and MacOS)
  if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }}
  run: |

@@ -162,18 +152,7 @@ jobs:

  echo 'Checking nproc version.'
  nproc --version
- - name: Set up Conan home directory (MacOS)
- if: ${{ inputs.os == 'macos' }}
- run: |
- echo 'Setting up Conan home directory.'
- export CONAN_HOME=${{ github.workspace }}/.conan
- mkdir -p ${CONAN_HOME}
- - name: Set up Conan home directory (Windows)
- if: ${{ inputs.os == 'windows' }}
- run: |
- echo 'Setting up Conan home directory.'
- set CONAN_HOME=${{ github.workspace }}\.conan
- mkdir -p %CONAN_HOME%
  - name: Set up Conan configuration
  run: |
  echo 'Installing configuration.'

@@ -196,6 +175,7 @@ jobs:

  echo 'Listing Conan remotes.'
  conan remote list
+
  - name: Build dependencies
  uses: ./.github/actions/build-deps
  with:
.github/workflows/on-pr.yml (70 changes)

@@ -28,30 +28,26 @@ env:
  CONAN_REMOTE_URL: https://conan.ripplex.io

  jobs:
- # This job determines whether the workflow should run. It runs when the PR is
- # not a draft or has the 'DraftRunCI' label.
+ # This job determines whether the rest of the workflow should run. It runs
+ # when the PR is not a draft (which should also cover merge-group) or
+ # has the 'DraftRunCI' label.
  should-run:
  if: ${{ !github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
  runs-on: ubuntu-latest
- steps:
- - name: No-op
- run: true
-
- # This job checks whether any files have changed that should cause the next
- # jobs to run. We do it this way rather than using `paths` in the `on:`
- # section, because all required checks must pass, even for changes that do not
- # modify anything that affects those checks. We would therefore like to make
- # the checks required only if the job runs, but GitHub does not support that
- # directly. By always executing the workflow on new commits and by using the
- # changed-files action below, we ensure that Github considers any skipped jobs
- # to have passed, and in turn the required checks as well.
- any-changed:
- needs: should-run
- runs-on: ubuntu-latest
  steps:
  - name: Checkout repository
  uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
  - name: Determine changed files
+ # This step checks whether any files have changed that should
+ # cause the next jobs to run. We do it this way rather than
+ # using `paths` in the `on:` section, because all required
+ # checks must pass, even for changes that do not modify anything
+ # that affects those checks. We would therefore like to make the
+ # checks required only if the job runs, but GitHub does not
+ # support that directly. By always executing the workflow on new
+ # commits and by using the changed-files action below, we ensure
+ # that Github considers any skipped jobs to have passed, and in
+ # turn the required checks as well.
  id: changes
  uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
  with:

@@ -79,24 +75,40 @@ jobs:
  tests/**
  CMakeLists.txt
  conanfile.py
+ - name: Check whether to run
+ # This step determines whether the rest of the workflow should
+ # run. The rest of the workflow will run if this job runs AND at
+ # least one of:
+ # * Any of the files checked in the `changes` step were modified
+ # * The PR is NOT a draft and is labeled "Ready to merge"
+ # * The workflow is running from the merge queue
+ id: go
+ env:
+ FILES: ${{ steps.changes.outputs.any_changed }}
+ DRAFT: ${{ github.event.pull_request.draft }}
+ READY: ${{ contains(github.event.pull_request.labels.*.name, 'Ready to merge') }}
+ MERGE: ${{ github.event_name == 'merge_group' }}
+ run: |
+ echo "go=${{ (env.DRAFT != 'true' && env.READY == 'true') || env.FILES == 'true' || env.MERGE == 'true' }}" >> "${GITHUB_OUTPUT}"
+ cat "${GITHUB_OUTPUT}"
  outputs:
- changed: ${{ steps.changes.outputs.any_changed }}
+ go: ${{ steps.go.outputs.go == 'true' }}

  check-format:
- needs: any-changed
- if: needs.any-changed.outputs.changed == 'true'
+ needs: should-run
+ if: needs.should-run.outputs.go == 'true'
  uses: ./.github/workflows/check-format.yml

  check-levelization:
- needs: any-changed
- if: needs.any-changed.outputs.changed == 'true'
+ needs: should-run
+ if: needs.should-run.outputs.go == 'true'
  uses: ./.github/workflows/check-levelization.yml

  # This job works around the limitation that GitHub Actions does not support
  # using environment variables as inputs for reusable workflows.
  generate-outputs:
- needs: any-changed
- if: needs.any-changed.outputs.changed == 'true'
+ needs: should-run
+ if: needs.should-run.outputs.go == 'true'
  runs-on: ubuntu-latest
  steps:
  - name: No-op

@@ -130,3 +142,13 @@ jobs:
  clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
  conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
  conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
+
+ passed:
+ needs:
+ - build-test
+ - check-format
+ - check-levelization
+ runs-on: ubuntu-latest
+ steps:
+ - name: No-op
+ run: true
@@ -14,12 +14,6 @@ find_package(Boost 1.82 REQUIRED

  add_library(ripple_boost INTERFACE)
  add_library(Ripple::boost ALIAS ripple_boost)
- if(XCODE)
- target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
- target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/")
- else()
- target_include_directories(ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
- endif()

  target_link_libraries(ripple_boost
  INTERFACE
@@ -157,7 +157,12 @@ enum error_code_i {
  // Pathfinding
  rpcDOMAIN_MALFORMED = 97,

- rpcLAST = rpcDOMAIN_MALFORMED // rpcLAST should always equal the last code.
+ // ledger_entry
+ rpcENTRY_NOT_FOUND = 98,
+ rpcUNEXPECTED_LEDGER_TYPE = 99,
+
+ rpcLAST =
+ rpcUNEXPECTED_LEDGER_TYPE // rpcLAST should always equal the last code.
  };

  /** Codes returned in the `warnings` array of certain RPC commands.
@@ -68,9 +68,13 @@ JSS(Flags); // in/out: TransactionSign; field.
  JSS(Holder); // field.
  JSS(Invalid); //
  JSS(Issuer); // in: Credential transactions
+ JSS(IssuingChainDoor); // field.
+ JSS(IssuingChainIssue); // field.
  JSS(LastLedgerSequence); // in: TransactionSign; field
  JSS(LastUpdateTime); // field.
  JSS(LimitAmount); // field.
+ JSS(LockingChainDoor); // field.
+ JSS(LockingChainIssue); // field.
  JSS(NetworkID); // field.
  JSS(LPTokenOut); // in: AMM Liquidity Provider deposit tokens
  JSS(LPTokenIn); // in: AMM Liquidity Provider withdraw tokens
@@ -24,6 +24,7 @@
  #include <xrpl/json/json_value.h>
  #include <xrpl/json/json_writer.h>

+ #include <cmath>
  #include <cstdlib>
  #include <cstring>
  #include <string>

@@ -685,7 +686,9 @@ Value::isConvertibleTo(ValueType other) const
  (other == intValue && value_.real_ >= minInt &&
  value_.real_ <= maxInt) ||
  (other == uintValue && value_.real_ >= 0 &&
- value_.real_ <= maxUInt) ||
+ value_.real_ <= maxUInt &&
+ std::fabs(round(value_.real_) - value_.real_) <
+ std::numeric_limits<double>::epsilon()) ||
  other == realValue || other == stringValue ||
  other == booleanValue;
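For reference, a standalone sketch (not rippled code) of the stricter conversion rule the hunk above introduces: a real value only counts as convertible to an unsigned integer when it is within range and has no fractional part. The maxUInt constant below is an assumption chosen to match Json::Value's usual 32-bit limit.

#include <cmath>
#include <iostream>
#include <limits>

// Mirrors the new check in Value::isConvertibleTo: in range and integral.
bool convertibleToUInt(double real, double maxUInt)
{
    return real >= 0 && real <= maxUInt &&
        std::fabs(std::round(real) - real) <
            std::numeric_limits<double>::epsilon();
}

int main()
{
    double const maxUInt = 4294967295.0;  // assumed 32-bit maxUInt
    std::cout << std::boolalpha
              << convertibleToUInt(3.0, maxUInt) << '\n'   // true
              << convertibleToUInt(3.5, maxUInt) << '\n';  // false
}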
@@ -36,7 +36,7 @@ namespace BuildInfo {
  // and follow the format described at http://semver.org/
  //------------------------------------------------------------------------------
  // clang-format off
- char const* const versionString = "2.6.0-rc3"
+ char const* const versionString = "2.6.0"
  // clang-format on

  #if defined(DEBUG) || defined(SANITIZER)
@@ -117,7 +117,10 @@ constexpr static ErrorInfo unorderedErrorInfos[]{
  {rpcORACLE_MALFORMED, "oracleMalformed", "Oracle request is malformed.", 400},
  {rpcBAD_CREDENTIALS, "badCredentials", "Credentials do not exist, are not accepted, or have expired.", 400},
  {rpcTX_SIGNED, "transactionSigned", "Transaction should not be signed.", 400},
- {rpcDOMAIN_MALFORMED, "domainMalformed", "Domain is malformed.", 400}};
+ {rpcDOMAIN_MALFORMED, "domainMalformed", "Domain is malformed.", 400},
+ {rpcENTRY_NOT_FOUND, "entryNotFound", "Entry not found.", 400},
+ {rpcUNEXPECTED_LEDGER_TYPE, "unexpectedLedgerType", "Unexpected ledger type.", 400},
+ };
  // clang-format on

  // Sort and validate unorderedErrorInfos at compile time. Should be
@@ -27,6 +27,7 @@
  #include <xrpl/protocol/STObject.h>
  #include <xrpl/protocol/STXChainBridge.h>
  #include <xrpl/protocol/Serializer.h>
+ #include <xrpl/protocol/jss.h>

  #include <boost/format/free_funcs.hpp>


@@ -98,12 +99,10 @@ STXChainBridge::STXChainBridge(SField const& name, Json::Value const& v)
  };
  checkExtra(v);

- Json::Value const& lockingChainDoorStr =
- v[sfLockingChainDoor.getJsonName()];
- Json::Value const& lockingChainIssue = v[sfLockingChainIssue.getJsonName()];
- Json::Value const& issuingChainDoorStr =
- v[sfIssuingChainDoor.getJsonName()];
- Json::Value const& issuingChainIssue = v[sfIssuingChainIssue.getJsonName()];
+ Json::Value const& lockingChainDoorStr = v[jss::LockingChainDoor];
+ Json::Value const& lockingChainIssue = v[jss::LockingChainIssue];
+ Json::Value const& issuingChainDoorStr = v[jss::IssuingChainDoor];
+ Json::Value const& issuingChainIssue = v[jss::IssuingChainIssue];

  if (!lockingChainDoorStr.isString())
  {

@@ -161,10 +160,10 @@ Json::Value
  STXChainBridge::getJson(JsonOptions jo) const
  {
  Json::Value v;
- v[sfLockingChainDoor.getJsonName()] = lockingChainDoor_.getJson(jo);
- v[sfLockingChainIssue.getJsonName()] = lockingChainIssue_.getJson(jo);
- v[sfIssuingChainDoor.getJsonName()] = issuingChainDoor_.getJson(jo);
- v[sfIssuingChainIssue.getJsonName()] = issuingChainIssue_.getJson(jo);
+ v[jss::LockingChainDoor] = lockingChainDoor_.getJson(jo);
+ v[jss::LockingChainIssue] = lockingChainIssue_.getJson(jo);
+ v[jss::IssuingChainDoor] = issuingChainDoor_.getJson(jo);
+ v[jss::IssuingChainIssue] = issuingChainIssue_.getJson(jo);
  return v;
  }

@@ -3028,18 +3028,6 @@ class Vault_test : public beast::unit_test::suite
  "malformedRequest");
  }

- {
- testcase("RPC ledger_entry zero seq");
- Json::Value jvParams;
- jvParams[jss::ledger_index] = jss::validated;
- jvParams[jss::vault][jss::owner] = issuer.human();
- jvParams[jss::vault][jss::seq] = 0;
- auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams));
- BEAST_EXPECT(
- jvVault[jss::result][jss::error].asString() ==
- "malformedRequest");
- }
-
  {
  testcase("RPC ledger_entry negative seq");
  Json::Value jvParams;
@@ -44,10 +44,10 @@ bridge(
  Issue const& issuingChainIssue)
  {
  Json::Value jv;
- jv[sfLockingChainDoor.getJsonName()] = lockingChainDoor.human();
- jv[sfLockingChainIssue.getJsonName()] = to_json(lockingChainIssue);
- jv[sfIssuingChainDoor.getJsonName()] = issuingChainDoor.human();
- jv[sfIssuingChainIssue.getJsonName()] = to_json(issuingChainIssue);
+ jv[jss::LockingChainDoor] = lockingChainDoor.human();
+ jv[jss::LockingChainIssue] = to_json(lockingChainIssue);
+ jv[jss::IssuingChainDoor] = issuingChainDoor.human();
+ jv[jss::IssuingChainIssue] = to_json(issuingChainIssue);
  return jv;
  }

@@ -60,10 +60,10 @@ bridge_rpc(
  Issue const& issuingChainIssue)
  {
  Json::Value jv;
- jv[sfLockingChainDoor.getJsonName()] = lockingChainDoor.human();
- jv[sfLockingChainIssue.getJsonName()] = to_json(lockingChainIssue);
- jv[sfIssuingChainDoor.getJsonName()] = issuingChainDoor.human();
- jv[sfIssuingChainIssue.getJsonName()] = to_json(issuingChainIssue);
+ jv[jss::LockingChainDoor] = lockingChainDoor.human();
+ jv[jss::LockingChainIssue] = to_json(lockingChainIssue);
+ jv[jss::IssuingChainDoor] = issuingChainDoor.human();
+ jv[jss::IssuingChainIssue] = to_json(issuingChainIssue);
  return jv;
  }

@@ -183,7 +183,7 @@ private:
  boost::asio::ip::make_address("172.1.1." + std::to_string(rid_)));
  PublicKey key(std::get<0>(randomKeyPair(KeyType::ed25519)));
  auto consumer = overlay.resourceManager().newInboundEndpoint(remote);
- auto slot = overlay.peerFinder().new_inbound_slot(local, remote);
+ auto [slot, _] = overlay.peerFinder().new_inbound_slot(local, remote);
  auto const peer = std::make_shared<PeerTest>(
  env.app(),
  slot,
@@ -20,6 +20,7 @@
  #include <test/unit_test/SuiteJournal.h>

  #include <xrpld/core/Config.h>
+ #include <xrpld/peerfinder/PeerfinderManager.h>
  #include <xrpld/peerfinder/detail/Logic.h>

  #include <xrpl/basics/chrono.h>

@@ -98,7 +99,7 @@ public:
  if (!list.empty())
  {
  BEAST_EXPECT(list.size() == 1);
- auto const slot = logic.new_outbound_slot(list.front());
+ auto const [slot, _] = logic.new_outbound_slot(list.front());
  BEAST_EXPECT(logic.onConnected(
  slot, beast::IP::Endpoint::from_string("65.0.0.2:5")));
  logic.on_closed(slot);

@@ -139,7 +140,7 @@ public:
  if (!list.empty())
  {
  BEAST_EXPECT(list.size() == 1);
- auto const slot = logic.new_outbound_slot(list.front());
+ auto const [slot, _] = logic.new_outbound_slot(list.front());
  if (!BEAST_EXPECT(logic.onConnected(
  slot, beast::IP::Endpoint::from_string("65.0.0.2:5"))))
  return;

@@ -158,6 +159,7 @@ public:
  BEAST_EXPECT(n <= (seconds + 59) / 60);
  }

+ // test accepting an incoming slot for an already existing outgoing slot
  void
  test_duplicateOutIn()
  {

@@ -166,8 +168,6 @@ public:
  TestChecker checker;
  TestStopwatch clock;
  Logic<TestChecker> logic(clock, store, checker, journal_);
- logic.addFixedPeer(
- "test", beast::IP::Endpoint::from_string("65.0.0.1:5"));
  {
  Config c;
  c.autoConnect = false;

@@ -176,28 +176,24 @@ public:
  logic.config(c);
  }

- auto const list = logic.autoconnect();
- if (BEAST_EXPECT(!list.empty()))
- {
- BEAST_EXPECT(list.size() == 1);
- auto const remote = list.front();
- auto const slot1 = logic.new_outbound_slot(remote);
- if (BEAST_EXPECT(slot1 != nullptr))
- {
- BEAST_EXPECT(
- logic.connectedAddresses_.count(remote.address()) == 1);
- auto const local =
- beast::IP::Endpoint::from_string("65.0.0.2:1024");
- auto const slot2 = logic.new_inbound_slot(local, remote);
- BEAST_EXPECT(
- logic.connectedAddresses_.count(remote.address()) == 1);
+ auto const remote = beast::IP::Endpoint::from_string("65.0.0.1:5");
+ auto const [slot1, r] = logic.new_outbound_slot(remote);
+ BEAST_EXPECT(slot1 != nullptr);
+ BEAST_EXPECT(r == Result::success);
+ BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1);
+ auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
+ auto const [slot2, r2] = logic.new_inbound_slot(local, remote);
+ BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1);
+ BEAST_EXPECT(r2 == Result::duplicatePeer);
  if (!BEAST_EXPECT(slot2 == nullptr))
  logic.on_closed(slot2);

  logic.on_closed(slot1);
  }
- }
- }

+ // test establishing outgoing slot for an already existing incoming slot
  void
  test_duplicateInOut()
  {

@@ -206,8 +202,6 @@ public:
  TestChecker checker;
  TestStopwatch clock;
  Logic<TestChecker> logic(clock, store, checker, journal_);
- logic.addFixedPeer(
- "test", beast::IP::Endpoint::from_string("65.0.0.1:5"));
  {
  Config c;
  c.autoConnect = false;

@@ -216,33 +210,202 @@ public:
  logic.config(c);
  }

- auto const list = logic.autoconnect();
- if (BEAST_EXPECT(!list.empty()))
- {
- BEAST_EXPECT(list.size() == 1);
- auto const remote = list.front();
- auto const local =
- beast::IP::Endpoint::from_string("65.0.0.2:1024");
- auto const slot1 = logic.new_inbound_slot(local, remote);
- if (BEAST_EXPECT(slot1 != nullptr))
- {
- BEAST_EXPECT(
- logic.connectedAddresses_.count(remote.address()) == 1);
- auto const slot2 = logic.new_outbound_slot(remote);
- BEAST_EXPECT(
- logic.connectedAddresses_.count(remote.address()) == 1);
+ auto const remote = beast::IP::Endpoint::from_string("65.0.0.1:5");
+ auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
+ auto const [slot1, r] = logic.new_inbound_slot(local, remote);
+ BEAST_EXPECT(slot1 != nullptr);
+ BEAST_EXPECT(r == Result::success);
+ BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1);
+ auto const [slot2, r2] = logic.new_outbound_slot(remote);
+ BEAST_EXPECT(r2 == Result::duplicatePeer);
+ BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1);
  if (!BEAST_EXPECT(slot2 == nullptr))
  logic.on_closed(slot2);
  logic.on_closed(slot1);
  }

+ void
+ test_peerLimitExceeded()
+ {
+ testcase("peer limit exceeded");
+ TestStore store;
+ TestChecker checker;
+ TestStopwatch clock;
+ Logic<TestChecker> logic(clock, store, checker, journal_);
+ {
+ Config c;
+ c.autoConnect = false;
+ c.listeningPort = 1024;
+ c.ipLimit = 2;
+ logic.config(c);
  }
+
+ auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
+ auto const [slot, r] = logic.new_inbound_slot(
+ local, beast::IP::Endpoint::from_string("55.104.0.2:1025"));
+ BEAST_EXPECT(slot != nullptr);
+ BEAST_EXPECT(r == Result::success);
+
+ auto const [slot1, r1] = logic.new_inbound_slot(
+ local, beast::IP::Endpoint::from_string("55.104.0.2:1026"));
+ BEAST_EXPECT(slot1 != nullptr);
+ BEAST_EXPECT(r1 == Result::success);
+
+ auto const [slot2, r2] = logic.new_inbound_slot(
+ local, beast::IP::Endpoint::from_string("55.104.0.2:1027"));
+ BEAST_EXPECT(r2 == Result::ipLimitExceeded);
+
+ if (!BEAST_EXPECT(slot2 == nullptr))
+ logic.on_closed(slot2);
+ logic.on_closed(slot1);
+ logic.on_closed(slot);
+ }
+
+ void
+ test_activate_duplicate_peer()
+ {
+ testcase("test activate duplicate peer");
+ TestStore store;
+ TestChecker checker;
+ TestStopwatch clock;
+ Logic<TestChecker> logic(clock, store, checker, journal_);
+ {
+ Config c;
+ c.autoConnect = false;
+ c.listeningPort = 1024;
+ c.ipLimit = 2;
+ logic.config(c);
+ }
+
+ auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
+
+ PublicKey const pk1(randomKeyPair(KeyType::secp256k1).first);
+
+ auto const [slot, rSlot] = logic.new_outbound_slot(
+ beast::IP::Endpoint::from_string("55.104.0.2:1025"));
+ BEAST_EXPECT(slot != nullptr);
+ BEAST_EXPECT(rSlot == Result::success);
+
+ auto const [slot2, r2Slot] = logic.new_outbound_slot(
+ beast::IP::Endpoint::from_string("55.104.0.2:1026"));
+ BEAST_EXPECT(slot2 != nullptr);
+ BEAST_EXPECT(r2Slot == Result::success);
+
+ BEAST_EXPECT(logic.onConnected(slot, local));
+ BEAST_EXPECT(logic.onConnected(slot2, local));
+
+ BEAST_EXPECT(logic.activate(slot, pk1, false) == Result::success);
+
+ // activating a different slot with the same node ID (pk) must fail
+ BEAST_EXPECT(
+ logic.activate(slot2, pk1, false) == Result::duplicatePeer);
+
+ logic.on_closed(slot);
+
+ // accept the same key for a new slot after removing the old slot
+ BEAST_EXPECT(logic.activate(slot2, pk1, false) == Result::success);
+ logic.on_closed(slot2);
+ }
+
+ void
+ test_activate_inbound_disabled()
+ {
+ testcase("test activate inbound disabled");
+ TestStore store;
+ TestChecker checker;
+ TestStopwatch clock;
+ Logic<TestChecker> logic(clock, store, checker, journal_);
+ {
+ Config c;
+ c.autoConnect = false;
+ c.listeningPort = 1024;
+ c.ipLimit = 2;
+ logic.config(c);
+ }
+
+ PublicKey const pk1(randomKeyPair(KeyType::secp256k1).first);
+ auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024");
+
+ auto const [slot, rSlot] = logic.new_inbound_slot(
+ local, beast::IP::Endpoint::from_string("55.104.0.2:1025"));
+ BEAST_EXPECT(slot != nullptr);
+ BEAST_EXPECT(rSlot == Result::success);
+
+ BEAST_EXPECT(
+ logic.activate(slot, pk1, false) == Result::inboundDisabled);
+
+ {
+ Config c;
+ c.autoConnect = false;
+ c.listeningPort = 1024;
+ c.ipLimit = 2;
+ c.inPeers = 1;
+ logic.config(c);
+ }
+ // new inbound slot must succeed when inbound connections are enabled
+ BEAST_EXPECT(logic.activate(slot, pk1, false) == Result::success);
+
+ // creating a new inbound slot must succeed as IP Limit is not exceeded
+ auto const [slot2, r2Slot] = logic.new_inbound_slot(
+ local, beast::IP::Endpoint::from_string("55.104.0.2:1026"));
+ BEAST_EXPECT(slot2 != nullptr);
+ BEAST_EXPECT(r2Slot == Result::success);
+
+ PublicKey const pk2(randomKeyPair(KeyType::secp256k1).first);
+
+ // an inbound slot exceeding inPeers limit must fail
+ BEAST_EXPECT(logic.activate(slot2, pk2, false) == Result::full);
+
+ logic.on_closed(slot2);
+ logic.on_closed(slot);
+ }
+
+ void
+ test_addFixedPeer_no_port()
+ {
+ testcase("test addFixedPeer no port");
+ TestStore store;
+ TestChecker checker;
+ TestStopwatch clock;
+ Logic<TestChecker> logic(clock, store, checker, journal_);
+ try
+ {
+ logic.addFixedPeer(
+ "test", beast::IP::Endpoint::from_string("65.0.0.2"));
+ fail("invalid endpoint successfully added");
+ }
+ catch (std::runtime_error const& e)
+ {
+ pass();
+ }
+ }
+
+ void
+ test_onConnected_self_connection()
+ {
+ testcase("test onConnected self connection");
+ TestStore store;
+ TestChecker checker;
+ TestStopwatch clock;
+ Logic<TestChecker> logic(clock, store, checker, journal_);
+
+ auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1234");
+ auto const [slot, r] = logic.new_outbound_slot(local);
+ BEAST_EXPECT(slot != nullptr);
+ BEAST_EXPECT(r == Result::success);
+
+ // Must fail when a slot is to our own IP address
+ BEAST_EXPECT(!logic.onConnected(slot, local));
+ logic.on_closed(slot);
  }

  void
  test_config()
  {
- // if peers_max is configured then peers_in_max and peers_out_max are
- // ignored
+ // if peers_max is configured then peers_in_max and peers_out_max
+ // are ignored
  auto run = [&](std::string const& test,
  std::optional<std::uint16_t> maxPeers,
  std::optional<std::uint16_t> maxIn,

@@ -282,13 +445,21 @@ public:
  Counts counts;
  counts.onConfig(config);
  BEAST_EXPECT(
- counts.out_max() == expectOut &&
- counts.inboundSlots() == expectIn &&
+ counts.out_max() == expectOut && counts.in_max() == expectIn &&
  config.ipLimit == expectIpLimit);
+
+ TestStore store;
+ TestChecker checker;
+ TestStopwatch clock;
+ Logic<TestChecker> logic(clock, store, checker, journal_);
+ logic.config(config);
+
+ BEAST_EXPECT(logic.config() == config);
  };

  // if max_peers == 0 => maxPeers = 21,
- // else if max_peers < 10 => maxPeers = 10 else maxPeers = max_peers
+ // else if max_peers < 10 => maxPeers = 10 else maxPeers =
+ // max_peers
  // expectOut => if legacy => max(0.15 * maxPeers, 10),
  // if legacy && !wantIncoming => maxPeers else max_out_peers
  // expectIn => if legacy && wantIncoming => maxPeers - outPeers

@@ -364,6 +535,11 @@ public:
  test_duplicateInOut();
  test_config();
  test_invalid_config();
+ test_peerLimitExceeded();
+ test_activate_duplicate_peer();
+ test_activate_inbound_disabled();
+ test_addFixedPeer_no_port();
+ test_onConnected_self_connection();
  }
  };

[File diff suppressed because it is too large]
@@ -195,14 +195,16 @@ OverlayImpl::onHandoff(
  if (consumer.disconnect(journal))
  return handoff;

- auto const slot = m_peerFinder->new_inbound_slot(
+ auto const [slot, result] = m_peerFinder->new_inbound_slot(
  beast::IPAddressConversion::from_asio(local_endpoint),
  beast::IPAddressConversion::from_asio(remote_endpoint));

  if (slot == nullptr)
  {
- // self-connect, close
+ // connection refused either IP limit exceeded or self-connect
  handoff.moved = false;
+ JLOG(journal.debug())
+ << "Peer " << remote_endpoint << " refused, " << to_string(result);
  return handoff;
  }

@@ -402,10 +404,11 @@ OverlayImpl::connect(beast::IP::Endpoint const& remote_endpoint)
  return;
  }

- auto const slot = peerFinder().new_outbound_slot(remote_endpoint);
+ auto const [slot, result] = peerFinder().new_outbound_slot(remote_endpoint);
  if (slot == nullptr)
  {
- JLOG(journal_.debug()) << "Connect: No slot for " << remote_endpoint;
+ JLOG(journal_.debug()) << "Connect: No slot for " << remote_endpoint
+ << ": " << to_string(result);
  return;
  }

@@ -109,6 +109,9 @@ struct Config
  std::uint16_t port,
  bool validationPublicKey,
  int ipLimit);
+
+ friend bool
+ operator==(Config const& lhs, Config const& rhs);
  };

  //------------------------------------------------------------------------------

@@ -136,7 +139,13 @@ using Endpoints = std::vector<Endpoint>;
  //------------------------------------------------------------------------------

  /** Possible results from activating a slot. */
- enum class Result { duplicate, full, success };
+ enum class Result {
+ inboundDisabled,
+ duplicatePeer,
+ ipLimitExceeded,
+ full,
+ success
+ };

  /**
  * @brief Converts a `Result` enum value to its string representation.

@@ -157,12 +166,16 @@ to_string(Result result) noexcept
  {
  switch (result)
  {
- case Result::success:
- return "success";
- case Result::duplicate:
- return "duplicate connection";
+ case Result::inboundDisabled:
+ return "inbound disabled";
+ case Result::duplicatePeer:
+ return "peer already connected";
+ case Result::ipLimitExceeded:
+ return "ip limit exceeded";
  case Result::full:
  return "slots full";
+ case Result::success:
+ return "success";
  }

  return "unknown";

@@ -234,7 +247,7 @@ public:
  If nullptr is returned, then the slot could not be assigned.
  Usually this is because of a detected self-connection.
  */
- virtual std::shared_ptr<Slot>
+ virtual std::pair<std::shared_ptr<Slot>, Result>
  new_inbound_slot(
  beast::IP::Endpoint const& local_endpoint,
  beast::IP::Endpoint const& remote_endpoint) = 0;

@@ -243,7 +256,7 @@ public:
  If nullptr is returned, then the slot could not be assigned.
  Usually this is because of a duplicate connection.
  */
- virtual std::shared_ptr<Slot>
+ virtual std::pair<std::shared_ptr<Slot>, Result>
  new_outbound_slot(beast::IP::Endpoint const& remote_endpoint) = 0;

  /** Called when mtENDPOINTS is received. */
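A minimal, self-contained sketch (not rippled code) of the caller-side pattern for the new slot API declared above: new_inbound_slot/new_outbound_slot now return a {slot, Result} pair, and the commit's own call sites in OverlayImpl::onHandoff and OverlayImpl::connect, shown earlier in this diff, unpack the pair with structured bindings and log to_string(result) when the slot is refused. The stand-in types and the new_outbound_slot function below are assumptions for illustration only.

#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct Slot {};  // stand-in for PeerFinder::Slot

enum class Result { inboundDisabled, duplicatePeer, ipLimitExceeded, full, success };

std::string to_string(Result result) noexcept
{
    switch (result)
    {
        case Result::inboundDisabled: return "inbound disabled";
        case Result::duplicatePeer:   return "peer already connected";
        case Result::ipLimitExceeded: return "ip limit exceeded";
        case Result::full:            return "slots full";
        case Result::success:         return "success";
    }
    return "unknown";
}

// Hypothetical stand-in for new_outbound_slot: refuses a duplicate peer.
std::pair<std::shared_ptr<Slot>, Result>
new_outbound_slot(bool alreadyConnected)
{
    if (alreadyConnected)
        return {nullptr, Result::duplicatePeer};
    return {std::make_shared<Slot>(), Result::success};
}

int main()
{
    // Caller-side pattern used by the commit: structured bindings plus reason logging.
    auto const [slot, result] = new_outbound_slot(/*alreadyConnected=*/true);
    if (slot == nullptr)
        std::cout << "Connect: No slot: " << to_string(result) << '\n';
}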
@@ -163,7 +163,7 @@ public:

  /** Returns the total number of inbound slots. */
  int
- inboundSlots() const
+ in_max() const
  {
  return m_in_max;
  }
@@ -172,9 +172,7 @@ public:
  void
  addFixedPeer(std::string const& name, beast::IP::Endpoint const& ep)
  {
- std::vector<beast::IP::Endpoint> v;
- v.push_back(ep);
- addFixedPeer(name, v);
+ addFixedPeer(name, std::vector<beast::IP::Endpoint>{ep});
  }

  void

@@ -261,7 +259,7 @@ public:

  //--------------------------------------------------------------------------

- SlotImp::ptr
+ std::pair<SlotImp::ptr, Result>
  new_inbound_slot(
  beast::IP::Endpoint const& local_endpoint,
  beast::IP::Endpoint const& remote_endpoint)

@@ -277,12 +275,12 @@ public:
  {
  auto const count =
  connectedAddresses_.count(remote_endpoint.address());
- if (count > config_.ipLimit)
+ if (count + 1 > config_.ipLimit)
  {
  JLOG(m_journal.debug())
  << beast::leftw(18) << "Logic dropping inbound "
  << remote_endpoint << " because of ip limits.";
- return SlotImp::ptr();
+ return {SlotImp::ptr(), Result::ipLimitExceeded};
  }
  }

@@ -292,7 +290,7 @@ public:
  JLOG(m_journal.debug())
  << beast::leftw(18) << "Logic dropping " << remote_endpoint
  << " as duplicate incoming";
- return SlotImp::ptr();
+ return {SlotImp::ptr(), Result::duplicatePeer};
  }

  // Create the slot

@@ -314,11 +312,11 @@ public:
  // Update counts
  counts_.add(*slot);

- return result.first->second;
+ return {result.first->second, Result::success};
  }

  // Can't check for self-connect because we don't know the local endpoint
- SlotImp::ptr
+ std::pair<SlotImp::ptr, Result>
  new_outbound_slot(beast::IP::Endpoint const& remote_endpoint)
  {
  JLOG(m_journal.debug())

@@ -332,7 +330,7 @@ public:
  JLOG(m_journal.debug())
  << beast::leftw(18) << "Logic dropping " << remote_endpoint
  << " as duplicate connect";
- return SlotImp::ptr();
+ return {SlotImp::ptr(), Result::duplicatePeer};
  }

  // Create the slot

@@ -353,7 +351,7 @@ public:
  // Update counts
  counts_.add(*slot);

- return result.first->second;
+ return {result.first->second, Result::success};
  }

  bool

@@ -417,7 +415,7 @@ public:

  // Check for duplicate connection by key
  if (keys_.find(key) != keys_.end())
- return Result::duplicate;
+ return Result::duplicatePeer;

  // If the peer belongs to a cluster or is reserved,
  // update the slot to reflect that.

@@ -430,6 +428,8 @@ public:
  {
  if (!slot->inbound())
  bootcache_.on_success(slot->remote_endpoint());
+ if (slot->inbound() && counts_.in_max() == 0)
+ return Result::inboundDisabled;
  return Result::full;
  }

@@ -651,7 +651,7 @@ public:
  // 2. We have slots
  // 3. We haven't failed the firewalled test
  //
- if (config_.wantIncoming && counts_.inboundSlots() > 0)
+ if (config_.wantIncoming && counts_.in_max() > 0)
  {
  Endpoint ep;
  ep.hops = 0;
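A small standalone sketch (not rippled code) of the off-by-one that the `count + 1 > config_.ipLimit` change above addresses: `count` is how many peers are already connected from the remote address, so the new comparison also counts the connection being admitted. With ipLimit set to 2, as in the new test_peerLimitExceeded test, the old form would still admit a third connection from the same address.

#include <iostream>

// Old and new forms of the inbound IP-limit check, reduced to a predicate.
bool acceptOld(int count, int ipLimit) { return !(count > ipLimit); }
bool acceptNew(int count, int ipLimit) { return !(count + 1 > ipLimit); }

int main()
{
    int const ipLimit = 2;  // same value as the new test_peerLimitExceeded test
    std::cout << std::boolalpha;
    for (int count = 0; count <= 2; ++count)
        std::cout << "already connected: " << count
                  << "  old accepts: " << acceptOld(count, ipLimit)
                  << "  new accepts: " << acceptNew(count, ipLimit) << '\n';
    // old accepts a third connection (count == 2); new refuses it.
}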
@@ -34,6 +34,17 @@ Config::Config()
  {
  }

+ bool
+ operator==(Config const& lhs, Config const& rhs)
+ {
+ return lhs.autoConnect == rhs.autoConnect &&
+ lhs.peerPrivate == rhs.peerPrivate &&
+ lhs.wantIncoming == rhs.wantIncoming && lhs.inPeers == rhs.inPeers &&
+ lhs.maxPeers == rhs.maxPeers && lhs.outPeers == rhs.outPeers &&
+ lhs.features == lhs.features && lhs.ipLimit == rhs.ipLimit &&
+ lhs.listeningPort == rhs.listeningPort;
+ }
+
  std::size_t
  Config::calcOutPeers() const
  {
@@ -125,7 +125,7 @@ public:

  //--------------------------------------------------------------------------

- std::shared_ptr<Slot>
+ std::pair<std::shared_ptr<Slot>, Result>
  new_inbound_slot(
  beast::IP::Endpoint const& local_endpoint,
  beast::IP::Endpoint const& remote_endpoint) override

@@ -133,7 +133,7 @@ public:
  return m_logic.new_inbound_slot(local_endpoint, remote_endpoint);
  }

- std::shared_ptr<Slot>
+ std::pair<std::shared_ptr<Slot>, Result>
  new_outbound_slot(beast::IP::Endpoint const& remote_endpoint) override
  {
  return m_logic.new_outbound_slot(remote_endpoint);
@@ -190,7 +190,7 @@ getAccountObjects(

  auto& jvObjects = (jvResult[jss::account_objects] = Json::arrayValue);

- // this is a mutable version of limit, used to seemlessly switch
+ // this is a mutable version of limit, used to seamlessly switch
  // to iterating directory entries when nftokenpages are exhausted
  uint32_t mlimit = limit;


@@ -373,7 +373,7 @@ ledgerFromRequest(T& ledger, JsonContext& context)
  indexValue = legacyLedger;
  }

- if (hashValue)
+ if (!hashValue.isNull())
  {
  if (!hashValue.isString())
  return {rpcINVALID_PARAMS, "ledgerHashNotString"};

@@ -384,6 +384,9 @@ ledgerFromRequest(T& ledger, JsonContext& context)
  return getLedger(ledger, ledgerHash, context);
  }

+ if (!indexValue.isConvertibleTo(Json::stringValue))
+ return {rpcINVALID_PARAMS, "ledgerIndexMalformed"};
+
  auto const index = indexValue.asString();

  if (index == "current" || index.empty())

@@ -395,11 +398,11 @@ ledgerFromRequest(T& ledger, JsonContext& context)
  if (index == "closed")
  return getLedger(ledger, LedgerShortcut::CLOSED, context);

- std::uint32_t iVal;
- if (beast::lexicalCastChecked(iVal, index))
- return getLedger(ledger, iVal, context);
+ std::uint32_t val;
+ if (!beast::lexicalCastChecked(val, index))

  return {rpcINVALID_PARAMS, "ledgerIndexMalformed"};

+ return getLedger(ledger, val, context);
  }
  } // namespace

@@ -586,7 +589,7 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context)
  return Status::OK;
  }

- // Explicit instantiaion of above three functions
+ // Explicit instantiation of above three functions
  template Status
  getLedger<>(std::shared_ptr<ReadView const>&, uint32_t, Context&);

[File diff suppressed because it is too large]
src/xrpld/rpc/handlers/LedgerEntryHelpers.h (new file, 299 lines)

@@ -0,0 +1,299 @@
+ //------------------------------------------------------------------------------
+ /*
+ This file is part of rippled: https://github.com/ripple/rippled
+ Copyright (c) 2012-2025 Ripple Labs Inc.
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+ //==============================================================================
+
+ #include <xrpld/rpc/detail/RPCHelpers.h>
+
+ #include <xrpl/basics/StringUtilities.h>
+ #include <xrpl/basics/strHex.h>
+ #include <xrpl/beast/core/LexicalCast.h>
+ #include <xrpl/json/json_errors.h>
+ #include <xrpl/protocol/ErrorCodes.h>
+ #include <xrpl/protocol/Indexes.h>
+ #include <xrpl/protocol/RPCErr.h>
+ #include <xrpl/protocol/STXChainBridge.h>
+ #include <xrpl/protocol/jss.h>
+
+ #include <functional>
+
+ namespace ripple {
+
+ namespace LedgerEntryHelpers {
+
+ Unexpected<Json::Value>
+ missingFieldError(
+ Json::StaticString const field,
+ std::optional<std::string> err = std::nullopt)
+ {
+ Json::Value json = Json::objectValue;
+ auto error = RPC::missing_field_message(std::string(field.c_str()));
+ json[jss::error] = err.value_or("malformedRequest");
+ json[jss::error_code] = rpcINVALID_PARAMS;
+ json[jss::error_message] = std::move(error);
+ return Unexpected(json);
+ }
+
+ Unexpected<Json::Value>
+ invalidFieldError(
+ std::string const& err,
+ Json::StaticString const field,
+ std::string const& type)
+ {
+ Json::Value json = Json::objectValue;
+ auto error = RPC::expected_field_message(field, type);
+ json[jss::error] = err;
+ json[jss::error_code] = rpcINVALID_PARAMS;
+ json[jss::error_message] = std::move(error);
+ return Unexpected(json);
+ }
+
+ Unexpected<Json::Value>
+ malformedError(std::string const& err, std::string const& message)
+ {
+ Json::Value json = Json::objectValue;
+ json[jss::error] = err;
+ json[jss::error_code] = rpcINVALID_PARAMS;
+ json[jss::error_message] = message;
+ return Unexpected(json);
+ }
+
+ Expected<bool, Json::Value>
+ hasRequired(
+ Json::Value const& params,
+ std::initializer_list<Json::StaticString> fields,
+ std::optional<std::string> err = std::nullopt)
+ {
+ for (auto const field : fields)
+ {
+ if (!params.isMember(field) || params[field].isNull())
+ {
+ return missingFieldError(field, err);
+ }
+ }
+ return true;
+ }
+
+ template <class T>
+ std::optional<T>
+ parse(Json::Value const& param);
+
+ template <class T>
+ Expected<T, Json::Value>
+ required(
+ Json::Value const& params,
+ Json::StaticString const fieldName,
+ std::string const& err,
+ std::string const& expectedType)
+ {
+ if (!params.isMember(fieldName) || params[fieldName].isNull())
+ {
+ return missingFieldError(fieldName);
+ }
+ if (auto obj = parse<T>(params[fieldName]))
+ {
+ return *obj;
+ }
+ return invalidFieldError(err, fieldName, expectedType);
+ }
+
+ template <>
+ std::optional<AccountID>
+ parse(Json::Value const& param)
+ {
+ if (!param.isString())
+ return std::nullopt;
+
+ auto const account = parseBase58<AccountID>(param.asString());
+ if (!account || account->isZero())
+ {
+ return std::nullopt;
+ }
+
+ return account;
+ }
+
+ Expected<AccountID, Json::Value>
+ requiredAccountID(
+ Json::Value const& params,
+ Json::StaticString const fieldName,
+ std::string const& err)
+ {
+ return required<AccountID>(params, fieldName, err, "AccountID");
+ }
+
+ std::optional<Blob>
+ parseHexBlob(Json::Value const& param, std::size_t maxLength)
+ {
+ if (!param.isString())
+ return std::nullopt;
+
+ auto const blob = strUnHex(param.asString());
+ if (!blob || blob->empty() || blob->size() > maxLength)
+ return std::nullopt;
+
+ return blob;
+ }
+
+ Expected<Blob, Json::Value>
+ requiredHexBlob(
+ Json::Value const& params,
+ Json::StaticString const fieldName,
+ std::size_t maxLength,
+ std::string const& err)
+ {
+ if (!params.isMember(fieldName) || params[fieldName].isNull())
+ {
+ return missingFieldError(fieldName);
+ }
+ if (auto blob = parseHexBlob(params[fieldName], maxLength))
+ {
+ return *blob;
+ }
+ return invalidFieldError(err, fieldName, "hex string");
+ }
+
+ template <>
+ std::optional<std::uint32_t>
+ parse(Json::Value const& param)
+ {
+ if (param.isUInt() || (param.isInt() && param.asInt() >= 0))
+ return param.asUInt();
+
+ if (param.isString())
+ {
+ std::uint32_t v;
+ if (beast::lexicalCastChecked(v, param.asString()))
+ return v;
+ }
+
+ return std::nullopt;
+ }
+
+ Expected<std::uint32_t, Json::Value>
+ requiredUInt32(
+ Json::Value const& params,
+ Json::StaticString const fieldName,
+ std::string const& err)
+ {
+ return required<std::uint32_t>(params, fieldName, err, "number");
+ }
+
+ template <>
+ std::optional<uint256>
+ parse(Json::Value const& param)
+ {
+ uint256 uNodeIndex;
+ if (!param.isString() || !uNodeIndex.parseHex(param.asString()))
+ {
+ return std::nullopt;
+ }
+
+ return uNodeIndex;
+ }
+
+ Expected<uint256, Json::Value>
+ requiredUInt256(
+ Json::Value const& params,
+ Json::StaticString const fieldName,
+ std::string const& err)
+ {
+ return required<uint256>(params, fieldName, err, "Hash256");
+ }
+
+ template <>
+ std::optional<uint192>
+ parse(Json::Value const& param)
+ {
+ uint192 field;
+ if (!param.isString() || !field.parseHex(param.asString()))
+ {
+ return std::nullopt;
+ }
+
+ return field;
+ }
+
+ Expected<uint192, Json::Value>
+ requiredUInt192(
+ Json::Value const& params,
+ Json::StaticString const fieldName,
+ std::string const& err)
+ {
+ return required<uint192>(params, fieldName, err, "Hash192");
+ }
+
+ Expected<STXChainBridge, Json::Value>
+ parseBridgeFields(Json::Value const& params)
+ {
+ if (auto const value = hasRequired(
+ params,
+ {jss::LockingChainDoor,
+ jss::LockingChainIssue,
+ jss::IssuingChainDoor,
+ jss::IssuingChainIssue});
+ !value)
+ {
+ return Unexpected(value.error());
+ }
+
+ auto const lockingChainDoor = requiredAccountID(
+ params, jss::LockingChainDoor, "malformedLockingChainDoor");
+ if (!lockingChainDoor)
+ {
+ return Unexpected(lockingChainDoor.error());
+ }
+
+ auto const issuingChainDoor = requiredAccountID(
+ params, jss::IssuingChainDoor, "malformedIssuingChainDoor");
+ if (!issuingChainDoor)
+ {
+ return Unexpected(issuingChainDoor.error());
+ }
+
+ Issue lockingChainIssue;
+ try
+ {
+ lockingChainIssue = issueFromJson(params[jss::LockingChainIssue]);
+ }
+ catch (std::runtime_error const& ex)
+ {
+ return invalidFieldError(
+ "malformedIssue", jss::LockingChainIssue, "Issue");
+ }
+
+ Issue issuingChainIssue;
+ try
+ {
+ issuingChainIssue = issueFromJson(params[jss::IssuingChainIssue]);
+ }
+ catch (std::runtime_error const& ex)
+ {
+ return invalidFieldError(
+ "malformedIssue", jss::IssuingChainIssue, "Issue");
+ }
+
+ return STXChainBridge(
+ *lockingChainDoor,
+ lockingChainIssue,
+ *issuingChainDoor,
+ issuingChainIssue);
+ }
+
+ } // namespace LedgerEntryHelpers
+
+ } // namespace ripple