Mirror of https://github.com/XRPLF/rippled.git (synced 2026-01-09 01:05:26 +00:00)

Compare commits: bthomee/me... ... a1q123456/... (34 commits)
Commits in this comparison:

75e402ad7a, 976fd8229b, 472bcf6b03, 2bf77cc8f6, 5e33ca56fd, 7c39c810eb,
a7792ebcae, 83ee3788e1, ae719b86d3, dd722f8b3f, 30190a5feb, 3e4cb67db9,
2acee44c29, ae351b81b4, fbe4f7dd9f, bce1520d4b, 1dab9323a0, 63ef46b676,
6f0767a99e, 9a3a58d0f2, 52439ebb2d, 622bb71cba, 528562792f, 34706ef0ac,
ab52fde56e, 671436c033, 5f3b3a6a1e, a70e60e0d8, 7fb8f5f751, 6fd30ebde1,
a1f6580e54, 32a3f0a867, ed6dcdb10f, 3adfa074bc
.github/actions/build-deps/action.yml (vendored, 12 lines changed)
@@ -31,14 +31,14 @@ runs:
       VERBOSITY: ${{ inputs.verbosity }}
     run: |
       echo 'Installing dependencies.'
-      mkdir -p '${{ env.BUILD_DIR }}'
-      cd '${{ env.BUILD_DIR }}'
+      mkdir -p "${BUILD_DIR}"
+      cd "${BUILD_DIR}"
       conan install \
         --output-folder . \
-        --build=${{ env.BUILD_OPTION }} \
+        --build="${BUILD_OPTION}" \
         --options:host='&:tests=True' \
         --options:host='&:xrpld=True' \
-        --settings:all build_type='${{ env.BUILD_TYPE }}' \
-        --conf:all tools.build:verbosity='${{ env.VERBOSITY }}' \
-        --conf:all tools.compilation:verbosity='${{ env.VERBOSITY }}' \
+        --settings:all build_type="${BUILD_TYPE}" \
+        --conf:all tools.build:verbosity="${VERBOSITY}" \
+        --conf:all tools.compilation:verbosity="${VERBOSITY}" \
         ..
.github/actions/setup-conan/action.yml (vendored, 4 lines changed)
@@ -39,8 +39,8 @@ runs:
       CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
       CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
     run: |
-      echo "Adding Conan remote '${{ env.CONAN_REMOTE_NAME }}' at '${{ env.CONAN_REMOTE_URL }}'."
-      conan remote add --index 0 --force '${{ env.CONAN_REMOTE_NAME }}' '${{ env.CONAN_REMOTE_URL }}'
+      echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
+      conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"

       echo 'Listing Conan remotes.'
       conan remote list
.github/workflows/publish-docs.yml (vendored, 4 lines changed)
@@ -48,8 +48,8 @@ jobs:
          doxygen --version
      - name: Build documentation
        run: |
-          mkdir -p ${{ env.BUILD_DIR }}
-          cd ${{ env.BUILD_DIR }}
+          mkdir -p "${BUILD_DIR}"
+          cd "${BUILD_DIR}"
          cmake -Donly_docs=ON ..
          cmake --build . --target docs --parallel $(nproc)
      - name: Publish documentation
.github/workflows/reusable-build.yml (vendored, 9 lines changed)
@@ -48,6 +48,7 @@ jobs:
    name: Build ${{ inputs.config_name }}
    runs-on: ${{ fromJSON(inputs.runs_on) }}
    container: ${{ inputs.image != '' && inputs.image || null }}
+    timeout-minutes: 60
    steps:
      - name: Cleanup workspace
        if: ${{ runner.os == 'macOS' }}
@@ -83,8 +84,8 @@ jobs:
          cmake \
            -G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
            -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-            -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \
-            ${{ env.CMAKE_ARGS }} \
+            -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
+            ${CMAKE_ARGS} \
            ..

      - name: Build the binary
@@ -96,9 +97,9 @@ jobs:
        run: |
          cmake \
            --build . \
-            --config ${{ env.BUILD_TYPE }} \
+            --config "${BUILD_TYPE}" \
            --parallel $(nproc) \
-            --target ${{ env.CMAKE_TARGET }}
+            --target "${CMAKE_TARGET}"

      - name: Upload rippled artifact
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
.github/workflows/reusable-notify-clio.yml (vendored, 8 lines changed)
@@ -51,7 +51,7 @@ jobs:
        run: |
          echo 'Generating user and channel.'
          echo "user=clio" >> "${GITHUB_OUTPUT}"
-          echo "channel=pr_${{ env.PR_NUMBER }}" >> "${GITHUB_OUTPUT}"
+          echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
          echo 'Extracting version.'
          echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
      - name: Calculate conan reference
@@ -66,13 +66,13 @@ jobs:
      - name: Log into Conan remote
        env:
          CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
-        run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
+        run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
      - name: Upload package
        env:
          CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
        run: |
          conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
-          conan upload --confirm --check --remote=${{ env.CONAN_REMOTE_NAME }} xrpl/${{ steps.conan_ref.outputs.conan_ref }}
+          conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
    outputs:
      conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}

@@ -88,4 +88,4 @@ jobs:
          gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
            -F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-            -F "client_payload[pr_url]=${{ env.PR_URL }}"
+            -F "client_payload[pr_url]=${PR_URL}"

@@ -38,4 +38,4 @@ jobs:
        env:
          GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
          GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
-        run: ./generate.py ${{ env.GENERATE_OPTION }} ${{ env.GENERATE_CONFIG }} >> "${GITHUB_OUTPUT}"
+        run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"
.github/workflows/reusable-test.yml (vendored, 1 line changed)
@@ -31,6 +31,7 @@ jobs:
    name: Test ${{ inputs.config_name }}
    runs-on: ${{ fromJSON(inputs.runs_on) }}
    container: ${{ inputs.image != '' && inputs.image || null }}
+    timeout-minutes: 30
    steps:
      - name: Download rippled artifact
        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
.github/workflows/upload-conan-deps.yml (vendored, 4 lines changed)
@@ -85,10 +85,10 @@ jobs:

      - name: Log into Conan remote
        if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
-        run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
+        run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"

      - name: Upload Conan packages
        if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
        env:
          FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
-        run: conan upload "*" --remote='${{ env.CONAN_REMOTE_NAME }}' --confirm ${{ env.FORCE_OPTION }}
+        run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}
@@ -975,6 +975,47 @@
#       number of ledger records online. Must be greater
#       than or equal to ledger_history.
#
+# Optional keys for NuDB only:
+#
+#   nudb_block_size     EXPERIMENTAL: Block size in bytes for NuDB storage.
+#       Must be a power of 2 between 4096 and 32768. Default is 4096.
+#
+#       This parameter controls the fundamental storage unit
+#       size for NuDB's internal data structures. The choice
+#       of block size can significantly impact performance
+#       depending on your storage hardware and filesystem:
+#
+#       - 4096 bytes: Optimal for most standard SSDs and
+#         traditional filesystems (ext4, NTFS, HFS+).
+#         Provides a good balance of performance and storage
+#         efficiency. Recommended for most deployments.
+#         Minimizes memory footprint and provides consistent
+#         low-latency access patterns across diverse hardware.
+#
+#       - 8192-16384 bytes: May improve performance on
+#         high-end NVMe SSDs and copy-on-write filesystems
+#         like ZFS or Btrfs that benefit from larger block
+#         alignment. Can reduce metadata overhead for large
+#         databases. Offers better sequential throughput and
+#         reduced I/O operations at the cost of higher memory
+#         usage per operation.
+#
+#       - 32768 bytes (32K): Maximum supported block size
+#         for high-performance scenarios with very fast
+#         storage. May increase memory usage and reduce
+#         efficiency for smaller databases. Best suited for
+#         enterprise environments with abundant RAM.
+#
+#       Performance testing is recommended before deploying
+#       any non-default block size in production environments.
+#
+#       Note: This setting cannot be changed after database
+#       creation without rebuilding the entire database.
+#       Choose carefully based on your hardware and expected
+#       database size.
+#
+#       Example: nudb_block_size=4096
+#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:
#
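The hunk above documents the new nudb_block_size key, but the backend change itself is not part of this capture. As a rough sketch of the validation that this documentation and the NuDBFactory_test.cpp tests later in this diff pin down: the value must parse as a plain integer and be a power of 2 in [4096, 32768]. The function name parseNudbBlockSize and the strict digits-only parse below are assumptions (the test comments suggest the real code parses with boost::lexical_cast); the error strings mirror the ones the tests expect.

#include <cstddef>
#include <stdexcept>
#include <string>

// Hypothetical sketch only, not the rippled implementation.
std::size_t
parseNudbBlockSize(std::string const& value)
{
    // Reject empty, signed, decimal, alphabetic, and whitespace-padded
    // input, matching the invalid cases the tests enumerate.
    if (value.empty() ||
        value.find_first_not_of("0123456789") != std::string::npos)
        throw std::runtime_error("Invalid nudb_block_size value: " + value);

    std::size_t const size = std::stoull(value);

    // A power of 2 has exactly one bit set.
    bool const powerOfTwo = size != 0 && (size & (size - 1)) == 0;
    if (!powerOfTwo || size < 4096 || size > 32768)
        throw std::runtime_error(
            "Invalid nudb_block_size: " + value +
            ". Must be power of 2 between 4096 and 32768");
    return size;
}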
@@ -1471,6 +1512,7 @@ secure_gateway = 127.0.0.1
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
+nudb_block_size=4096
online_delete=512
advisory_delete=0
@@ -109,15 +109,12 @@ XRPL_FIX (MasterKeyAsRegularKey, Supported::yes, VoteBehavior::DefaultYe
XRPL_FIX (TakerDryOfferRemoval, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(MultiSignReserve, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1578, Supported::yes, VoteBehavior::DefaultYes)
-// fix1515: Use liquidity from strands that consume max offers, but mark as dry
-XRPL_FIX (1515, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositPreauth, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1623, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1543, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1571, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositAuth, Supported::yes, VoteBehavior::DefaultYes)
-XRPL_FIX (1513, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYes)

// The following amendments are obsolete, but must remain supported
@@ -156,3 +153,5 @@ XRPL_RETIRE(fix1512)
XRPL_RETIRE(fix1523)
XRPL_RETIRE(fix1528)
XRPL_RETIRE(FlowCross)
+XRPL_RETIRE(fix1513)
+XRPL_RETIRE(fix1515)
@@ -909,7 +909,7 @@ TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
    Delegation::delegatable,
    featureSingleAssetVault,
-    mayDeleteMPT | mustModifyVault,
+    mayDeleteMPT | mayAuthorizeMPT | mustModifyVault,
    ({
        {sfVaultID, soeREQUIRED},
        {sfAmount, soeREQUIRED, soeMPTSupported},
@@ -1242,6 +1242,12 @@ addEmptyHolding(
    // If the line already exists, don't create it again.
    if (view.read(index))
        return tecDUPLICATE;

+    // Can the account cover the trust line reserve?
+    std::uint32_t const ownerCount = sleDst->at(sfOwnerCount);
+    if (priorBalance < view.fees().accountReserve(ownerCount + 1))
+        return tecNO_LINE_INSUF_RESERVE;
+
    return trustCreate(
        view,
        high,
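The added check asks whether the account's prior balance covers the owner reserve it would owe after taking on one more owned object (the trust line about to be created), hence accountReserve(ownerCount + 1). A compile-time illustration with assumed reserve values; real values come from the ledger's fee settings, and the real check uses XRPAmount rather than plain integers.

#include <cstdint>

// Illustrative arithmetic only, not rippled code. Assume the common shape
// accountReserve(n) = base + n * increment, with base = 10 XRP and
// increment = 2 XRP (assumed numbers).
constexpr std::uint64_t baseReserve = 10;
constexpr std::uint64_t ownerIncrement = 2;

constexpr std::uint64_t
accountReserve(std::uint32_t ownerCount)
{
    return baseReserve + ownerCount * ownerIncrement;
}

// With ownerCount = 3 and priorBalance = 17 XRP, the new trust line would
// raise the requirement to accountReserve(4) = 18 XRP, so
// priorBalance < accountReserve(ownerCount + 1) holds and the transaction
// fails with tecNO_LINE_INSUF_RESERVE.
static_assert(accountReserve(4) == 18);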
@@ -1346,14 +1346,11 @@ struct Flow_manual_test : public Flow_test
    {
        using namespace jtx;
        auto const all = testable_amendments();
-        FeatureBitset const f1513{fix1513};
        FeatureBitset const permDex{featurePermissionedDEX};

-        testWithFeats(all - f1513 - permDex);
        testWithFeats(all - permDex);
        testWithFeats(all);

-        testEmptyStrand(all - f1513 - permDex);
        testEmptyStrand(all - permDex);
        testEmptyStrand(all);
    }
@@ -5450,13 +5450,12 @@ class Offer_manual_test : public OfferBaseUtil_test
    {
        using namespace jtx;
        FeatureBitset const all{testable_amendments()};
-        FeatureBitset const f1513{fix1513};
        FeatureBitset const immediateOfferKilled{featureImmediateOfferKilled};
        FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval};
        FeatureBitset const fillOrKill{fixFillOrKill};
        FeatureBitset const permDEX{featurePermissionedDEX};

-        testAll(all - f1513 - immediateOfferKilled - permDEX);
        testAll(all - immediateOfferKilled - permDEX);
        testAll(all - immediateOfferKilled - fillOrKill - permDEX);
        testAll(all - fillOrKill - permDEX);
        testAll(all - permDEX);
@@ -59,14 +59,15 @@ class Vault_test : public beast::unit_test::suite
    testSequences()
    {
        using namespace test::jtx;
+        Account issuer{"issuer"};
+        Account owner{"owner"};
+        Account depositor{"depositor"};
+        Account charlie{"charlie"}; // authorized 3rd party
+        Account dave{"dave"};

-        auto const testSequence = [this](
+        auto const testSequence = [&, this](
                                      std::string const& prefix,
                                      Env& env,
-                                      Account const& issuer,
-                                      Account const& owner,
-                                      Account const& depositor,
-                                      Account const& charlie,
                                      Vault& vault,
                                      PrettyAsset const& asset) {
            auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
@@ -104,11 +105,9 @@ class Vault_test : public beast::unit_test::suite

            // Several 3rd party accounts which cannot receive funds
            Account alice{"alice"};
-            Account dave{"dave"};
            Account erin{"erin"}; // not authorized by issuer
-            env.fund(XRP(1000), alice, dave, erin);
+            env.fund(XRP(1000), alice, erin);
            env(fset(alice, asfDepositAuth));
-            env(fset(dave, asfRequireDest));
            env.close();

            {
@@ -328,19 +327,6 @@ class Vault_test : public beast::unit_test::suite
                env.close();
            }

-            {
-                testcase(
-                    prefix +
-                    " fail to withdraw with tag but without destination");
-                auto tx = vault.withdraw(
-                    {.depositor = depositor,
-                     .id = keylet.key,
-                     .amount = asset(1000)});
-                tx[sfDestinationTag] = "0";
-                env(tx, ter(temMALFORMED));
-                env.close();
-            }
-
            if (!asset.raw().native())
            {
                testcase(
@@ -368,12 +354,49 @@ class Vault_test : public beast::unit_test::suite
                env.close();
            }

+            {
+                testcase(prefix + " withdraw to 3rd party lsfRequireDestTag");
+                auto tx = vault.withdraw(
+                    {.depositor = depositor,
+                     .id = keylet.key,
+                     .amount = asset(50)});
+                tx[sfDestination] = dave.human();
+                tx[sfDestinationTag] = "0";
+                env(tx);
+                env.close();
+            }
+
+            {
+                testcase(prefix + " deposit again");
+                auto tx = vault.deposit(
+                    {.depositor = dave, .id = keylet.key, .amount = asset(50)});
+                env(tx);
+                env.close();
+            }
+
+            {
+                testcase(prefix + " fail to withdraw lsfRequireDestTag");
+                auto tx = vault.withdraw(
+                    {.depositor = dave, .id = keylet.key, .amount = asset(50)});
+                env(tx, ter{tecDST_TAG_NEEDED});
+                env.close();
+            }
+
+            {
+                testcase(prefix + " withdraw with tag");
+                auto tx = vault.withdraw(
+                    {.depositor = dave, .id = keylet.key, .amount = asset(50)});
+                tx[sfDestinationTag] = "0";
+                env(tx);
+                env.close();
+            }
+
            {
                testcase(prefix + " withdraw to authorized 3rd party");
                auto tx = vault.withdraw(
                    {.depositor = depositor,
                     .id = keylet.key,
-                     .amount = asset(100)});
+                     .amount = asset(50)});
                tx[sfDestination] = charlie.human();
                env(tx);
                env.close();
@@ -523,80 +546,56 @@ class Vault_test : public beast::unit_test::suite
            }
        };

-        auto testCases = [this, &testSequence](
+        auto testCases = [&, this](
                             std::string prefix,
-                             std::function<PrettyAsset(
-                                 Env & env,
-                                 Account const& issuer,
-                                 Account const& owner,
-                                 Account const& depositor,
-                                 Account const& charlie)> setup) {
+                             std::function<PrettyAsset(Env & env)> setup) {
            Env env{*this, testable_amendments() | featureSingleAssetVault};
-            Account issuer{"issuer"};
-            Account owner{"owner"};
-            Account depositor{"depositor"};
-            Account charlie{"charlie"}; // authorized 3rd party
-
            Vault vault{env};
-            env.fund(XRP(1000), issuer, owner, depositor, charlie);
+            env.fund(XRP(1000), issuer, owner, depositor, charlie, dave);
            env.close();
            env(fset(issuer, asfAllowTrustLineClawback));
            env(fset(issuer, asfRequireAuth));
+            env(fset(dave, asfRequireDest));
            env.close();
            env.require(flags(issuer, asfAllowTrustLineClawback));
            env.require(flags(issuer, asfRequireAuth));

-            PrettyAsset asset = setup(env, issuer, owner, depositor, charlie);
-            testSequence(
-                prefix, env, issuer, owner, depositor, charlie, vault, asset);
+            PrettyAsset asset = setup(env);
+            testSequence(prefix, env, vault, asset);
        };

-        testCases(
-            "XRP",
-            [](Env& env,
-               Account const& issuer,
-               Account const& owner,
-               Account const& depositor,
-               Account const& charlie) -> PrettyAsset {
-                return {xrpIssue(), 1'000'000};
-            });
+        testCases("XRP", [&](Env& env) -> PrettyAsset {
+            return {xrpIssue(), 1'000'000};
+        });

-        testCases(
-            "IOU",
-            [](Env& env,
-               Account const& issuer,
-               Account const& owner,
-               Account const& depositor,
-               Account const& charlie) -> Asset {
-                PrettyAsset asset = issuer["IOU"];
-                env(trust(owner, asset(1000)));
-                env(trust(depositor, asset(1000)));
-                env(trust(charlie, asset(1000)));
-                env(trust(issuer, asset(0), owner, tfSetfAuth));
-                env(trust(issuer, asset(0), depositor, tfSetfAuth));
-                env(trust(issuer, asset(0), charlie, tfSetfAuth));
-                env(pay(issuer, depositor, asset(1000)));
-                env.close();
-                return asset;
-            });
+        testCases("IOU", [&](Env& env) -> Asset {
+            PrettyAsset asset = issuer["IOU"];
+            env(trust(owner, asset(1000)));
+            env(trust(depositor, asset(1000)));
+            env(trust(charlie, asset(1000)));
+            env(trust(dave, asset(1000)));
+            env(trust(issuer, asset(0), owner, tfSetfAuth));
+            env(trust(issuer, asset(0), depositor, tfSetfAuth));
+            env(trust(issuer, asset(0), charlie, tfSetfAuth));
+            env(trust(issuer, asset(0), dave, tfSetfAuth));
+            env(pay(issuer, depositor, asset(1000)));
+            env.close();
+            return asset;
+        });

-        testCases(
-            "MPT",
-            [](Env& env,
-               Account const& issuer,
-               Account const& owner,
-               Account const& depositor,
-               Account const& charlie) -> Asset {
-                MPTTester mptt{env, issuer, mptInitNoFund};
-                mptt.create(
-                    {.flags =
-                         tfMPTCanClawback | tfMPTCanTransfer | tfMPTCanLock});
-                PrettyAsset asset = mptt.issuanceID();
-                mptt.authorize({.account = depositor});
-                mptt.authorize({.account = charlie});
-                env(pay(issuer, depositor, asset(1000)));
-                env.close();
-                return asset;
-            });
+        testCases("MPT", [&](Env& env) -> Asset {
+            MPTTester mptt{env, issuer, mptInitNoFund};
+            mptt.create(
+                {.flags = tfMPTCanClawback | tfMPTCanTransfer | tfMPTCanLock});
+            PrettyAsset asset = mptt.issuanceID();
+            mptt.authorize({.account = depositor});
+            mptt.authorize({.account = charlie});
+            mptt.authorize({.account = dave});
+            env(pay(issuer, depositor, asset(1000)));
+            env.close();
+            return asset;
+        });
    }

    void
@@ -1672,6 +1671,7 @@ class Vault_test : public beast::unit_test::suite
        {
            bool enableClawback = true;
            bool requireAuth = true;
+            int initialXRP = 1000;
        };

        auto testCase = [this](
@@ -1688,7 +1688,7 @@ class Vault_test : public beast::unit_test::suite
            Account issuer{"issuer"};
            Account owner{"owner"};
            Account depositor{"depositor"};
-            env.fund(XRP(1000), issuer, owner, depositor);
+            env.fund(XRP(args.initialXRP), issuer, owner, depositor);
            env.close();
            Vault vault{env};
@@ -1868,9 +1868,7 @@ class Vault_test : public beast::unit_test::suite
                PrettyAsset const& asset,
                Vault& vault,
                MPTTester& mptt) {
-                testcase(
-                    "MPT 3rd party without MPToken cannot be withdrawal "
-                    "destination");
+                testcase("MPT depositor without MPToken, auth required");

                auto [tx, keylet] =
                    vault.create({.owner = owner, .asset = asset});
@@ -1880,10 +1878,32 @@ class Vault_test : public beast::unit_test::suite
                tx = vault.deposit(
                    {.depositor = depositor,
                     .id = keylet.key,
-                     .amount = asset(100)});
+                     .amount = asset(1000)});
                env(tx);
                env.close();

+                {
+                    // Remove depositor MPToken and it will not be re-created
+                    mptt.authorize(
+                        {.account = depositor, .flags = tfMPTUnauthorize});
+                    env.close();
+
+                    auto const mptoken =
+                        keylet::mptoken(mptt.issuanceID(), depositor);
+                    auto const sleMPT1 = env.le(mptoken);
+                    BEAST_EXPECT(sleMPT1 == nullptr);
+
+                    tx = vault.withdraw(
+                        {.depositor = depositor,
+                         .id = keylet.key,
+                         .amount = asset(100)});
+                    env(tx, ter{tecNO_AUTH});
+                    env.close();
+
+                    auto const sleMPT2 = env.le(mptoken);
+                    BEAST_EXPECT(sleMPT2 == nullptr);
+                }
+
                {
                    // Set destination to 3rd party without MPToken
                    Account charlie{"charlie"};
@@ -1898,7 +1918,7 @@ class Vault_test : public beast::unit_test::suite
                    env(tx, ter(tecNO_AUTH));
                }
            },
-            {.requireAuth = false});
+            {.requireAuth = true});

        testCase(
            [this](
@@ -1909,7 +1929,7 @@ class Vault_test : public beast::unit_test::suite
                PrettyAsset const& asset,
                Vault& vault,
                MPTTester& mptt) {
-                testcase("MPT depositor without MPToken cannot withdraw");
+                testcase("MPT depositor without MPToken, no auth required");

                auto [tx, keylet] =
                    vault.create({.owner = owner, .asset = asset});
@@ -1917,7 +1937,6 @@ class Vault_test : public beast::unit_test::suite
                env.close();
                auto v = env.le(keylet);
                BEAST_EXPECT(v);
-                MPTID share = (*v)[sfShareMPTID];

                tx = vault.deposit(
                    {.depositor = depositor,
@@ -1927,41 +1946,120 @@ class Vault_test : public beast::unit_test::suite
                env.close();

                {
-                    // Remove depositor's MPToken and withdraw will fail
+                    // Remove depositor's MPToken and it will be re-created
                    mptt.authorize(
                        {.account = depositor, .flags = tfMPTUnauthorize});
                    env.close();

                    auto const mptoken =
-                        env.le(keylet::mptoken(mptt.issuanceID(), depositor));
-                    BEAST_EXPECT(mptoken == nullptr);
+                        keylet::mptoken(mptt.issuanceID(), depositor);
+                    auto const sleMPT1 = env.le(mptoken);
+                    BEAST_EXPECT(sleMPT1 == nullptr);

                    tx = vault.withdraw(
                        {.depositor = depositor,
                         .id = keylet.key,
                         .amount = asset(100)});
-                    env(tx, ter(tecNO_AUTH));
+                    env(tx);
                    env.close();
+
+                    auto const sleMPT2 = env.le(mptoken);
+                    BEAST_EXPECT(sleMPT2 != nullptr);
+                    BEAST_EXPECT(sleMPT2->at(sfMPTAmount) == 100);
                }

                {
-                    // Restore depositor's MPToken and withdraw will succeed
-                    mptt.authorize({.account = depositor});
+                    // Remove 3rd party MPToken and it will not be re-created
+                    mptt.authorize(
+                        {.account = owner, .flags = tfMPTUnauthorize});
                    env.close();

+                    auto const mptoken =
+                        keylet::mptoken(mptt.issuanceID(), owner);
+                    auto const sleMPT1 = env.le(mptoken);
+                    BEAST_EXPECT(sleMPT1 == nullptr);
+
                    tx = vault.withdraw(
                        {.depositor = depositor,
                         .id = keylet.key,
-                         .amount = asset(1000)});
-                    env(tx);
+                         .amount = asset(100)});
+                    tx[sfDestination] = owner.human();
+                    env(tx, ter(tecNO_AUTH));
                    env.close();

-                    // Withdraw removed shares MPToken
-                    auto const mptSle =
-                        env.le(keylet::mptoken(share, depositor.id()));
-                    BEAST_EXPECT(mptSle == nullptr);
+                    auto const sleMPT2 = env.le(mptoken);
+                    BEAST_EXPECT(sleMPT2 == nullptr);
                }
            },
            {.requireAuth = false});

+        auto const [acctReserve, incReserve] = [this]() -> std::pair<int, int> {
+            Env env{*this, testable_amendments()};
+            return {
+                env.current()->fees().accountReserve(0).drops() /
+                    DROPS_PER_XRP.drops(),
+                env.current()->fees().increment.drops() /
+                    DROPS_PER_XRP.drops()};
+        }();
+
+        testCase(
+            [&, this](
+                Env& env,
+                Account const& issuer,
+                Account const& owner,
+                Account const& depositor,
+                PrettyAsset const& asset,
+                Vault& vault,
+                MPTTester& mptt) {
+                testcase("MPT failed reserve to re-create MPToken");
+
+                auto [tx, keylet] =
+                    vault.create({.owner = owner, .asset = asset});
+                env(tx);
+                env.close();
+                auto v = env.le(keylet);
+                BEAST_EXPECT(v);
+
+                env(pay(depositor, owner, asset(1000)));
+                env.close();
+
+                tx = vault.deposit(
+                    {.depositor = owner,
+                     .id = keylet.key,
+                     .amount = asset(1000)}); // all assets held by owner
+                env(tx);
+                env.close();
+
+                {
+                    // Remove owner's MPToken and it will not be re-created
+                    mptt.authorize(
+                        {.account = owner, .flags = tfMPTUnauthorize});
+                    env.close();
+
+                    auto const mptoken =
+                        keylet::mptoken(mptt.issuanceID(), owner);
+                    auto const sleMPT = env.le(mptoken);
+                    BEAST_EXPECT(sleMPT == nullptr);
+
+                    // No reserve to create MPToken for asset in VaultWithdraw
+                    tx = vault.withdraw(
+                        {.depositor = owner,
+                         .id = keylet.key,
+                         .amount = asset(100)});
+                    env(tx, ter{tecINSUFFICIENT_RESERVE});
+                    env.close();
+
+                    env(pay(depositor, owner, XRP(incReserve)));
+                    env.close();
+
+                    // Withdraw can now create asset MPToken, tx will succeed
+                    env(tx);
+                    env.close();
+                }
+            },
+            {.requireAuth = false,
+             .initialXRP = acctReserve + incReserve * 4 - 1});

        testCase([this](
                     Env& env,
                     Account const& issuer,
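The acctReserve/incReserve helper above converts the ledger's drop-denominated reserves into whole XRP (1 XRP is 1,000,000 drops), and funding accounts with acctReserve + incReserve * 4 - 1 deliberately leaves them just short of one more reserve increment, which is what makes the tecINSUFFICIENT_RESERVE branch reachable. A small sketch of that arithmetic under assumed reserve values; the test reads the real ones from env.current()->fees().

#include <cstdint>

// Assumed values for illustration only.
constexpr std::int64_t dropsPerXRP = 1'000'000;                 // fixed ratio
constexpr std::int64_t acctReserve = 10'000'000 / dropsPerXRP;  // 10 XRP
constexpr std::int64_t incReserve = 2'000'000 / dropsPerXRP;    // 2 XRP

// Funded one XRP short of the base reserve plus four increments:
constexpr std::int64_t initialXRP = acctReserve + incReserve * 4 - 1;
static_assert(initialXRP == 17);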
@@ -2320,23 +2418,30 @@ class Vault_test : public beast::unit_test::suite
    {
        using namespace test::jtx;

+        struct CaseArgs
+        {
+            int initialXRP = 1000;
+            double transferRate = 1.0;
+        };
+
        auto testCase =
-            [&,
-             this](std::function<void(
-                       Env & env,
-                       Account const& owner,
-                       Account const& issuer,
-                       Account const& charlie,
-                       std::function<Account(ripple::Keylet)> vaultAccount,
-                       Vault& vault,
-                       PrettyAsset const& asset,
-                       std::function<MPTID(ripple::Keylet)> issuanceId)> test) {
+            [&, this](
+                std::function<void(
+                    Env & env,
+                    Account const& owner,
+                    Account const& issuer,
+                    Account const& charlie,
+                    std::function<Account(ripple::Keylet)> vaultAccount,
+                    Vault& vault,
+                    PrettyAsset const& asset,
+                    std::function<MPTID(ripple::Keylet)> issuanceId)> test,
+                CaseArgs args = {}) {
                Env env{*this, testable_amendments() | featureSingleAssetVault};
                Account const owner{"owner"};
                Account const issuer{"issuer"};
                Account const charlie{"charlie"};
                Vault vault{env};
-                env.fund(XRP(1000), issuer, owner, charlie);
+                env.fund(XRP(args.initialXRP), issuer, owner, charlie);
                env(fset(issuer, asfAllowTrustLineClawback));
                env.close();

@@ -2344,7 +2449,7 @@ class Vault_test : public beast::unit_test::suite
                env.trust(asset(1000), owner);
                env.trust(asset(1000), charlie);
                env(pay(issuer, owner, asset(200)));
-                env(rate(issuer, 1.25));
+                env(rate(issuer, args.transferRate));
                env.close();

                auto const vaultAccount =
@@ -2505,73 +2610,81 @@ class Vault_test : public beast::unit_test::suite
            env.close();
        });

-        testCase([&, this](
-                     Env& env,
-                     Account const& owner,
-                     Account const& issuer,
-                     Account const& charlie,
-                     auto vaultAccount,
-                     Vault& vault,
-                     PrettyAsset const& asset,
-                     auto issuanceId) {
-            testcase("IOU transfer fees not applied");
+        testCase(
+            [&, this](
+                Env& env,
+                Account const& owner,
+                Account const& issuer,
+                Account const& charlie,
+                auto vaultAccount,
+                Vault& vault,
+                PrettyAsset const& asset,
+                auto issuanceId) {
+                testcase("IOU transfer fees not applied");

-            auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
-            env(tx);
-            env.close();
+                auto [tx, keylet] =
+                    vault.create({.owner = owner, .asset = asset});
+                env(tx);
+                env.close();

-            env(vault.deposit(
-                {.depositor = owner, .id = keylet.key, .amount = asset(100)}));
-            env.close();
+                env(vault.deposit(
+                    {.depositor = owner,
+                     .id = keylet.key,
+                     .amount = asset(100)}));
+                env.close();

-            auto const issue = asset.raw().get<Issue>();
-            Asset const share = Asset(issuanceId(keylet));
+                auto const issue = asset.raw().get<Issue>();
+                Asset const share = Asset(issuanceId(keylet));

-            // transfer fees ignored on deposit
-            BEAST_EXPECT(env.balance(owner, issue) == asset(100));
-            BEAST_EXPECT(
-                env.balance(vaultAccount(keylet), issue) == asset(100));
+                // transfer fees ignored on deposit
+                BEAST_EXPECT(env.balance(owner, issue) == asset(100));
+                BEAST_EXPECT(
+                    env.balance(vaultAccount(keylet), issue) == asset(100));

-            {
-                auto tx = vault.clawback(
-                    {.issuer = issuer,
-                     .id = keylet.key,
-                     .holder = owner,
-                     .amount = asset(50)});
-                env(tx);
-                env.close();
-            }
+                {
+                    auto tx = vault.clawback(
+                        {.issuer = issuer,
+                         .id = keylet.key,
+                         .holder = owner,
+                         .amount = asset(50)});
+                    env(tx);
+                    env.close();
+                }

-            // transfer fees ignored on clawback
-            BEAST_EXPECT(env.balance(owner, issue) == asset(100));
-            BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(50));
+                // transfer fees ignored on clawback
+                BEAST_EXPECT(env.balance(owner, issue) == asset(100));
+                BEAST_EXPECT(
+                    env.balance(vaultAccount(keylet), issue) == asset(50));

-            env(vault.withdraw(
-                {.depositor = owner,
-                 .id = keylet.key,
-                 .amount = share(20'000'000)}));
+                env(vault.withdraw(
+                    {.depositor = owner,
+                     .id = keylet.key,
+                     .amount = share(20'000'000)}));

-            // transfer fees ignored on withdraw
-            BEAST_EXPECT(env.balance(owner, issue) == asset(120));
-            BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(30));
+                // transfer fees ignored on withdraw
+                BEAST_EXPECT(env.balance(owner, issue) == asset(120));
+                BEAST_EXPECT(
+                    env.balance(vaultAccount(keylet), issue) == asset(30));

-            {
-                auto tx = vault.withdraw(
-                    {.depositor = owner,
-                     .id = keylet.key,
-                     .amount = share(30'000'000)});
-                tx[sfDestination] = charlie.human();
-                env(tx);
-            }
+                {
+                    auto tx = vault.withdraw(
+                        {.depositor = owner,
+                         .id = keylet.key,
+                         .amount = share(30'000'000)});
+                    tx[sfDestination] = charlie.human();
+                    env(tx);
+                }

-            // transfer fees ignored on withdraw to 3rd party
-            BEAST_EXPECT(env.balance(owner, issue) == asset(120));
-            BEAST_EXPECT(env.balance(charlie, issue) == asset(30));
-            BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(0));
+                // transfer fees ignored on withdraw to 3rd party
+                BEAST_EXPECT(env.balance(owner, issue) == asset(120));
+                BEAST_EXPECT(env.balance(charlie, issue) == asset(30));
+                BEAST_EXPECT(
+                    env.balance(vaultAccount(keylet), issue) == asset(0));

-            env(vault.del({.owner = owner, .id = keylet.key}));
-            env.close();
-        });
+                env(vault.del({.owner = owner, .id = keylet.key}));
+                env.close();
+            },
+            CaseArgs{.transferRate = 1.25});

        testCase([&, this](
                     Env& env,
@@ -2713,6 +2826,103 @@ class Vault_test : public beast::unit_test::suite
            env(tx1);
        });

+        auto const [acctReserve, incReserve] = [this]() -> std::pair<int, int> {
+            Env env{*this, testable_amendments()};
+            return {
+                env.current()->fees().accountReserve(0).drops() /
+                    DROPS_PER_XRP.drops(),
+                env.current()->fees().increment.drops() /
+                    DROPS_PER_XRP.drops()};
+        }();
+
+        testCase(
+            [&, this](
+                Env& env,
+                Account const& owner,
+                Account const& issuer,
+                Account const& charlie,
+                auto,
+                Vault& vault,
+                PrettyAsset const& asset,
+                auto&&...) {
+                testcase("IOU no trust line to depositor no reserve");
+                auto [tx, keylet] =
+                    vault.create({.owner = owner, .asset = asset});
+                env(tx);
+                env.close();
+
+                // reset limit, so deposit of all funds will delete the trust
+                // line
+                env.trust(asset(0), owner);
+                env.close();
+
+                env(vault.deposit(
+                    {.depositor = owner,
+                     .id = keylet.key,
+                     .amount = asset(200)}));
+                env.close();
+
+                auto trustline =
+                    env.le(keylet::line(owner, asset.raw().get<Issue>()));
+                BEAST_EXPECT(trustline == nullptr);
+
+                // Fail because not enough reserve to create trust line
+                tx = vault.withdraw(
+                    {.depositor = owner,
+                     .id = keylet.key,
+                     .amount = asset(10)});
+                env(tx, ter{tecNO_LINE_INSUF_RESERVE});
+                env.close();
+
+                env(pay(charlie, owner, XRP(incReserve)));
+                env.close();
+
+                // Withdraw can now create trust line, will succeed
+                env(tx);
+                env.close();
+            },
+            CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
+
+        testCase(
+            [&, this](
+                Env& env,
+                Account const& owner,
+                Account const& issuer,
+                Account const& charlie,
+                auto,
+                Vault& vault,
+                PrettyAsset const& asset,
+                auto&&...) {
+                testcase("IOU no reserve for share MPToken");
+                auto [tx, keylet] =
+                    vault.create({.owner = owner, .asset = asset});
+                env(tx);
+                env.close();
+
+                env(pay(owner, charlie, asset(100)));
+                env.close();
+
+                // Use up some reserve on tickets
+                env(ticket::create(charlie, 2));
+                env.close();
+
+                // Fail because not enough reserve to create MPToken for shares
+                tx = vault.deposit(
+                    {.depositor = charlie,
+                     .id = keylet.key,
+                     .amount = asset(100)});
+                env(tx, ter{tecINSUFFICIENT_RESERVE});
+                env.close();
+
+                env(pay(issuer, charlie, XRP(incReserve)));
+                env.close();
+
+                // Deposit can now create MPToken, will succeed
+                env(tx);
+                env.close();
+            },
+            CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
+
        testCase([&, this](
                     Env& env,
                     Account const& owner,
@@ -101,12 +101,21 @@ public:
        }));

        gate g;
-        env.app().getJobQueue().postCoro(
+        gate gStart;
+        auto coro = env.app().getJobQueue().postCoro(
            jtCLIENT, "Coroutine-Test", [&](auto const& c) {
-                c->post();
+                gStart.signal();
                c->yield();
                g.signal();
            });

+        // Wait for the coroutine to start.
+        BEAST_EXPECT(gStart.wait_for(5s));
+
+        BEAST_EXPECT(coro->state() == JobQueue::Coro::CoroState::Suspended);
+        // Post the coroutine.
+        coro->post();
+
        BEAST_EXPECT(g.wait_for(5s));
    }
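The test drives the coroutine through a small gate helper: signal() releases a waiting wait_for(), which returns false on timeout. The real test utility may differ in detail; the following is a hypothetical one-shot latch with the same surface, shown only to make the handshake above easier to follow.

#include <chrono>
#include <condition_variable>
#include <mutex>

// Hypothetical stand-in for the test's gate: a one-shot latch.
class gate
{
public:
    // Release any current or future waiter.
    void
    signal()
    {
        std::lock_guard<std::mutex> lock(mutex_);
        signaled_ = true;
        cv_.notify_all();
    }

    // Returns true if signal() happened before the timeout expired.
    template <class Rep, class Period>
    bool
    wait_for(std::chrono::duration<Rep, Period> timeout)
    {
        std::unique_lock<std::mutex> lock(mutex_);
        return cv_.wait_for(lock, timeout, [this] { return signaled_; });
    }

private:
    std::condition_variable cv_;
    std::mutex mutex_;
    bool signaled_ = false;
};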
@@ -175,12 +184,78 @@ public:
        BEAST_EXPECT(*lv == -1);
    }

+    void
+    stopJobQueueWhenCoroutineSuspended()
+    {
+        using namespace std::chrono_literals;
+        using namespace jtx;
+
+        testcase("Stop JobQueue when a coroutine is suspended");
+
+        Env env(*this, envconfig([](std::unique_ptr<Config> cfg) {
+                    cfg->FORCE_MULTI_THREAD = true;
+                    return cfg;
+                }));
+
+        bool started = false;
+        bool finished = false;
+        std::optional<bool> shouldStop;
+        std::condition_variable cv;
+        std::mutex m;
+        std::unique_lock<std::mutex> lk(m);
+        auto coro = env.app().getJobQueue().postCoro(
+            jtCLIENT, "Coroutine-Test", [&](auto const& c) {
+                started = true;
+                cv.notify_all();
+                c->yield();
+                finished = true;
+                shouldStop = c->shouldStop();
+                cv.notify_all();
+            });
+
+        cv.wait_for(lk, 5s, [&]() { return started; });
+        env.app().getJobQueue().stop();
+
+        cv.wait_for(lk, 5s, [&]() { return finished; });
+        BEAST_EXPECT(finished);
+        BEAST_EXPECT(shouldStop.has_value() && *shouldStop == true);
+    }
+
+    void
+    coroutineGetsDestroyedBeforeExecuting()
+    {
+        using namespace std::chrono_literals;
+        using namespace jtx;
+
+        testcase("Coroutine gets destroyed before executing");
+
+        Env env(*this, envconfig([](std::unique_ptr<Config> cfg) {
+                    cfg->FORCE_MULTI_THREAD = true;
+                    return cfg;
+                }));
+
+        {
+            auto coro = std::make_shared<JobQueue::Coro>(
+                Coro_create_t{},
+                env.app().getJobQueue(),
+                JobType::jtCLIENT,
+                "test",
+                [](auto coro) {
+                });
+        }
+
+        pass();
+    }
+
    void
    run() override
    {
        correct_order();
        incorrect_order();
        thread_specific_storage();
+        stopJobQueueWhenCoroutineSuspended();
+        coroutineGetsDestroyedBeforeExecuting();
    }
};
@@ -87,6 +87,8 @@ class JobQueue_test : public beast::unit_test::suite
        while (yieldCount == 0)
            ;

+        coro->join();
+
        // Now re-post until the Coro says it is done.
        int old = yieldCount;
        while (coro->runnable())
@@ -118,6 +120,9 @@ class JobQueue_test : public beast::unit_test::suite
            return;
        }

+        while (yieldCount == 0)
+            ; // We should wait for the job to start and yield
+
        // Wait for the Job to run and yield.
        coro->join();
src/test/nodestore/NuDBFactory_test.cpp (new file, 478 lines)
@@ -0,0 +1,478 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2013 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/nodestore/TestBase.h>
#include <test/unit_test/SuiteJournal.h>

#include <xrpld/nodestore/DummyScheduler.h>
#include <xrpld/nodestore/Manager.h>

#include <xrpl/basics/BasicConfig.h>
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/temp_dir.h>

#include <memory>
#include <sstream>

namespace ripple {
namespace NodeStore {

class NuDBFactory_test : public TestBase
{
private:
    // Helper function to create a Section with specified parameters
    Section
    createSection(std::string const& path, std::string const& blockSize = "")
    {
        Section params;
        params.set("type", "nudb");
        params.set("path", path);
        if (!blockSize.empty())
            params.set("nudb_block_size", blockSize);
        return params;
    }

    // Helper function to create a backend and test basic functionality
    bool
    testBackendFunctionality(
        Section const& params,
        std::size_t expectedBlocksize)
    {
        try
        {
            DummyScheduler scheduler;
            test::SuiteJournal journal("NuDBFactory_test", *this);

            auto backend = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);

            if (!BEAST_EXPECT(backend))
                return false;

            if (!BEAST_EXPECT(backend->getBlockSize() == expectedBlocksize))
                return false;

            backend->open();

            if (!BEAST_EXPECT(backend->isOpen()))
                return false;

            // Test basic store/fetch functionality
            auto batch = createPredictableBatch(10, 12345);
            storeBatch(*backend, batch);

            Batch copy;
            fetchCopyOfBatch(*backend, &copy, batch);

            backend->close();

            return areBatchesEqual(batch, copy);
        }
        catch (...)
        {
            return false;
        }
    }

    // Helper function to test log messages
    void
    testLogMessage(
        Section const& params,
        beast::severities::Severity level,
        std::string const& expectedMessage)
    {
        test::StreamSink sink(level);
        beast::Journal journal(sink);

        DummyScheduler scheduler;
        auto backend = Manager::instance().make_Backend(
            params, megabytes(4), scheduler, journal);

        std::string logOutput = sink.messages().str();
        BEAST_EXPECT(logOutput.find(expectedMessage) != std::string::npos);
    }

    // Helper function to test power of two validation
    void
    testPowerOfTwoValidation(std::string const& size, bool shouldWork)
    {
        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), size);

        test::StreamSink sink(beast::severities::kWarning);
        beast::Journal journal(sink);

        DummyScheduler scheduler;
        auto backend = Manager::instance().make_Backend(
            params, megabytes(4), scheduler, journal);

        std::string logOutput = sink.messages().str();
        bool hasWarning =
            logOutput.find("Invalid nudb_block_size") != std::string::npos;

        BEAST_EXPECT(hasWarning == !shouldWork);
    }

public:
    void
    testDefaultBlockSize()
    {
        testcase("Default block size (no nudb_block_size specified)");

        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path());

        // Should work with default 4096 block size
        BEAST_EXPECT(testBackendFunctionality(params, 4096));
    }

    void
    testValidBlockSizes()
    {
        testcase("Valid block sizes");

        std::vector<std::size_t> validSizes = {4096, 8192, 16384, 32768};

        for (auto const& size : validSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), to_string(size));

            BEAST_EXPECT(testBackendFunctionality(params, size));
        }

        // An empty value is ignored by the config parser, so the default is
        // used
        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), "");

        BEAST_EXPECT(testBackendFunctionality(params, 4096));
    }

    void
    testInvalidBlockSizes()
    {
        testcase("Invalid block sizes");

        std::vector<std::string> invalidSizes = {
            "2048",    // Too small
            "1024",    // Too small
            "65536",   // Too large
            "131072",  // Too large
            "5000",    // Not power of 2
            "6000",    // Not power of 2
            "10000",   // Not power of 2
            "0",       // Zero
            "-1",      // Negative
            "abc",     // Non-numeric
            "4k",      // Invalid format
            "4096.5"   // Decimal
        };

        for (auto const& size : invalidSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // Fails
            BEAST_EXPECT(!testBackendFunctionality(params, 4096));
        }

        // Test whitespace cases separately since lexical_cast may handle them
        std::vector<std::string> whitespaceInvalidSizes = {
            "4096 ",  // Trailing space - might be handled by lexical_cast
            " 4096"   // Leading space - might be handled by lexical_cast
        };

        for (auto const& size : whitespaceInvalidSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // Fails
            BEAST_EXPECT(!testBackendFunctionality(params, 4096));
        }
    }

    void
    testLogMessages()
    {
        testcase("Log message verification");

        // Test valid custom block size logging
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "8192");

            testLogMessage(
                params,
                beast::severities::kInfo,
                "Using custom NuDB block size: 8192");
        }

        // Test invalid block size failure
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "5000");

            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                fail();
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size: 5000") !=
                    std::string::npos);
                BEAST_EXPECT(
                    logOutput.find(
                        "Must be power of 2 between 4096 and 32768") !=
                    std::string::npos);
            }
        }

        // Test non-numeric value failure
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "invalid");

            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);

                fail();
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size value: invalid") !=
                    std::string::npos);
            }
        }
    }

    void
    testPowerOfTwoValidation()
    {
        testcase("Power of 2 validation logic");

        // Test edge cases around valid range
        std::vector<std::pair<std::string, bool>> testCases = {
            {"4095", false},   // Just below minimum
            {"4096", true},    // Minimum valid
            {"4097", false},   // Just above minimum, not power of 2
            {"8192", true},    // Valid power of 2
            {"8193", false},   // Just above valid power of 2
            {"16384", true},   // Valid power of 2
            {"32768", true},   // Maximum valid
            {"32769", false},  // Just above maximum
            {"65536", false}   // Power of 2 but too large
        };

        for (auto const& [size, shouldWork] : testCases)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // We test the validation logic by catching exceptions for invalid
            // values
            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                BEAST_EXPECT(shouldWork);
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size") !=
                    std::string::npos);
            }
        }
    }

    void
    testBothConstructorVariants()
    {
        testcase("Both constructor variants work with custom block size");

        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), "16384");

        DummyScheduler scheduler;
        test::SuiteJournal journal("NuDBFactory_test", *this);

        // Test first constructor (without nudb::context)
        {
            auto backend1 = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);
            BEAST_EXPECT(backend1 != nullptr);
            BEAST_EXPECT(testBackendFunctionality(params, 16384));
        }

        // Test second constructor (with nudb::context)
        // Note: This would require access to nudb::context, which might not be
        // easily testable without more complex setup. For now, we test that
        // the factory can create backends with the first constructor.
    }

    void
    testConfigurationParsing()
    {
        testcase("Configuration parsing edge cases");

        // Test that whitespace is handled correctly
        std::vector<std::string> validFormats = {
            "8192"  // Basic valid format
        };

        // Test whitespace handling separately since lexical_cast behavior may
        // vary
        std::vector<std::string> whitespaceFormats = {
            " 8192",  // Leading space - may or may not be handled by
                      // lexical_cast
            "8192 "   // Trailing space - may or may not be handled by
                      // lexical_cast
        };

        // Test basic valid format
        for (auto const& format : validFormats)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), format);

            test::StreamSink sink(beast::severities::kInfo);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            auto backend = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);

            // Should log success message for valid values
            std::string logOutput = sink.messages().str();
            bool hasSuccessMessage =
                logOutput.find("Using custom NuDB block size") !=
                std::string::npos;
            BEAST_EXPECT(hasSuccessMessage);
        }

        // Test whitespace formats - these should work if lexical_cast handles
        // them
        for (auto const& format : whitespaceFormats)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), format);

            // Use a lower threshold to capture both info and warning messages
            test::StreamSink sink(beast::severities::kDebug);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                fail();
            }
            catch (...)
            {
                // Fails
                BEAST_EXPECT(!testBackendFunctionality(params, 8192));
            }
        }
    }

    void
    testDataPersistence()
    {
        testcase("Data persistence with different block sizes");

        std::vector<std::string> blockSizes = {
            "4096", "8192", "16384", "32768"};

        for (auto const& size : blockSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            DummyScheduler scheduler;
            test::SuiteJournal journal("NuDBFactory_test", *this);

            // Create test data
            auto batch = createPredictableBatch(50, 54321);

            // Store data
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                backend->open();
                storeBatch(*backend, batch);
                backend->close();
            }

            // Retrieve data in new backend instance
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                backend->open();

                Batch copy;
                fetchCopyOfBatch(*backend, &copy, batch);

                BEAST_EXPECT(areBatchesEqual(batch, copy));
                backend->close();
            }
        }
    }

    void
    run() override
    {
        testDefaultBlockSize();
        testValidBlockSizes();
        testInvalidBlockSizes();
        testLogMessages();
        testPowerOfTwoValidation();
        testBothConstructorVariants();
        testConfigurationParsing();
        testDataPersistence();
    }
};

BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, ripple);

} // namespace NodeStore
} // namespace ripple
src/test/rpc/ServerDefinitions_test.cpp (new file, 168 lines)
@@ -0,0 +1,168 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/jtx.h>

#include <xrpl/beast/unit_test.h>
#include <xrpl/protocol/jss.h>

namespace ripple {

namespace test {

class ServerDefinitions_test : public beast::unit_test::suite
{
public:
    void
    testServerDefinitions()
    {
        testcase("server_definitions");

        using namespace test::jtx;

        {
            Env env(*this);
            auto const result = env.rpc("server_definitions");
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));

            // test a random element of each result
            // (testing the whole output would be difficult to maintain)

            {
                auto const firstField = result[jss::result][jss::FIELDS][0u];
                BEAST_EXPECT(firstField[0u].asString() == "Generic");
                BEAST_EXPECT(
                    firstField[1][jss::isSerialized].asBool() == false);
                BEAST_EXPECT(
                    firstField[1][jss::isSigningField].asBool() == false);
                BEAST_EXPECT(firstField[1][jss::isVLEncoded].asBool() == false);
                BEAST_EXPECT(firstField[1][jss::nth].asUInt() == 0);
                BEAST_EXPECT(firstField[1][jss::type].asString() == "Unknown");
            }

            BEAST_EXPECT(
                result[jss::result][jss::LEDGER_ENTRY_TYPES]["AccountRoot"]
                    .asUInt() == 97);
            BEAST_EXPECT(
                result[jss::result][jss::TRANSACTION_RESULTS]["tecDIR_FULL"]
                    .asUInt() == 121);
            BEAST_EXPECT(
                result[jss::result][jss::TRANSACTION_TYPES]["Payment"]
                    .asUInt() == 0);
            BEAST_EXPECT(
                result[jss::result][jss::TYPES]["AccountID"].asUInt() == 8);

            // check exception SFields
            {
                auto const fieldExists = [&](std::string name) {
                    for (auto& field : result[jss::result][jss::FIELDS])
                    {
                        if (field[0u].asString() == name)
                        {
                            return true;
                        }
                    }
                    return false;
                };
                BEAST_EXPECT(fieldExists("Generic"));
                BEAST_EXPECT(fieldExists("Invalid"));
                BEAST_EXPECT(fieldExists("ObjectEndMarker"));
                BEAST_EXPECT(fieldExists("ArrayEndMarker"));
                BEAST_EXPECT(fieldExists("taker_gets_funded"));
                BEAST_EXPECT(fieldExists("taker_pays_funded"));
                BEAST_EXPECT(fieldExists("hash"));
                BEAST_EXPECT(fieldExists("index"));
            }

            // test that base_uint types are replaced with "Hash" prefix
            {
                auto const types = result[jss::result][jss::TYPES];
                BEAST_EXPECT(types["Hash128"].asUInt() == 4);
                BEAST_EXPECT(types["Hash160"].asUInt() == 17);
                BEAST_EXPECT(types["Hash192"].asUInt() == 21);
                BEAST_EXPECT(types["Hash256"].asUInt() == 5);
                BEAST_EXPECT(types["Hash384"].asUInt() == 22);
                BEAST_EXPECT(types["Hash512"].asUInt() == 23);
            }
        }

        // test providing the same hash
        {
            Env env(*this);
            auto const firstResult = env.rpc("server_definitions");
            auto const hash = firstResult[jss::result][jss::hash].asString();
            auto const hashParam =
                std::string("{ ") + "\"hash\": \"" + hash + "\"}";

            auto const result =
                env.rpc("json", "server_definitions", hashParam);
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(!result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(
                !result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                !result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(!result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(!result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
        }

        // test providing a different hash
        {
            Env env(*this);
            std::string const hash =
                "54296160385A27154BFA70A239DD8E8FD4CC2DB7BA32D970BA3A5B132CF749"
                "D1";
            auto const hashParam =
                std::string("{ ") + "\"hash\": \"" + hash + "\"}";

            auto const result =
                env.rpc("json", "server_definitions", hashParam);
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
        }
    }

    void
    run() override
    {
        testServerDefinitions();
    }
};

BEAST_DEFINE_TESTSUITE(ServerDefinitions, rpc, ripple);

} // namespace test
} // namespace ripple
@@ -174,137 +174,10 @@ admin = 127.0.0.1
        }
    }

    void
    run() override
    {
        testServerInfo();
-       testServerDefinitions();
    }
};
@@ -47,7 +47,7 @@ class BookStep : public StepImp<TIn, TOut, BookStep<TIn, TOut, TDerived>>
protected:
    enum class OfferType { AMM, CLOB };

-   uint32_t const maxOffersToConsume_;
+   static constexpr uint32_t MaxOffersToConsume{1000};
    Book book_;
    AccountID strandSrc_;
    AccountID strandDst_;
@@ -82,18 +82,9 @@ protected:

    std::optional<Cache> cache_;

-   static uint32_t
-   getMaxOffersToConsume(StrandContext const& ctx)
-   {
-       if (ctx.view.rules().enabled(fix1515))
-           return 1000;
-       return 2000;
-   }
-
public:
    BookStep(StrandContext const& ctx, Issue const& in, Issue const& out)
-       : maxOffersToConsume_(getMaxOffersToConsume(ctx))
-       , book_(in, out, ctx.domainID)
+       : book_(in, out, ctx.domainID)
        , strandSrc_(ctx.strandSrc)
        , strandDst_(ctx.strandDst)
        , prevStep_(ctx.prevStep)
@@ -738,7 +729,7 @@ BookStep<TIn, TOut, TDerived>::forEachOffer(
        ownerPaysTransferFee_ ? rate(book_.out.account) : QUALITY_ONE;

    typename FlowOfferStream<TIn, TOut>::StepCounter counter(
-       maxOffersToConsume_, j_);
+       MaxOffersToConsume, j_);

    FlowOfferStream<TIn, TOut> offers(
        sb, afView, book_, sb.parentCloseTime(), counter, j_);
@@ -1093,18 +1084,9 @@ BookStep<TIn, TOut, TDerived>::revImp(
        offersUsed_ = offersConsumed;
        SetUnion(ofrsToRm, toRm);

-       if (offersConsumed >= maxOffersToConsume_)
+       // Too many iterations, mark this strand as inactive
+       if (offersConsumed >= MaxOffersToConsume)
        {
-           // Too many iterations, mark this strand as inactive
-           if (!afView.rules().enabled(fix1515))
-           {
-               // Don't use the liquidity
-               cache_.emplace(beast::zero, beast::zero);
-               return {beast::zero, beast::zero};
-           }
-
-           // Use the liquidity, but use this to mark the strand as inactive so
-           // it's not used further
            inactive_ = true;
        }
    }
@@ -1266,18 +1248,9 @@ BookStep<TIn, TOut, TDerived>::fwdImp(
        offersUsed_ = offersConsumed;
        SetUnion(ofrsToRm, toRm);

-       if (offersConsumed >= maxOffersToConsume_)
+       // Too many iterations, mark this strand as inactive (dry)
+       if (offersConsumed >= MaxOffersToConsume)
        {
-           // Too many iterations, mark this strand as inactive (dry)
-           if (!afView.rules().enabled(fix1515))
-           {
-               // Don't use the liquidity
-               cache_.emplace(beast::zero, beast::zero);
-               return {beast::zero, beast::zero};
-           }
-
-           // Use the liquidity, but use this to mark the strand as inactive so
-           // it's not used further
            inactive_ = true;
        }
    }
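Design note on the hunks above: the per-instance, amendment-gated limit (1000 or 2000 offers depending on fix1515) collapses into one compile-time constant, and the branch that discarded already-gathered liquidity disappears; hitting the cap now always keeps the cached liquidity and merely marks the strand dry. As a hedged, standalone illustration of the counting pattern (hypothetical type, not FlowOfferStream's actual StepCounter):

#include <cstdint>

// Hypothetical budget counter in the spirit of StepCounter: count each
// consumed offer and report when the fixed budget is spent.
class OfferBudget
{
    std::uint32_t remaining_;

public:
    explicit OfferBudget(std::uint32_t limit) : remaining_(limit)
    {
    }

    // Returns false once the budget is exhausted; the caller then marks the
    // strand inactive instead of scanning the order book any further.
    bool
    consume()
    {
        if (remaining_ == 0)
            return false;
        --remaining_;
        return true;
    }
};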

@@ -1205,7 +1205,7 @@ EscrowFinish::doApply()
    {
        // LCOV_EXCL_START
        JLOG(j_.fatal()) << "Unable to delete Escrow from recipient.";
-       return tefBAD_LEDGER; // LCOV_EXCL_LINE
+       return tefBAD_LEDGER;
        // LCOV_EXCL_STOP
    }
}
@@ -202,8 +202,7 @@ VaultDeposit::doApply()
    else // !vault->isFlag(lsfVaultPrivate) || account_ == vault->at(sfOwner)
    {
        // No authorization needed, but must ensure there is MPToken
-       auto sleMpt = view().read(keylet::mptoken(mptIssuanceID, account_));
-       if (!sleMpt)
+       if (!view().exists(keylet::mptoken(mptIssuanceID, account_)))
        {
            if (auto const err = authorizeMPToken(
                    view(),
@@ -52,12 +52,6 @@ VaultWithdraw::preflight(PreflightContext const& ctx)
            return temMALFORMED;
        }
    }
-   else if (ctx.tx.isFieldPresent(sfDestinationTag))
-   {
-       JLOG(ctx.j.debug()) << "VaultWithdraw: sfDestinationTag is set but "
-                              "sfDestination is not";
-       return temMALFORMED;
-   }

    return tesSUCCESS;
}
@@ -116,37 +110,28 @@ VaultWithdraw::preclaim(PreclaimContext const& ctx)
    }

    auto const account = ctx.tx[sfAccount];
-   auto const dstAcct = [&]() -> AccountID {
-       if (ctx.tx.isFieldPresent(sfDestination))
-           return ctx.tx.getAccountID(sfDestination);
-       return account;
-   }();
+   auto const dstAcct = ctx.tx[~sfDestination].value_or(account);
+   auto const sleDst = ctx.view.read(keylet::account(dstAcct));
+   if (sleDst == nullptr)
+       return account == dstAcct ? tecINTERNAL : tecNO_DST;
+
+   if (sleDst->isFlag(lsfRequireDestTag) &&
+       !ctx.tx.isFieldPresent(sfDestinationTag))
+       return tecDST_TAG_NEEDED;  // Cannot send without a tag

    // Withdrawal to a 3rd party destination account is essentially a transfer,
    // via shares in the vault. Enforce all the usual asset transfer checks.
-   AuthType authType = AuthType::Legacy;
-   if (account != dstAcct)
+   if (account != dstAcct && sleDst->isFlag(lsfDepositAuth))
    {
-       auto const sleDst = ctx.view.read(keylet::account(dstAcct));
-       if (sleDst == nullptr)
-           return tecNO_DST;
-
-       if (sleDst->isFlag(lsfRequireDestTag) &&
-           !ctx.tx.isFieldPresent(sfDestinationTag))
-           return tecDST_TAG_NEEDED;  // Cannot send without a tag
-
-       if (sleDst->isFlag(lsfDepositAuth))
-       {
-           if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account)))
-               return tecNO_PERMISSION;
-       }
-       // The destination account must have consented to receive the asset by
-       // creating a RippleState or MPToken
-       authType = AuthType::StrongAuth;
+       if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account)))
+           return tecNO_PERMISSION;
    }

-   // Destination MPToken (for an MPT) or trust line (for an IOU) must exist
-   // if not sending to Account.
-   // If sending to Account (i.e. not a transfer), we will also create (only
-   // if authorized) a trust line or MPToken as needed, in doApply().
+   // Destination MPToken or trust line must exist if _not_ sending to Account.
+   AuthType const authType =
+       account == dstAcct ? AuthType::WeakAuth : AuthType::StrongAuth;
    if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct, authType);
        !isTesSuccess(ter))
        return ter;
@@ -307,11 +292,16 @@ VaultWithdraw::doApply()
        // else quietly ignore, account balance is not zero
    }

-   auto const dstAcct = [&]() -> AccountID {
-       if (ctx_.tx.isFieldPresent(sfDestination))
-           return ctx_.tx.getAccountID(sfDestination);
-       return account_;
-   }();
+   auto const dstAcct = ctx_.tx[~sfDestination].value_or(account_);
+   if (!vaultAsset.native() && //
+       dstAcct != vaultAsset.getIssuer() && //
+       dstAcct == account_)
+   {
+       if (auto const ter = addEmptyHolding(
+               view(), account_, mPriorBalance, vaultAsset, j_);
+           !isTesSuccess(ter) && ter != tecDUPLICATE)
+           return ter;
+   }

    // Transfer assets from vault to depositor or destination account.
    if (auto const ter = accountSend(
@@ -34,17 +34,21 @@ JobQueue::Coro::Coro(
    : jq_(jq)
    , type_(type)
    , name_(name)
-   , running_(false)
    , coro_(
          [this, fn = std::forward<F>(f)](
              boost::coroutines::asymmetric_coroutine<void>::push_type&
                  do_yield) {
              yield_ = &do_yield;
              yield();
-             fn(shared_from_this());
-#ifndef NDEBUG
-             finished_ = true;
-#endif
+             // self makes Coro alive until this function returns
+             std::shared_ptr<Coro> self;
+             if (!shouldStop())
+             {
+                 self = shared_from_this();
+                 fn(self);
+             }
+             state_ = CoroState::Finished;
+             cv_.notify_all();
          },
          boost::coroutines::attributes(megabytes(1)))
{
@@ -52,29 +56,58 @@ JobQueue::Coro::Coro(

inline JobQueue::Coro::~Coro()
{
-#ifndef NDEBUG
-   XRPL_ASSERT(finished_, "ripple::JobQueue::Coro::~Coro : is finished");
-#endif
+   XRPL_ASSERT(
+       state_ != CoroState::Running,
+       "ripple::JobQueue::Coro::~Coro : is not running");
+   exiting_ = true;
+   // Resume the coroutine so that it has a chance to clean things up
+   if (state_ == CoroState::Suspended)
+   {
+       resume();
+   }
+
+   XRPL_ASSERT(
+       state_ == CoroState::Finished,
+       "ripple::JobQueue::Coro::~Coro : is finished");
}

-inline void
-JobQueue::Coro::yield() const
+inline bool
+JobQueue::Coro::yield()
{
    {
        std::lock_guard lock(jq_.m_mutex);
+       if (shouldStop())
+           return false;
+
+       state_ = CoroState::Suspended;
+       cv_.notify_all();
+
        ++jq_.nSuspend_;
+       jq_.m_suspendedCoros[this] = weak_from_this();
+       jq_.cv_.notify_all();
    }
    (*yield_)();
+
+   return true;
}

inline bool
JobQueue::Coro::post()
{
+   if (state_ == CoroState::Finished)
    {
-       std::lock_guard lk(mutex_run_);
-       running_ = true;
+       // The coroutine will run until it finishes if the JobQueue has stopped.
+       // In the case where make_shared<Coro>() succeeds and then the JobQueue
+       // stops before coro_ gets executed, post() will still be called and
+       // state_ will be Finished. We should return false and avoid XRPL_ASSERT
+       // as it's a valid edge case.
+       return false;
    }
+
+   XRPL_ASSERT(
+       state_ == CoroState::Suspended,
+       "ripple::JobQueue::Coro::post : should be suspended");

    // sp keeps 'this' alive
    if (jq_.addJob(
            type_, name_, [this, sp = shared_from_this()]() { resume(); }))
@@ -82,9 +115,6 @@ JobQueue::Coro::post()
    {
        return true;
    }

-   // The coroutine will not run. Clean up running_.
-   std::lock_guard lk(mutex_run_);
-   running_ = false;
    cv_.notify_all();
    return false;
}
@@ -92,13 +122,18 @@ JobQueue::Coro::post()
inline void
JobQueue::Coro::resume()
{
+   auto suspended = CoroState::Suspended;
+   if (!state_.compare_exchange_strong(suspended, CoroState::Running))
    {
-       std::lock_guard lk(mutex_run_);
-       running_ = true;
+       return;
    }
+   cv_.notify_all();

    {
        std::lock_guard lock(jq_.m_mutex);
+       jq_.m_suspendedCoros.erase(this);
        --jq_.nSuspend_;
+       jq_.cv_.notify_all();
    }
    auto saved = detail::getLocalValues().release();
    detail::getLocalValues().reset(&lvs_);
@@ -109,43 +144,24 @@ JobQueue::Coro::resume()
    coro_();
    detail::getLocalValues().release();
    detail::getLocalValues().reset(saved);
-   std::lock_guard lk(mutex_run_);
-   running_ = false;
-   cv_.notify_all();
}

inline bool
JobQueue::Coro::runnable() const
{
-   return static_cast<bool>(coro_);
-}
-
-inline void
-JobQueue::Coro::expectEarlyExit()
-{
-#ifndef NDEBUG
-   if (!finished_)
-#endif
-   {
-       // expectEarlyExit() must only ever be called from outside the
-       // Coro's stack. It you're inside the stack you can simply return
-       // and be done.
-       //
-       // That said, since we're outside the Coro's stack, we need to
-       // decrement the nSuspend that the Coro's call to yield caused.
-       std::lock_guard lock(jq_.m_mutex);
-       --jq_.nSuspend_;
-#ifndef NDEBUG
-       finished_ = true;
-#endif
-   }
+   // There's an edge case where the coroutine has updated the status
+   // to Finished but the function hasn't exited and therefore, coro_ is
+   // still valid. However, the coroutine is not technically runnable in this
+   // case, because the coroutine is about to exit and static_cast<bool>(coro_)
+   // is going to be false.
+   return static_cast<bool>(coro_) && state_ != CoroState::Finished;
}

inline void
JobQueue::Coro::join()
{
    std::unique_lock<std::mutex> lk(mutex_run_);
-   cv_.wait(lk, [this]() { return running_ == false; });
+   cv_.wait(lk, [this]() { return state_ != CoroState::Running; });
}

} // namespace ripple
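Taken together, the changes above turn yield() into a cooperative cancellation point: it refuses to suspend once the queue is stopping, and a resumed coroutine is expected to re-check shouldStop() before doing more work. A minimal usage sketch, assuming only the postCoro/yield/shouldStop/post API shown in this diff; jq, haveMoreWork, arrangeWakeup, and doOneUnitOfWork are hypothetical stand-ins:

// Sketch: a coroutine that suspends between work units and unwinds cleanly
// when the JobQueue begins shutting down.
jq.postCoro(jtCLIENT, "example", [](std::shared_ptr<JobQueue::Coro> coro) {
    while (haveMoreWork())       // hypothetical predicate
    {
        arrangeWakeup(coro);     // hypothetical; eventually calls coro->post()
        if (!coro->yield())      // false: queue is stopping, we never slept
            return;
        if (coro->shouldStop())  // resumed only so we can unwind
            return;
        doOneUnitOfWork();       // hypothetical
    }
});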
@@ -57,23 +57,28 @@ struct Coro_create_t
class JobQueue : private Workers::Callback
{
public:
+   enum class QueueState { Accepting, Stopping, Stopped };
+
    /** Coroutines must run to completion. */
    class Coro : public std::enable_shared_from_this<Coro>
    {
        friend class JobQueue;

+   public:
+       enum class CoroState { None, Suspended, Running, Finished };
+
+   private:
+       std::atomic_bool exiting_ = false;
        detail::LocalValues lvs_;
        JobQueue& jq_;
        JobType type_;
        std::string name_;
-       bool running_;
+       std::atomic<CoroState> state_ = CoroState::None;
        std::mutex mutex_;
        std::mutex mutex_run_;
        std::condition_variable cv_;
        boost::coroutines::asymmetric_coroutine<void>::pull_type coro_;
        boost::coroutines::asymmetric_coroutine<void>::push_type* yield_;
-#ifndef NDEBUG
-       bool finished_ = false;
-#endif

    public:
        // Private: Used in the implementation
@@ -94,10 +99,12 @@ public:
        Note:
            The associated Job function returns.
            Undefined behavior if called consecutively without a corresponding
            post.
+           It may not suspend at all if the JobQueue is stopping, and returns
+           false in such a case.
        */
-       void
-       yield() const;
+       bool
+       yield();

        /** Schedule coroutine execution.
            Effects:
@@ -127,17 +134,23 @@ public:
        void
        resume();

+       CoroState
+       state() const
+       {
+           return state_;
+       }
+
        /** Returns true if the Coro is still runnable (has not returned). */
        bool
        runnable() const;

-       /** Once called, the Coro allows early exit without an assert. */
-       void
-       expectEarlyExit();
-
        /** Waits until coroutine returns from the user function. */
        void
        join();
+
+       /** Returns true if the coroutine should stop executing */
+       [[nodiscard]] bool
+       shouldStop() const;
    };

    using JobFunction = std::function<void()>;
@@ -159,20 +172,17 @@ public:

        @return true if jobHandler added to queue.
    */
-   template <
-       typename JobHandler,
-       typename = std::enable_if_t<std::is_same<
-           decltype(std::declval<JobHandler&&>()()),
-           void>::value>>
+   template <typename JobHandler>
    bool
    addJob(JobType type, std::string const& name, JobHandler&& jobHandler)
+       requires std::is_void_v<std::invoke_result_t<JobHandler>>
    {
-       if (auto optionalCountedJob =
-               jobCounter_.wrap(std::forward<JobHandler>(jobHandler)))
+       if (queueState_ != QueueState::Accepting)
        {
-           return addRefCountedJob(type, name, std::move(*optionalCountedJob));
+           return false;
        }
-       return false;
+       return addJobNoStatusCheck(
+           type, name, std::forward<JobHandler>(jobHandler));
    }
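The addJob rewrite above also modernizes the constraint: the enable_if SFINAE dance becomes a C++20 requires-clause with identical effect. A self-contained sketch of the same constraint style, using hypothetical names rather than rippled code:

#include <type_traits>
#include <utility>

// Accepts only callables whose invocation result is void, mirroring the
// constraint addJob places on JobHandler.
template <typename F>
void
runVoidHandler(F&& f)
    requires std::is_void_v<std::invoke_result_t<F>>
{
    std::forward<F>(f)();  // invoke exactly once
}

// runVoidHandler([] {});            // OK
// runVoidHandler([] { return 1; }); // rejected at compile time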

    /** Creates a coroutine and adds a job to the queue which will run it.
@@ -231,13 +241,16 @@ public:
    bool
    isStopping() const
    {
-       return stopping_;
+       return queueState_ == QueueState::Stopping;
    }

    // We may be able to move away from this, but we can keep it during the
    // transition.
    bool
-   isStopped() const;
+   isStopped() const
+   {
+       return queueState_ == QueueState::Stopped;
+   }

private:
    friend class Coro;
@@ -249,8 +262,7 @@ private:
    std::uint64_t m_lastJob;
    std::set<Job> m_jobSet;
    JobCounter jobCounter_;
-   std::atomic_bool stopping_{false};
-   std::atomic_bool stopped_{false};
+   std::atomic<QueueState> queueState_{QueueState::Accepting};
    JobDataMap m_jobData;
    JobTypeData m_invalidJobData;

@@ -260,6 +272,8 @@ private:
    // The number of suspended coroutines
    int nSuspend_ = 0;

+   std::map<void*, std::weak_ptr<Coro>> m_suspendedCoros;
+
    Workers m_workers;

    // Statistics tracking
@@ -275,6 +289,22 @@ private:
    JobTypeData&
    getJobTypeData(JobType type);

+   template <typename JobHandler>
+   bool
+   addJobNoStatusCheck(
+       JobType type,
+       std::string const& name,
+       JobHandler&& jobHandler)
+       requires std::is_void_v<std::invoke_result_t<JobHandler>>
+   {
+       if (auto optionalCountedJob =
+               jobCounter_.wrap(std::forward<JobHandler>(jobHandler)))
+       {
+           return addRefCountedJob(type, name, std::move(*optionalCountedJob));
+       }
+       return false;
+   }
+
    // Adds a reference counted job to the JobQueue.
    //
    // param type The type of job.
@@ -412,6 +442,10 @@ template <class F>
std::shared_ptr<JobQueue::Coro>
JobQueue::postCoro(JobType t, std::string const& name, F&& f)
{
+   if (queueState_ != QueueState::Accepting)
+   {
+       return nullptr;
+   }
    /* First param is a detail type to make construction private.
       Last param is the function the coroutine runs. Signature of
           void(std::shared_ptr<Coro>).
@@ -422,7 +456,6 @@ JobQueue::postCoro(JobType t, std::string const& name, F&& f)
    {
        // The Coro was not successfully posted. Disable it so it's destructor
        // can run with no negative side effects. Then destroy it.
-       coro->expectEarlyExit();
        coro.reset();
    }
    return coro;
@@ -26,6 +26,12 @@

namespace ripple {

+bool
+JobQueue::Coro::shouldStop() const
+{
+   return jq_.queueState_ != QueueState::Accepting || exiting_;
+}
+
JobQueue::JobQueue(
    int threadCount,
    beast::insight::Collector::ptr const& collector,
@@ -295,7 +301,45 @@ JobQueue::getJobTypeData(JobType type)
void
JobQueue::stop()
{
-   stopping_ = true;
+   // Once we stop accepting new jobs, all running coroutines won't be able to
+   // get suspended and yield() will return immediately, so we can safely
+   // move m_suspendedCoros, and we can assume that no coroutine will be
+   // suspended in the future.
+   if (queueState_ == QueueState::Stopped)
+   {
+       return;
+   }
+
+   auto accepting = QueueState::Accepting;
+
+   if (!queueState_.compare_exchange_strong(accepting, QueueState::Stopping))
+   {
+       XRPL_ASSERT(
+           false, "Incorrect queueState, should be accepting but not!");
+   }
+   std::map<void*, std::weak_ptr<Coro>> suspendedCoros;
+   {
+       std::unique_lock lock(m_mutex);
+       suspendedCoros = std::move(m_suspendedCoros);
+   }
+   if (!suspendedCoros.empty())
+   {
+       // We should resume the suspended coroutines so that the coroutines
+       // get a chance to exit cleanly.
+       for (auto& [_, coro] : suspendedCoros)
+       {
+           if (auto coroPtr = coro.lock())
+           {
+               // We don't allow any new jobs from outside when we are
+               // stopping, but we should allow new jobs from inside the class.
+               addJobNoStatusCheck(
+                   coroPtr->type_, coroPtr->name_, [coroPtr]() {
+                       coroPtr->resume();
+                   });
+           }
+       }
+   }
+
    using namespace std::chrono_literals;
    jobCounter_.join("JobQueue", 1s, m_journal);
    {
@@ -305,8 +349,9 @@ JobQueue::stop()
        // `Job::doJob` and the return of `JobQueue::processTask`. That is why
        // we must wait on the condition variable to make these assertions.
        std::unique_lock<std::mutex> lock(m_mutex);
-       cv_.wait(
-           lock, [this] { return m_processCount == 0 && m_jobSet.empty(); });
+       cv_.wait(lock, [this] {
+           return m_processCount == 0 && nSuspend_ == 0 && m_jobSet.empty();
+       });
        XRPL_ASSERT(
            m_processCount == 0,
            "ripple::JobQueue::stop : all processes completed");
@@ -314,14 +359,12 @@ JobQueue::stop()
            m_jobSet.empty(), "ripple::JobQueue::stop : all jobs completed");
        XRPL_ASSERT(
            nSuspend_ == 0, "ripple::JobQueue::stop : no coros suspended");
-       stopped_ = true;
    }
-}
-
-bool
-JobQueue::isStopped() const
-{
-   return stopped_;
+   auto stopping = QueueState::Stopping;
+   if (!queueState_.compare_exchange_strong(stopping, QueueState::Stopped))
+   {
+       XRPL_ASSERT(false, "Incorrect queueState, should be stopping but not!");
+   }
}

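For orientation, a standalone sketch of the two-phase transition stop() performs above, assuming only the QueueState enum from this diff; the job draining and coroutine bookkeeping are elided:

#include <atomic>
#include <cassert>

enum class QueueState { Accepting, Stopping, Stopped };

int
main()
{
    std::atomic<QueueState> queueState{QueueState::Accepting};

    // Phase 1: fence out new outside jobs. On failure,
    // compare_exchange_strong loads the state actually observed into
    // 'expected', which is what the XRPL_ASSERT above would report on.
    auto expected = QueueState::Accepting;
    bool ok =
        queueState.compare_exchange_strong(expected, QueueState::Stopping);
    assert(ok);

    // ... drain jobs and resume suspended coroutines here ...

    // Phase 2: only after everything drains does the queue become Stopped,
    // which is what isStopped() now reports.
    expected = QueueState::Stopping;
    ok = queueState.compare_exchange_strong(expected, QueueState::Stopped);
    assert(ok);
    (void)ok;
    return 0;
}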
void

@@ -53,6 +53,14 @@ public:
    virtual std::string
    getName() = 0;

+   /** Get the block size for backends that support it
+    */
+   virtual std::optional<std::size_t>
+   getBlockSize() const
+   {
+       return std::nullopt;
+   }
+
    /** Open the backend.
        @param createIfMissing Create the database files if necessary.
        This allows the caller to catch exceptions.
@@ -24,6 +24,7 @@
#include <xrpld/nodestore/detail/codec.h>

#include <xrpl/basics/contract.h>
+#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/beast/utility/instrumentation.h>

#include <boost/filesystem.hpp>
@@ -52,6 +53,7 @@ public:
    size_t const keyBytes_;
    std::size_t const burstSize_;
    std::string const name_;
+   std::size_t const blockSize_;
    nudb::store db_;
    std::atomic<bool> deletePath_;
    Scheduler& scheduler_;
@@ -66,6 +68,7 @@ public:
        , keyBytes_(keyBytes)
        , burstSize_(burstSize)
        , name_(get(keyValues, "path"))
+       , blockSize_(parseBlockSize(name_, keyValues, journal))
        , deletePath_(false)
        , scheduler_(scheduler)
    {
@@ -85,6 +88,7 @@ public:
        , keyBytes_(keyBytes)
        , burstSize_(burstSize)
        , name_(get(keyValues, "path"))
+       , blockSize_(parseBlockSize(name_, keyValues, journal))
        , db_(context)
        , deletePath_(false)
        , scheduler_(scheduler)
@@ -114,6 +118,12 @@ public:
        return name_;
    }

+   std::optional<std::size_t>
+   getBlockSize() const override
+   {
+       return blockSize_;
+   }
+
    void
    open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt)
        override
@@ -145,7 +155,7 @@ public:
            uid,
            salt,
            keyBytes_,
-           nudb::block_size(kp),
+           blockSize_,
            0.50,
            ec);
        if (ec == nudb::errc::file_exists)
@@ -361,6 +371,56 @@ public:
    {
        return 3;
    }

+private:
+   static std::size_t
+   parseBlockSize(
+       std::string const& name,
+       Section const& keyValues,
+       beast::Journal journal)
+   {
+       using namespace boost::filesystem;
+       auto const folder = path(name);
+       auto const kp = (folder / "nudb.key").string();
+
+       std::size_t const defaultSize =
+           nudb::block_size(kp);  // Default 4K from NuDB
+       std::size_t blockSize = defaultSize;
+       std::string blockSizeStr;
+
+       if (!get_if_exists(keyValues, "nudb_block_size", blockSizeStr))
+       {
+           return blockSize;  // Early return with default
+       }
+
+       try
+       {
+           std::size_t const parsedBlockSize =
+               beast::lexicalCastThrow<std::size_t>(blockSizeStr);
+
+           // Validate: must be power of 2 between 4K and 32K
+           if (parsedBlockSize < 4096 || parsedBlockSize > 32768 ||
+               (parsedBlockSize & (parsedBlockSize - 1)) != 0)
+           {
+               std::stringstream s;
+               s << "Invalid nudb_block_size: " << parsedBlockSize
+                 << ". Must be power of 2 between 4096 and 32768.";
+               Throw<std::runtime_error>(s.str());
+           }
+
+           JLOG(journal.info())
+               << "Using custom NuDB block size: " << parsedBlockSize
+               << " bytes";
+           return parsedBlockSize;
+       }
+       catch (std::exception const& e)
+       {
+           std::stringstream s;
+           s << "Invalid nudb_block_size value: " << blockSizeStr
+             << ". Error: " << e.what();
+           Throw<std::runtime_error>(s.str());
+       }
+   }
};
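parseBlockSize above leans on the standard bit trick that a power of two has exactly one set bit, so x & (x - 1) clears it to zero. The predicate in isolation, verifiable at compile time:

#include <cstddef>

// True exactly for 4096, 8192, 16384, and 32768 -- the block sizes the
// backend accepts here -- and false for zero, non-powers of two, and
// out-of-range values.
constexpr bool
isValidNudbBlockSize(std::size_t x)
{
    return x >= 4096 && x <= 32768 && (x & (x - 1)) == 0;
}

static_assert(isValidNudbBlockSize(8192));
static_assert(!isValidNudbBlockSize(12288));  // in range, not a power of 2
static_assert(!isValidNudbBlockSize(65536));  // power of 2, too large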
//------------------------------------------------------------------------------

@@ -128,21 +128,17 @@ doRipplePathFind(RPC::JsonContext& context)
        // May 2017
        jvResult = context.app.getPathRequests().makeLegacyPathRequest(
            request,
-           [&context]() {
-               // Copying the shared_ptr keeps the coroutine alive up
+           [coro = context.coro]() {
+               // Capturing the shared_ptr keeps the coroutine alive up
                // through the return. Otherwise the storage under the
                // captured reference could evaporate when we return from
-               // coroCopy->resume(). This is not strictly necessary, but
-               // will make maintenance easier.
-               std::shared_ptr<JobQueue::Coro> coroCopy{context.coro};
-               if (!coroCopy->post())
-               {
-                   // The post() failed, so we won't get a thread to let
-                   // the Coro finish. We'll call Coro::resume() so the
-                   // Coro can finish on our thread. Otherwise the
-                   // application will hang on shutdown.
-                   coroCopy->resume();
-               }
+               // coro->post().
+               // When post() failed, we won't get a thread to let
+               // the Coro finish. We should ignore the coroutine and
+               // let it destruct, as the JobQueu has been signaled to
+               // close, and resuming it manually messes up the internal
+               // state in JobQueue.
+               coro->post();
            },
            context.consumer,
            lpLedger,
@@ -150,6 +146,14 @@ doRipplePathFind(RPC::JsonContext& context)
        if (request)
        {
            context.coro->yield();
+           // Each time after we resume from yield(), we should
+           // check if cancellation has been requested. It would
+           // be a lot more elegant if we replace boost coroutine
+           // with c++ standard coroutine.
+           if (context.coro->shouldStop())
+           {
+               return jvResult;
+           }
            jvResult = request->doStatus(context.params);
        }

320 src/xrpld/rpc/handlers/ServerDefinitions.cpp (Normal file)
@@ -0,0 +1,320 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/Role.h>

#include <xrpl/json/json_value.h>
#include <xrpl/json/json_writer.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFormats.h>
#include <xrpl/protocol/digest.h>
#include <xrpl/protocol/jss.h>

#include <boost/algorithm/string.hpp>

#include <unordered_map>

namespace ripple {

namespace detail {

class ServerDefinitions
{
private:
    std::string
    // translate e.g. STI_LEDGERENTRY to LedgerEntry
    translate(std::string const& inp);

    uint256 defsHash_;
    Json::Value defs_;

public:
    ServerDefinitions();

    bool
    hashMatches(uint256 hash) const
    {
        return defsHash_ == hash;
    }

    Json::Value const&
    get() const
    {
        return defs_;
    }
};

std::string
ServerDefinitions::translate(std::string const& inp)
{
    auto replace = [&](char const* oldStr, char const* newStr) -> std::string {
        std::string out = inp;
        boost::replace_all(out, oldStr, newStr);
        return out;
    };

    auto contains = [&](char const* s) -> bool {
        return inp.find(s) != std::string::npos;
    };

    if (contains("UINT"))
    {
        if (contains("512") || contains("384") || contains("256") ||
            contains("192") || contains("160") || contains("128"))
            return replace("UINT", "Hash");
        else
            return replace("UINT", "UInt");
    }

    std::unordered_map<std::string, std::string> replacements{
        {"OBJECT", "STObject"},
        {"ARRAY", "STArray"},
        {"ACCOUNT", "AccountID"},
        {"LEDGERENTRY", "LedgerEntry"},
        {"NOTPRESENT", "NotPresent"},
        {"PATHSET", "PathSet"},
        {"VL", "Blob"},
        {"XCHAIN_BRIDGE", "XChainBridge"},
    };

    if (auto const& it = replacements.find(inp); it != replacements.end())
    {
        return it->second;
    }

    std::string out;
    size_t pos = 0;
    std::string inpToProcess = inp;

    // convert snake_case to CamelCase
    for (;;)
    {
        pos = inpToProcess.find("_");
        if (pos == std::string::npos)
            pos = inpToProcess.size();
        std::string token = inpToProcess.substr(0, pos);
        if (token.size() > 1)
        {
            boost::algorithm::to_lower(token);
            token.data()[0] -= ('a' - 'A');
            out += token;
        }
        else
            out += token;
        if (pos == inpToProcess.size())
            break;
        inpToProcess = inpToProcess.substr(pos + 1);
    }
    return out;
};

ServerDefinitions::ServerDefinitions() : defs_{Json::objectValue}
{
    // populate SerializedTypeID names and values
    defs_[jss::TYPES] = Json::objectValue;

    defs_[jss::TYPES]["Done"] = -1;
    std::map<int32_t, std::string> typeMap{{-1, "Done"}};
    for (auto const& [rawName, typeValue] : sTypeMap)
    {
        std::string typeName =
            translate(std::string(rawName).substr(4) /* remove STI_ */);
        defs_[jss::TYPES][typeName] = typeValue;
        typeMap[typeValue] = typeName;
    }

    // populate LedgerEntryType names and values
    defs_[jss::LEDGER_ENTRY_TYPES] = Json::objectValue;
    defs_[jss::LEDGER_ENTRY_TYPES][jss::Invalid] = -1;

    for (auto const& f : LedgerFormats::getInstance())
    {
        defs_[jss::LEDGER_ENTRY_TYPES][f.getName()] = f.getType();
    }

    // populate SField serialization data
    defs_[jss::FIELDS] = Json::arrayValue;

    uint32_t i = 0;
    {
        Json::Value a = Json::arrayValue;
        a[0U] = "Generic";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 0;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = false;
        v[jss::isSigningField] = false;
        v[jss::type] = "Unknown";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "Invalid";
        Json::Value v = Json::objectValue;
        v[jss::nth] = -1;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = false;
        v[jss::isSigningField] = false;
        v[jss::type] = "Unknown";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "ObjectEndMarker";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 1;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = true;
        v[jss::isSigningField] = true;
        v[jss::type] = "STObject";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "ArrayEndMarker";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 1;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = true;
        v[jss::isSigningField] = true;
        v[jss::type] = "STArray";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "taker_gets_funded";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 258;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = false;
        v[jss::isSigningField] = false;
        v[jss::type] = "Amount";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "taker_pays_funded";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 259;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = false;
        v[jss::isSigningField] = false;
        v[jss::type] = "Amount";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    for (auto const& [code, f] : ripple::SField::getKnownCodeToField())
    {
        if (f->fieldName == "")
            continue;

        Json::Value innerObj = Json::objectValue;

        uint32_t type = f->fieldType;

        innerObj[jss::nth] = f->fieldValue;

        // whether the field is variable-length encoded
        // this means that the length is included before the content
        innerObj[jss::isVLEncoded] =
            (type == 7U /* Blob */ || type == 8U /* AccountID */ ||
             type == 19U /* Vector256 */);

        // whether the field is included in serialization
        innerObj[jss::isSerialized] =
            (type < 10000 && f->fieldName != "hash" &&
             f->fieldName != "index"); /* hash, index, TRANSACTION,
                                          LEDGER_ENTRY, VALIDATION, METADATA */

        // whether the field is included in serialization when signing
        innerObj[jss::isSigningField] = f->shouldInclude(false);

        innerObj[jss::type] = typeMap[type];

        Json::Value innerArray = Json::arrayValue;
        innerArray[0U] = f->fieldName;
        innerArray[1U] = innerObj;

        defs_[jss::FIELDS][i++] = innerArray;
    }

    // populate TER code names and values
    defs_[jss::TRANSACTION_RESULTS] = Json::objectValue;

    for (auto const& [code, terInfo] : transResults())
    {
        defs_[jss::TRANSACTION_RESULTS][terInfo.first] = code;
    }

    // populate TxType names and values
    defs_[jss::TRANSACTION_TYPES] = Json::objectValue;
    defs_[jss::TRANSACTION_TYPES][jss::Invalid] = -1;
    for (auto const& f : TxFormats::getInstance())
    {
        defs_[jss::TRANSACTION_TYPES][f.getName()] = f.getType();
    }

    // generate hash
    {
        std::string const out = Json::FastWriter().write(defs_);
        defsHash_ = ripple::sha512Half(ripple::Slice{out.data(), out.size()});
        defs_[jss::hash] = to_string(defsHash_);
    }
}

} // namespace detail

Json::Value
doServerDefinitions(RPC::JsonContext& context)
{
    auto& params = context.params;

    uint256 hash;
    if (params.isMember(jss::hash))
    {
        if (!params[jss::hash].isString() ||
            !hash.parseHex(params[jss::hash].asString()))
            return RPC::invalid_field_error(jss::hash);
    }

    static detail::ServerDefinitions const defs{};
    if (defs.hashMatches(hash))
    {
        Json::Value jv = Json::objectValue;
        jv[jss::hash] = to_string(hash);
        return jv;
    }
    return defs.get();
}

} // namespace ripple
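doServerDefinitions above implements a cheap client-side cache check: a full reply carries a hash of the definitions, and echoing that hash back short-circuits to a hash-only reply. A hedged sketch of the client-side parameter construction, in the same shape the ServerDefinitions_test earlier in this diff sends; makeDefinitionsParams and cachedHashHex are illustrative names:

#include <string>

// Builds the params JSON for a follow-up server_definitions call. On a
// hash match the server replies with only {"hash": ...}; any other value,
// or omitting the field, returns the full FIELDS/TYPES payload.
std::string
makeDefinitionsParams(std::string const& cachedHashHex)
{
    return std::string("{ ") + "\"hash\": \"" + cachedHashHex + "\"}";
}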
@@ -23,301 +23,10 @@

Json::Value
doServerInfo(RPC::JsonContext& context)
{