Compare commits

...

21 Commits

Author SHA1 Message Date
JCW
bf4dc342c6 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-10-29 14:36:18 +00:00
JCW
a71cd5d271 Address PR comments
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-10-28 11:31:45 +00:00
JCW
45baf7339c Merge remote-tracking branch 'origin/develop' into a1q123456/remove-const-cast-from-tagged-cache 2025-10-28 11:26:58 +00:00
Shawn Xie
f4f7618173 Change fixMPTDeliveredAmount to Supported::yes (#5833)
Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com>
2025-10-27 19:04:14 +00:00
Ayaz Salikhov
66f16469f9 fix: Upload all test binaries (#5932) 2025-10-27 17:27:56 +00:00
Ayaz Salikhov
1845b1c656 chore: Better pre-commit failure message (#5940) 2025-10-27 14:43:45 +00:00
Ayaz Salikhov
e192ffe964 fix: Clean up build profile options (#5934)
The `-Wno-missing-template-arg-list-after-template-kw` flag is only needed for the grpc library. Use `+=` for the default build flags to make it easier to extend in the future.

Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com>
2025-10-24 15:16:15 +00:00
Pratik Mankawde
2bf77cc8f6 refactor: Retire fix1515 amendment (#5920)
Amendments activated for more than 2 years can be retired. This change retires the fix1515 amendment.

Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com>
2025-10-23 13:35:54 +00:00
Ayaz Salikhov
5e33ca56fd Use "${ENVVAR}" instead of ${{ env.ENVVAR }} syntax in GitHub Actions (#5923) 2025-10-22 18:43:04 +00:00
Pratik Mankawde
7c39c810eb Moved fix1513 to retire state (#5919)
Signed-off-by: Pratik Mankawde <pmankawde@ripple.com>
2025-10-22 14:50:43 +00:00
Valon Mamudi
a7792ebcae Add configurable NuDB block size feature (#5468)
As XRPL network demand grows and ledger sizes increase, the default 4K NuDB block size becomes a performance bottleneck, especially on high-performance storage systems. Modern SSDs and enterprise storage often perform better with larger block sizes, but rippled previously had no way to configure this parameter. This change therefore implements configurable NuDB block size support, allowing operators to optimize storage performance based on their hardware configuration. The feature adds a new `nudb_block_size` configuration parameter that enables block sizes from 4K to 32K bytes, with comprehensive validation and backward compatibility.

Specific changes are:
- Implements `parseBlockSize()` function with validation.
- Adds `nudb_block_size` configuration parameter.
- Supports block sizes from 4K to 32K (power of 2).
- Adds comprehensive logging and error handling.
- Maintains backward compatibility with 4K default.
- Adds unit tests for block size validation.
- Updates configuration documentation with performance guidance.
- Marks feature as experimental.
- Applies code formatting fixes.

Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com>
2025-10-21 00:51:44 +00:00
Bronek Kozicki
83ee3788e1 fix: Enforce reserve when creating trust line or MPToken in VaultWithdraw (#5857)
Similarly to other transaction types that can create a trust line or MPToken for the transaction submitter (e.g. CashCheck #5285, EscrowFinish #5185), VaultWithdraw should enforce reserve before creating a new object. Additionally, the lsfRequireDestTag account flag should be enforced for the transaction submitter.

Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com>
2025-10-20 23:07:12 +00:00
Mayukha Vadari
ae719b86d3 refactor: move server_definitions code to its own files (#5890) 2025-10-20 22:24:48 +00:00
Mayukha Vadari
dd722f8b3f chore: remove unnecessary LCOV_EXCL_LINE (#5913) 2025-10-20 22:23:52 +00:00
Bart
30190a5feb chore: Set explicit timeouts for build and test jobs (#5912)
The default job timeout is 5 hours, while build times are anywhere between 4-20 mins and test times between 2-10. As a runner occasionally gets stuck, we should fail much quicker.

Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com>
2025-10-20 20:49:19 +00:00
JCW
2bc2930a28 Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-18 20:16:36 +01:00
JCW
e837171f7c Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-18 20:16:36 +01:00
JCW
6f05bd035c Add test case and improve test coverage
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-18 20:16:36 +01:00
JCW
b98b42bbec Fix comment and fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-18 20:16:36 +01:00
JCW
6e6ea4311b Add unittest
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-09-18 20:16:36 +01:00
JCW
f2271305e5 Fix attempt 2025-09-18 20:16:34 +01:00
32 changed files with 1724 additions and 742 deletions

View File

@@ -31,14 +31,14 @@ runs:
VERBOSITY: ${{ inputs.verbosity }}
run: |
echo 'Installing dependencies.'
mkdir -p '${{ env.BUILD_DIR }}'
cd '${{ env.BUILD_DIR }}'
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
conan install \
--output-folder . \
--build=${{ env.BUILD_OPTION }} \
--build="${BUILD_OPTION}" \
--options:host='&:tests=True' \
--options:host='&:xrpld=True' \
--settings:all build_type='${{ env.BUILD_TYPE }}' \
--conf:all tools.build:verbosity='${{ env.VERBOSITY }}' \
--conf:all tools.compilation:verbosity='${{ env.VERBOSITY }}' \
--settings:all build_type="${BUILD_TYPE}" \
--conf:all tools.build:verbosity="${VERBOSITY}" \
--conf:all tools.compilation:verbosity="${VERBOSITY}" \
..

View File

@@ -39,8 +39,8 @@ runs:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
run: |
echo "Adding Conan remote '${{ env.CONAN_REMOTE_NAME }}' at '${{ env.CONAN_REMOTE_URL }}'."
conan remote add --index 0 --force '${{ env.CONAN_REMOTE_NAME }}' '${{ env.CONAN_REMOTE_URL }}'
echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"
echo 'Listing Conan remotes.'
conan remote list

View File

@@ -9,7 +9,7 @@ on:
jobs:
# Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
run-hooks:
uses: XRPLF/actions/.github/workflows/pre-commit.yml@a8d7472b450eb53a1e5228f64552e5974457a21a
uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
with:
runs_on: ubuntu-latest
container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }'

View File

@@ -48,8 +48,8 @@ jobs:
doxygen --version
- name: Build documentation
run: |
mkdir -p ${{ env.BUILD_DIR }}
cd ${{ env.BUILD_DIR }}
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
cmake -Donly_docs=ON ..
cmake --build . --target docs --parallel $(nproc)
- name: Publish documentation

View File

@@ -48,6 +48,7 @@ jobs:
name: Build ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 60
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
@@ -83,8 +84,8 @@ jobs:
cmake \
-G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \
${{ env.CMAKE_ARGS }} \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
${CMAKE_ARGS} \
..
- name: Build the binary
@@ -96,15 +97,31 @@ jobs:
run: |
cmake \
--build . \
--config ${{ env.BUILD_TYPE }} \
--config "${BUILD_TYPE}" \
--parallel $(nproc) \
--target ${{ env.CMAKE_TARGET }}
--target "${CMAKE_TARGET}"
- name: Put built binaries in one location
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE_DIR: ${{ runner.os == 'Windows' && inputs.build_type || '' }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
mkdir -p ./binaries/doctest/
cp ./${BUILD_TYPE_DIR}/rippled* ./binaries/
if [ "${CMAKE_TARGET}" != 'coverage' ]; then
cp ./src/tests/libxrpl/${BUILD_TYPE_DIR}/xrpl.test.* ./binaries/doctest/
fi
- name: Upload rippled artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
env:
BUILD_DIR: ${{ inputs.build_dir }}
with:
name: rippled-${{ inputs.config_name }}
path: ${{ inputs.build_dir }}/${{ runner.os == 'Windows' && inputs.build_type || '' }}/rippled${{ runner.os == 'Windows' && '.exe' || '' }}
path: ${{ env.BUILD_DIR }}/binaries/
retention-days: 3
if-no-files-found: error

View File

@@ -51,7 +51,7 @@ jobs:
run: |
echo 'Generating user and channel.'
echo "user=clio" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${{ env.PR_NUMBER }}" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
echo 'Extracting version.'
echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
- name: Calculate conan reference
@@ -66,13 +66,13 @@ jobs:
- name: Log into Conan remote
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
- name: Upload package
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: |
conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
conan upload --confirm --check --remote=${{ env.CONAN_REMOTE_NAME }} xrpl/${{ steps.conan_ref.outputs.conan_ref }}
conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
outputs:
conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
@@ -88,4 +88,4 @@ jobs:
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-F "client_payload[pr_url]=${{ env.PR_URL }}"
-F "client_payload[pr_url]=${PR_URL}"

View File

@@ -38,4 +38,4 @@ jobs:
env:
GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
run: ./generate.py ${{ env.GENERATE_OPTION }} ${{ env.GENERATE_CONFIG }} >> "${GITHUB_OUTPUT}"
run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"

View File

@@ -31,7 +31,12 @@ jobs:
name: Test ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 30
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Download rippled artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
@@ -61,9 +66,22 @@ jobs:
run: |
./rippled --version | grep libvoidstar
- name: Test the binary
- name: Run the embedded tests
if: ${{ inputs.run_tests }}
shell: bash
run: |
./rippled --unittest --unittest-jobs $(nproc)
ctest -j $(nproc) --output-on-failure
- name: Run the separate tests
if: ${{ inputs.run_tests }}
shell: bash
run: |
for test_file in ./doctest/*; do
echo "Executing $test_file"
chmod +x "$test_file"
if [[ "${{ runner.os }}" == "Windows" && "$test_file" == "./doctest/xrpl.test.net.exe" ]]; then
echo "Skipping $test_file on Windows"
else
"$test_file"
fi
done

View File

@@ -85,10 +85,10 @@ jobs:
- name: Log into Conan remote
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
- name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
env:
FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
run: conan upload "*" --remote='${{ env.CONAN_REMOTE_NAME }}' --confirm ${{ env.FORCE_OPTION }}
run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}

View File

@@ -975,6 +975,47 @@
# number of ledger records online. Must be greater
# than or equal to ledger_history.
#
# Optional keys for NuDB only:
#
# nudb_block_size EXPERIMENTAL: Block size in bytes for NuDB storage.
# Must be a power of 2 between 4096 and 32768. Default is 4096.
#
# This parameter controls the fundamental storage unit
# size for NuDB's internal data structures. The choice
# of block size can significantly impact performance
# depending on your storage hardware and filesystem:
#
# - 4096 bytes: Optimal for most standard SSDs and
# traditional filesystems (ext4, NTFS, HFS+).
# Provides good balance of performance and storage
# efficiency. Recommended for most deployments.
# Minimizes memory footprint and provides consistent
# low-latency access patterns across diverse hardware.
#
# - 8192-16384 bytes: May improve performance on
# high-end NVMe SSDs and copy-on-write filesystems
# like ZFS or Btrfs that benefit from larger block
# alignment. Can reduce metadata overhead for large
# databases. Offers better sequential throughput and
# reduced I/O operations at the cost of higher memory
# usage per operation.
#
# - 32768 bytes (32K): Maximum supported block size
# for high-performance scenarios with very fast
# storage. May increase memory usage and reduce
# efficiency for smaller databases. Best suited for
# enterprise environments with abundant RAM.
#
# Performance testing is recommended before deploying
# any non-default block size in production environments.
#
# Note: This setting cannot be changed after database
# creation without rebuilding the entire database.
# Choose carefully based on your hardware and expected
# database size.
#
# Example: nudb_block_size=4096
#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:
#
@@ -1471,6 +1512,7 @@ secure_gateway = 127.0.0.1
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
nudb_block_size=4096
online_delete=512
advisory_delete=0

View File

@@ -7,7 +7,7 @@ function(xrpl_add_test name)
"${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
)
add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources})
add_executable(${target} ${ARGN} ${sources})
isolate_headers(
${target}

View File

@@ -21,11 +21,11 @@ compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}
[conf]
{% if compiler == "clang" and compiler_version >= 19 %}
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "apple-clang" and compiler_version >= 17 %}
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags=['-Wno-restrict']
tools.build:cxxflags+=['-Wno-restrict']
{% endif %}

View File

@@ -127,15 +127,16 @@ public:
@param key The key corresponding to the object
@param data A shared pointer to the data corresponding to the object.
@param replace Function that decides if cache should be replaced
@param replaceCallback Function that decides if cache should be replaced
@return `true` If the key already existed.
@return First item: `true` If the key already existed; Second item: The
canonicalized item.
*/
template <class R>
bool
std::pair<bool, SharedPointerType>
canonicalize(
key_type const& key,
SharedPointerType& data,
SharedPointerType const& data,
R&& replaceCallback);
bool

View File

@@ -403,7 +403,7 @@ template <
class KeyEqual,
class Mutex>
template <class R>
inline bool
inline std::pair<bool, SharedPointerType>
TaggedCache<
Key,
T,
@@ -415,11 +415,9 @@ TaggedCache<
Mutex>::
canonicalize(
key_type const& key,
SharedPointerType& data,
SharedPointerType const& data,
R&& replaceCallback)
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
std::lock_guard lock(m_mutex);
auto cit = m_cache.find(key);
@@ -431,62 +429,49 @@ TaggedCache<
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
++m_cache_count;
return false;
return std::make_pair(false, data);
}
Entry& entry = cit->second;
entry.touch(m_clock.now());
auto shouldReplace = [&] {
auto replaceEntryIfNecessary = [&] {
bool shouldReplace = false;
if constexpr (std::is_invocable_r_v<bool, R>)
{
// The reason for this extra complexity is for intrusive
// strong/weak combo getting a strong is relatively expensive
// and not needed for many cases.
return replaceCallback();
shouldReplace = replaceCallback();
}
else
{
return replaceCallback(entry.ptr.getStrong());
shouldReplace = replaceCallback(entry.ptr.getStrong());
}
if (shouldReplace)
entry.ptr = data;
};
if (entry.isCached())
{
if (shouldReplace())
{
entry.ptr = data;
}
else
{
data = entry.ptr.getStrong();
}
return true;
replaceEntryIfNecessary();
return std::make_pair(true, entry.ptr.getStrong());
}
auto cachedData = entry.lock();
if (cachedData)
{
if (shouldReplace())
{
entry.ptr = data;
}
else
{
entry.ptr.convertToStrong();
data = cachedData;
}
replaceEntryIfNecessary();
entry.ptr.convertToStrong();
++m_cache_count;
return true;
return std::make_pair(true, entry.ptr.getStrong());
}
entry.ptr = data;
++m_cache_count;
return false;
return std::make_pair(false, data);
}
template <
@@ -512,8 +497,8 @@ TaggedCache<
key_type const& key,
SharedPointerType const& data)
{
return canonicalize(
key, const_cast<SharedPointerType&>(data), []() { return true; });
auto [alreadyExists, _] = canonicalize(key, data, []() { return true; });
return alreadyExists;
}
template <
@@ -537,7 +522,10 @@ TaggedCache<
Mutex>::
canonicalize_replace_client(key_type const& key, SharedPointerType& data)
{
return canonicalize(key, data, []() { return false; });
auto [alreadyExists, itemInCache] =
canonicalize(key, data, []() { return false; });
data = itemInCache;
return alreadyExists;
}
template <

View File

@@ -37,7 +37,7 @@ XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo
XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
@@ -109,15 +109,12 @@ XRPL_FIX (MasterKeyAsRegularKey, Supported::yes, VoteBehavior::DefaultYe
XRPL_FIX (TakerDryOfferRemoval, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(MultiSignReserve, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1578, Supported::yes, VoteBehavior::DefaultYes)
// fix1515: Use liquidity from strands that consume max offers, but mark as dry
XRPL_FIX (1515, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositPreauth, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1623, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1543, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1571, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositAuth, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1513, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYes)
// The following amendments are obsolete, but must remain supported
@@ -156,3 +153,5 @@ XRPL_RETIRE(fix1512)
XRPL_RETIRE(fix1523)
XRPL_RETIRE(fix1528)
XRPL_RETIRE(FlowCross)
XRPL_RETIRE(fix1513)
XRPL_RETIRE(fix1515)

View File

@@ -909,7 +909,7 @@ TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
Delegation::delegatable,
featureSingleAssetVault,
mayDeleteMPT | mustModifyVault,
mayDeleteMPT | mayAuthorizeMPT | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},

View File

@@ -1242,6 +1242,12 @@ addEmptyHolding(
// If the line already exists, don't create it again.
if (view.read(index))
return tecDUPLICATE;
// Can the account cover the trust line reserve ?
std::uint32_t const ownerCount = sleDst->at(sfOwnerCount);
if (priorBalance < view.fees().accountReserve(ownerCount + 1))
return tecNO_LINE_INSUF_RESERVE;
return trustCreate(
view,
high,

View File

@@ -1346,14 +1346,11 @@ struct Flow_manual_test : public Flow_test
{
using namespace jtx;
auto const all = testable_amendments();
FeatureBitset const f1513{fix1513};
FeatureBitset const permDex{featurePermissionedDEX};
testWithFeats(all - f1513 - permDex);
testWithFeats(all - permDex);
testWithFeats(all);
testEmptyStrand(all - f1513 - permDex);
testEmptyStrand(all - permDex);
testEmptyStrand(all);
}

View File

@@ -5450,13 +5450,12 @@ class Offer_manual_test : public OfferBaseUtil_test
{
using namespace jtx;
FeatureBitset const all{testable_amendments()};
FeatureBitset const f1513{fix1513};
FeatureBitset const immediateOfferKilled{featureImmediateOfferKilled};
FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval};
FeatureBitset const fillOrKill{fixFillOrKill};
FeatureBitset const permDEX{featurePermissionedDEX};
testAll(all - f1513 - immediateOfferKilled - permDEX);
testAll(all - immediateOfferKilled - permDEX);
testAll(all - immediateOfferKilled - fillOrKill - permDEX);
testAll(all - fillOrKill - permDEX);
testAll(all - permDEX);

View File

@@ -59,14 +59,15 @@ class Vault_test : public beast::unit_test::suite
testSequences()
{
using namespace test::jtx;
Account issuer{"issuer"};
Account owner{"owner"};
Account depositor{"depositor"};
Account charlie{"charlie"}; // authorized 3rd party
Account dave{"dave"};
auto const testSequence = [this](
auto const testSequence = [&, this](
std::string const& prefix,
Env& env,
Account const& issuer,
Account const& owner,
Account const& depositor,
Account const& charlie,
Vault& vault,
PrettyAsset const& asset) {
auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
@@ -104,11 +105,9 @@ class Vault_test : public beast::unit_test::suite
// Several 3rd party accounts which cannot receive funds
Account alice{"alice"};
Account dave{"dave"};
Account erin{"erin"}; // not authorized by issuer
env.fund(XRP(1000), alice, dave, erin);
env.fund(XRP(1000), alice, erin);
env(fset(alice, asfDepositAuth));
env(fset(dave, asfRequireDest));
env.close();
{
@@ -328,19 +327,6 @@ class Vault_test : public beast::unit_test::suite
env.close();
}
{
testcase(
prefix +
" fail to withdraw with tag but without destination");
auto tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(1000)});
tx[sfDestinationTag] = "0";
env(tx, ter(temMALFORMED));
env.close();
}
if (!asset.raw().native())
{
testcase(
@@ -368,12 +354,49 @@ class Vault_test : public beast::unit_test::suite
env.close();
}
{
testcase(prefix + " withdraw to 3rd party lsfRequireDestTag");
auto tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(50)});
tx[sfDestination] = dave.human();
tx[sfDestinationTag] = "0";
env(tx);
env.close();
}
{
testcase(prefix + " deposit again");
auto tx = vault.deposit(
{.depositor = dave, .id = keylet.key, .amount = asset(50)});
env(tx);
env.close();
}
{
testcase(prefix + " fail to withdraw lsfRequireDestTag");
auto tx = vault.withdraw(
{.depositor = dave, .id = keylet.key, .amount = asset(50)});
env(tx, ter{tecDST_TAG_NEEDED});
env.close();
}
{
testcase(prefix + " withdraw with tag");
auto tx = vault.withdraw(
{.depositor = dave, .id = keylet.key, .amount = asset(50)});
tx[sfDestinationTag] = "0";
env(tx);
env.close();
}
{
testcase(prefix + " withdraw to authorized 3rd party");
auto tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(100)});
.amount = asset(50)});
tx[sfDestination] = charlie.human();
env(tx);
env.close();
@@ -523,80 +546,56 @@ class Vault_test : public beast::unit_test::suite
}
};
auto testCases = [this, &testSequence](
auto testCases = [&, this](
std::string prefix,
std::function<PrettyAsset(
Env & env,
Account const& issuer,
Account const& owner,
Account const& depositor,
Account const& charlie)> setup) {
std::function<PrettyAsset(Env & env)> setup) {
Env env{*this, testable_amendments() | featureSingleAssetVault};
Account issuer{"issuer"};
Account owner{"owner"};
Account depositor{"depositor"};
Account charlie{"charlie"}; // authorized 3rd party
Vault vault{env};
env.fund(XRP(1000), issuer, owner, depositor, charlie);
env.fund(XRP(1000), issuer, owner, depositor, charlie, dave);
env.close();
env(fset(issuer, asfAllowTrustLineClawback));
env(fset(issuer, asfRequireAuth));
env(fset(dave, asfRequireDest));
env.close();
env.require(flags(issuer, asfAllowTrustLineClawback));
env.require(flags(issuer, asfRequireAuth));
PrettyAsset asset = setup(env, issuer, owner, depositor, charlie);
testSequence(
prefix, env, issuer, owner, depositor, charlie, vault, asset);
PrettyAsset asset = setup(env);
testSequence(prefix, env, vault, asset);
};
testCases(
"XRP",
[](Env& env,
Account const& issuer,
Account const& owner,
Account const& depositor,
Account const& charlie) -> PrettyAsset {
return {xrpIssue(), 1'000'000};
});
testCases("XRP", [&](Env& env) -> PrettyAsset {
return {xrpIssue(), 1'000'000};
});
testCases(
"IOU",
[](Env& env,
Account const& issuer,
Account const& owner,
Account const& depositor,
Account const& charlie) -> Asset {
PrettyAsset asset = issuer["IOU"];
env(trust(owner, asset(1000)));
env(trust(depositor, asset(1000)));
env(trust(charlie, asset(1000)));
env(trust(issuer, asset(0), owner, tfSetfAuth));
env(trust(issuer, asset(0), depositor, tfSetfAuth));
env(trust(issuer, asset(0), charlie, tfSetfAuth));
env(pay(issuer, depositor, asset(1000)));
env.close();
return asset;
});
testCases("IOU", [&](Env& env) -> Asset {
PrettyAsset asset = issuer["IOU"];
env(trust(owner, asset(1000)));
env(trust(depositor, asset(1000)));
env(trust(charlie, asset(1000)));
env(trust(dave, asset(1000)));
env(trust(issuer, asset(0), owner, tfSetfAuth));
env(trust(issuer, asset(0), depositor, tfSetfAuth));
env(trust(issuer, asset(0), charlie, tfSetfAuth));
env(trust(issuer, asset(0), dave, tfSetfAuth));
env(pay(issuer, depositor, asset(1000)));
env.close();
return asset;
});
testCases(
"MPT",
[](Env& env,
Account const& issuer,
Account const& owner,
Account const& depositor,
Account const& charlie) -> Asset {
MPTTester mptt{env, issuer, mptInitNoFund};
mptt.create(
{.flags =
tfMPTCanClawback | tfMPTCanTransfer | tfMPTCanLock});
PrettyAsset asset = mptt.issuanceID();
mptt.authorize({.account = depositor});
mptt.authorize({.account = charlie});
env(pay(issuer, depositor, asset(1000)));
env.close();
return asset;
});
testCases("MPT", [&](Env& env) -> Asset {
MPTTester mptt{env, issuer, mptInitNoFund};
mptt.create(
{.flags = tfMPTCanClawback | tfMPTCanTransfer | tfMPTCanLock});
PrettyAsset asset = mptt.issuanceID();
mptt.authorize({.account = depositor});
mptt.authorize({.account = charlie});
mptt.authorize({.account = dave});
env(pay(issuer, depositor, asset(1000)));
env.close();
return asset;
});
}
void
@@ -1672,6 +1671,7 @@ class Vault_test : public beast::unit_test::suite
{
bool enableClawback = true;
bool requireAuth = true;
int initialXRP = 1000;
};
auto testCase = [this](
@@ -1688,7 +1688,7 @@ class Vault_test : public beast::unit_test::suite
Account issuer{"issuer"};
Account owner{"owner"};
Account depositor{"depositor"};
env.fund(XRP(1000), issuer, owner, depositor);
env.fund(XRP(args.initialXRP), issuer, owner, depositor);
env.close();
Vault vault{env};
@@ -1868,9 +1868,7 @@ class Vault_test : public beast::unit_test::suite
PrettyAsset const& asset,
Vault& vault,
MPTTester& mptt) {
testcase(
"MPT 3rd party without MPToken cannot be withdrawal "
"destination");
testcase("MPT depositor without MPToken, auth required");
auto [tx, keylet] =
vault.create({.owner = owner, .asset = asset});
@@ -1880,10 +1878,32 @@ class Vault_test : public beast::unit_test::suite
tx = vault.deposit(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(100)});
.amount = asset(1000)});
env(tx);
env.close();
{
// Remove depositor MPToken and it will not be re-created
mptt.authorize(
{.account = depositor, .flags = tfMPTUnauthorize});
env.close();
auto const mptoken =
keylet::mptoken(mptt.issuanceID(), depositor);
auto const sleMPT1 = env.le(mptoken);
BEAST_EXPECT(sleMPT1 == nullptr);
tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(100)});
env(tx, ter{tecNO_AUTH});
env.close();
auto const sleMPT2 = env.le(mptoken);
BEAST_EXPECT(sleMPT2 == nullptr);
}
{
// Set destination to 3rd party without MPToken
Account charlie{"charlie"};
@@ -1898,7 +1918,7 @@ class Vault_test : public beast::unit_test::suite
env(tx, ter(tecNO_AUTH));
}
},
{.requireAuth = false});
{.requireAuth = true});
testCase(
[this](
@@ -1909,7 +1929,7 @@ class Vault_test : public beast::unit_test::suite
PrettyAsset const& asset,
Vault& vault,
MPTTester& mptt) {
testcase("MPT depositor without MPToken cannot withdraw");
testcase("MPT depositor without MPToken, no auth required");
auto [tx, keylet] =
vault.create({.owner = owner, .asset = asset});
@@ -1917,7 +1937,6 @@ class Vault_test : public beast::unit_test::suite
env.close();
auto v = env.le(keylet);
BEAST_EXPECT(v);
MPTID share = (*v)[sfShareMPTID];
tx = vault.deposit(
{.depositor = depositor,
@@ -1927,41 +1946,120 @@ class Vault_test : public beast::unit_test::suite
env.close();
{
// Remove depositor's MPToken and withdraw will fail
// Remove depositor's MPToken and it will be re-created
mptt.authorize(
{.account = depositor, .flags = tfMPTUnauthorize});
env.close();
auto const mptoken =
env.le(keylet::mptoken(mptt.issuanceID(), depositor));
BEAST_EXPECT(mptoken == nullptr);
keylet::mptoken(mptt.issuanceID(), depositor);
auto const sleMPT1 = env.le(mptoken);
BEAST_EXPECT(sleMPT1 == nullptr);
tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(100)});
env(tx, ter(tecNO_AUTH));
env(tx);
env.close();
auto const sleMPT2 = env.le(mptoken);
BEAST_EXPECT(sleMPT2 != nullptr);
BEAST_EXPECT(sleMPT2->at(sfMPTAmount) == 100);
}
{
// Restore depositor's MPToken and withdraw will succeed
mptt.authorize({.account = depositor});
// Remove 3rd party MPToken and it will not be re-created
mptt.authorize(
{.account = owner, .flags = tfMPTUnauthorize});
env.close();
auto const mptoken =
keylet::mptoken(mptt.issuanceID(), owner);
auto const sleMPT1 = env.le(mptoken);
BEAST_EXPECT(sleMPT1 == nullptr);
tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(1000)});
env(tx);
.amount = asset(100)});
tx[sfDestination] = owner.human();
env(tx, ter(tecNO_AUTH));
env.close();
// Withdraw removed shares MPToken
auto const mptSle =
env.le(keylet::mptoken(share, depositor.id()));
BEAST_EXPECT(mptSle == nullptr);
auto const sleMPT2 = env.le(mptoken);
BEAST_EXPECT(sleMPT2 == nullptr);
}
},
{.requireAuth = false});
auto const [acctReserve, incReserve] = [this]() -> std::pair<int, int> {
Env env{*this, testable_amendments()};
return {
env.current()->fees().accountReserve(0).drops() /
DROPS_PER_XRP.drops(),
env.current()->fees().increment.drops() /
DROPS_PER_XRP.drops()};
}();
testCase(
[&, this](
Env& env,
Account const& issuer,
Account const& owner,
Account const& depositor,
PrettyAsset const& asset,
Vault& vault,
MPTTester& mptt) {
testcase("MPT failed reserve to re-create MPToken");
auto [tx, keylet] =
vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
auto v = env.le(keylet);
BEAST_EXPECT(v);
env(pay(depositor, owner, asset(1000)));
env.close();
tx = vault.deposit(
{.depositor = owner,
.id = keylet.key,
.amount = asset(1000)}); // all assets held by owner
env(tx);
env.close();
{
// Remove owners's MPToken and it will not be re-created
mptt.authorize(
{.account = owner, .flags = tfMPTUnauthorize});
env.close();
auto const mptoken =
keylet::mptoken(mptt.issuanceID(), owner);
auto const sleMPT = env.le(mptoken);
BEAST_EXPECT(sleMPT == nullptr);
// No reserve to create MPToken for asset in VaultWithdraw
tx = vault.withdraw(
{.depositor = owner,
.id = keylet.key,
.amount = asset(100)});
env(tx, ter{tecINSUFFICIENT_RESERVE});
env.close();
env(pay(depositor, owner, XRP(incReserve)));
env.close();
// Withdraw can now create asset MPToken, tx will succeed
env(tx);
env.close();
}
},
{.requireAuth = false,
.initialXRP = acctReserve + incReserve * 4 - 1});
testCase([this](
Env& env,
Account const& issuer,
@@ -2320,23 +2418,30 @@ class Vault_test : public beast::unit_test::suite
{
using namespace test::jtx;
struct CaseArgs
{
int initialXRP = 1000;
double transferRate = 1.0;
};
auto testCase =
[&,
this](std::function<void(
Env & env,
Account const& owner,
Account const& issuer,
Account const& charlie,
std::function<Account(ripple::Keylet)> vaultAccount,
Vault& vault,
PrettyAsset const& asset,
std::function<MPTID(ripple::Keylet)> issuanceId)> test) {
[&, this](
std::function<void(
Env & env,
Account const& owner,
Account const& issuer,
Account const& charlie,
std::function<Account(ripple::Keylet)> vaultAccount,
Vault& vault,
PrettyAsset const& asset,
std::function<MPTID(ripple::Keylet)> issuanceId)> test,
CaseArgs args = {}) {
Env env{*this, testable_amendments() | featureSingleAssetVault};
Account const owner{"owner"};
Account const issuer{"issuer"};
Account const charlie{"charlie"};
Vault vault{env};
env.fund(XRP(1000), issuer, owner, charlie);
env.fund(XRP(args.initialXRP), issuer, owner, charlie);
env(fset(issuer, asfAllowTrustLineClawback));
env.close();
@@ -2344,7 +2449,7 @@ class Vault_test : public beast::unit_test::suite
env.trust(asset(1000), owner);
env.trust(asset(1000), charlie);
env(pay(issuer, owner, asset(200)));
env(rate(issuer, 1.25));
env(rate(issuer, args.transferRate));
env.close();
auto const vaultAccount =
@@ -2505,73 +2610,81 @@ class Vault_test : public beast::unit_test::suite
env.close();
});
testCase([&, this](
Env& env,
Account const& owner,
Account const& issuer,
Account const& charlie,
auto vaultAccount,
Vault& vault,
PrettyAsset const& asset,
auto issuanceId) {
testcase("IOU transfer fees not applied");
testCase(
[&, this](
Env& env,
Account const& owner,
Account const& issuer,
Account const& charlie,
auto vaultAccount,
Vault& vault,
PrettyAsset const& asset,
auto issuanceId) {
testcase("IOU transfer fees not applied");
auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
env(vault.deposit(
{.depositor = owner, .id = keylet.key, .amount = asset(100)}));
env.close();
auto const issue = asset.raw().get<Issue>();
Asset const share = Asset(issuanceId(keylet));
// transfer fees ignored on deposit
BEAST_EXPECT(env.balance(owner, issue) == asset(100));
BEAST_EXPECT(
env.balance(vaultAccount(keylet), issue) == asset(100));
{
auto tx = vault.clawback(
{.issuer = issuer,
.id = keylet.key,
.holder = owner,
.amount = asset(50)});
auto [tx, keylet] =
vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
}
// transfer fees ignored on clawback
BEAST_EXPECT(env.balance(owner, issue) == asset(100));
BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(50));
env(vault.withdraw(
{.depositor = owner,
.id = keylet.key,
.amount = share(20'000'000)}));
// transfer fees ignored on withdraw
BEAST_EXPECT(env.balance(owner, issue) == asset(120));
BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(30));
{
auto tx = vault.withdraw(
env(vault.deposit(
{.depositor = owner,
.id = keylet.key,
.amount = share(30'000'000)});
tx[sfDestination] = charlie.human();
env(tx);
}
.amount = asset(100)}));
env.close();
// transfer fees ignored on withdraw to 3rd party
BEAST_EXPECT(env.balance(owner, issue) == asset(120));
BEAST_EXPECT(env.balance(charlie, issue) == asset(30));
BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(0));
auto const issue = asset.raw().get<Issue>();
Asset const share = Asset(issuanceId(keylet));
env(vault.del({.owner = owner, .id = keylet.key}));
env.close();
});
// transfer fees ignored on deposit
BEAST_EXPECT(env.balance(owner, issue) == asset(100));
BEAST_EXPECT(
env.balance(vaultAccount(keylet), issue) == asset(100));
{
auto tx = vault.clawback(
{.issuer = issuer,
.id = keylet.key,
.holder = owner,
.amount = asset(50)});
env(tx);
env.close();
}
// transfer fees ignored on clawback
BEAST_EXPECT(env.balance(owner, issue) == asset(100));
BEAST_EXPECT(
env.balance(vaultAccount(keylet), issue) == asset(50));
env(vault.withdraw(
{.depositor = owner,
.id = keylet.key,
.amount = share(20'000'000)}));
// transfer fees ignored on withdraw
BEAST_EXPECT(env.balance(owner, issue) == asset(120));
BEAST_EXPECT(
env.balance(vaultAccount(keylet), issue) == asset(30));
{
auto tx = vault.withdraw(
{.depositor = owner,
.id = keylet.key,
.amount = share(30'000'000)});
tx[sfDestination] = charlie.human();
env(tx);
}
// transfer fees ignored on withdraw to 3rd party
BEAST_EXPECT(env.balance(owner, issue) == asset(120));
BEAST_EXPECT(env.balance(charlie, issue) == asset(30));
BEAST_EXPECT(
env.balance(vaultAccount(keylet), issue) == asset(0));
env(vault.del({.owner = owner, .id = keylet.key}));
env.close();
},
CaseArgs{.transferRate = 1.25});
testCase([&, this](
Env& env,
@@ -2713,6 +2826,103 @@ class Vault_test : public beast::unit_test::suite
env(tx1);
});
auto const [acctReserve, incReserve] = [this]() -> std::pair<int, int> {
Env env{*this, testable_amendments()};
return {
env.current()->fees().accountReserve(0).drops() /
DROPS_PER_XRP.drops(),
env.current()->fees().increment.drops() /
DROPS_PER_XRP.drops()};
}();
testCase(
[&, this](
Env& env,
Account const& owner,
Account const& issuer,
Account const& charlie,
auto,
Vault& vault,
PrettyAsset const& asset,
auto&&...) {
testcase("IOU no trust line to depositor no reserve");
auto [tx, keylet] =
vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
// reset limit, so deposit of all funds will delete the trust
// line
env.trust(asset(0), owner);
env.close();
env(vault.deposit(
{.depositor = owner,
.id = keylet.key,
.amount = asset(200)}));
env.close();
auto trustline =
env.le(keylet::line(owner, asset.raw().get<Issue>()));
BEAST_EXPECT(trustline == nullptr);
// Fail because not enough reserve to create trust line
tx = vault.withdraw(
{.depositor = owner,
.id = keylet.key,
.amount = asset(10)});
env(tx, ter{tecNO_LINE_INSUF_RESERVE});
env.close();
env(pay(charlie, owner, XRP(incReserve)));
env.close();
// Withdraw can now create trust line, will succeed
env(tx);
env.close();
},
CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
testCase(
[&, this](
Env& env,
Account const& owner,
Account const& issuer,
Account const& charlie,
auto,
Vault& vault,
PrettyAsset const& asset,
auto&&...) {
testcase("IOU no reserve for share MPToken");
auto [tx, keylet] =
vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
env(pay(owner, charlie, asset(100)));
env.close();
// Use up some reserve on tickets
env(ticket::create(charlie, 2));
env.close();
// Fail because not enough reserve to create MPToken for shares
tx = vault.deposit(
{.depositor = charlie,
.id = keylet.key,
.amount = asset(100)});
env(tx, ter{tecINSUFFICIENT_RESERVE});
env.close();
env(pay(issuer, charlie, XRP(incReserve)));
env.close();
// Deposit can now create MPToken, will succeed
env(tx);
env.close();
},
CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
testCase([&, this](
Env& env,
Account const& owner,

View File

@@ -24,6 +24,8 @@
#include <xrpl/basics/chrono.h>
#include <xrpl/protocol/Protocol.h>
#include <utility>
namespace ripple {
/*
@@ -148,6 +150,131 @@ public:
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}
{
BEAST_EXPECT(!c.insert(5, "five"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
{
auto const p1 = c.fetch(5);
BEAST_EXPECT(p1 != nullptr);
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Advance the clock a lot
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 1);
auto p2 = std::make_shared<std::string>("five_2");
BEAST_EXPECT(c.canonicalize_replace_cache(5, p2));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Make sure we get the original object
BEAST_EXPECT(p1.get() != p2.get());
BEAST_EXPECT(*p2 == "five_2");
}
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
{
testcase("intrptr");
// Minimal payload type for exercising TaggedCache with intrusive
// shared/weak pointers instead of std::shared_ptr.
struct MyRefCountObject : IntrusiveRefCounts
{
    // Stored value; compared against strings in the test assertions below.
    std::string _data;
    // Needed to support weak intrusive pointers
    // (NOTE(review): presumably overrides a hook declared by
    // IntrusiveRefCounts — confirm against that base class.)
    virtual void
    partialDestructor() {};
    MyRefCountObject() = default;
    // Construct from an owned string (moved in).
    explicit MyRefCountObject(std::string data)
        : _data(std::move(data))
    {
    }
    // Construct from a C string literal.
    explicit MyRefCountObject(char const* data) : _data(data)
    {
    }
    // Copy-assign only the payload; the intrusive refcount state of the
    // target object is deliberately left untouched.
    MyRefCountObject&
    operator=(MyRefCountObject const& other)
    {
        _data = other._data;
        return *this;
    }
    // Equality is payload equality, ignoring refcount state.
    bool
    operator==(MyRefCountObject const& other) const
    {
        return _data == other._data;
    }
    // Convenience comparison against a raw string, used by BEAST_EXPECT.
    bool
    operator==(std::string const& other) const
    {
        return _data == other;
    }
};
using IntrPtrCache = TaggedCache<
Key,
MyRefCountObject,
/*IsKeyCache*/ false,
intr_ptr::SharedWeakUnionPtr<MyRefCountObject>,
intr_ptr::SharedPtr<MyRefCountObject>>;
IntrPtrCache intrPtrCache("IntrPtrTest", 1, 1s, clock, journal);
intrPtrCache.canonicalize_replace_cache(
1, intr_ptr::make_shared<MyRefCountObject>("one"));
BEAST_EXPECT(intrPtrCache.getCacheSize() == 1);
BEAST_EXPECT(intrPtrCache.size() == 1);
{
{
intrPtrCache.canonicalize_replace_cache(
1,
intr_ptr::make_shared<MyRefCountObject>(
"one_replaced"));
auto p = intrPtrCache.fetch(1);
BEAST_EXPECT(*p == "one_replaced");
// Advance the clock a lot
++clock;
intrPtrCache.sweep();
BEAST_EXPECT(intrPtrCache.getCacheSize() == 0);
BEAST_EXPECT(intrPtrCache.size() == 1);
intrPtrCache.canonicalize_replace_cache(
1,
intr_ptr::make_shared<MyRefCountObject>(
"one_replaced_2"));
auto p2 = intrPtrCache.fetch(1);
BEAST_EXPECT(*p2 == "one_replaced_2");
intrPtrCache.del(1, true);
}
intrPtrCache.canonicalize_replace_cache(
1,
intr_ptr::make_shared<MyRefCountObject>("one_replaced_3"));
auto p3 = intrPtrCache.fetch(1);
BEAST_EXPECT(*p3 == "one_replaced_3");
}
++clock;
intrPtrCache.sweep();
BEAST_EXPECT(intrPtrCache.getCacheSize() == 0);
BEAST_EXPECT(intrPtrCache.size() == 0);
}
}
};

View File

@@ -0,0 +1,478 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/nodestore/TestBase.h>
#include <test/unit_test/SuiteJournal.h>
#include <xrpld/nodestore/DummyScheduler.h>
#include <xrpld/nodestore/Manager.h>
#include <xrpl/basics/BasicConfig.h>
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/temp_dir.h>
#include <memory>
#include <sstream>
namespace ripple {
namespace NodeStore {
class NuDBFactory_test : public TestBase
{
private:
// Helper function to create a Section with specified parameters
Section
createSection(std::string const& path, std::string const& blockSize = "")
{
Section params;
params.set("type", "nudb");
params.set("path", path);
if (!blockSize.empty())
params.set("nudb_block_size", blockSize);
return params;
}
// Helper function to create a backend and test basic functionality
bool
testBackendFunctionality(
Section const& params,
std::size_t expectedBlocksize)
{
try
{
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
if (!BEAST_EXPECT(backend))
return false;
if (!BEAST_EXPECT(backend->getBlockSize() == expectedBlocksize))
return false;
backend->open();
if (!BEAST_EXPECT(backend->isOpen()))
return false;
// Test basic store/fetch functionality
auto batch = createPredictableBatch(10, 12345);
storeBatch(*backend, batch);
Batch copy;
fetchCopyOfBatch(*backend, &copy, batch);
backend->close();
return areBatchesEqual(batch, copy);
}
catch (...)
{
return false;
}
}
// Helper function to test log messages
void
testLogMessage(
Section const& params,
beast::severities::Severity level,
std::string const& expectedMessage)
{
test::StreamSink sink(level);
beast::Journal journal(sink);
DummyScheduler scheduler;
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
std::string logOutput = sink.messages().str();
BEAST_EXPECT(logOutput.find(expectedMessage) != std::string::npos);
}
// Helper function to test power of two validation
void
testPowerOfTwoValidation(std::string const& size, bool shouldWork)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
std::string logOutput = sink.messages().str();
bool hasWarning =
logOutput.find("Invalid nudb_block_size") != std::string::npos;
BEAST_EXPECT(hasWarning == !shouldWork);
}
public:
void
testDefaultBlockSize()
{
testcase("Default block size (no nudb_block_size specified)");
beast::temp_dir tempDir;
auto params = createSection(tempDir.path());
// Should work with default 4096 block size
BEAST_EXPECT(testBackendFunctionality(params, 4096));
}
void
testValidBlockSizes()
{
testcase("Valid block sizes");
std::vector<std::size_t> validSizes = {4096, 8192, 16384, 32768};
for (auto const& size : validSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), to_string(size));
BEAST_EXPECT(testBackendFunctionality(params, size));
}
// Empty value is ignored by the config parser, so uses the
// default
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "");
BEAST_EXPECT(testBackendFunctionality(params, 4096));
}
void
testInvalidBlockSizes()
{
testcase("Invalid block sizes");
std::vector<std::string> invalidSizes = {
"2048", // Too small
"1024", // Too small
"65536", // Too large
"131072", // Too large
"5000", // Not power of 2
"6000", // Not power of 2
"10000", // Not power of 2
"0", // Zero
"-1", // Negative
"abc", // Non-numeric
"4k", // Invalid format
"4096.5" // Decimal
};
for (auto const& size : invalidSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
// Fails
BEAST_EXPECT(!testBackendFunctionality(params, 4096));
}
// Test whitespace cases separately since lexical_cast may handle them
std::vector<std::string> whitespaceInvalidSizes = {
"4096 ", // Trailing space - might be handled by lexical_cast
" 4096" // Leading space - might be handled by lexical_cast
};
for (auto const& size : whitespaceInvalidSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
// Fails
BEAST_EXPECT(!testBackendFunctionality(params, 4096));
}
}
void
testLogMessages()
{
testcase("Log message verification");
// Test valid custom block size logging
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "8192");
testLogMessage(
params,
beast::severities::kInfo,
"Using custom NuDB block size: 8192");
}
// Test invalid block size failure
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "5000");
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
fail();
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size: 5000") !=
std::string::npos);
BEAST_EXPECT(
logOutput.find(
"Must be power of 2 between 4096 and 32768") !=
std::string::npos);
}
}
// Test non-numeric value failure
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "invalid");
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
fail();
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size value: invalid") !=
std::string::npos);
}
}
}
void
testPowerOfTwoValidation()
{
testcase("Power of 2 validation logic");
// Test edge cases around valid range
std::vector<std::pair<std::string, bool>> testCases = {
{"4095", false}, // Just below minimum
{"4096", true}, // Minimum valid
{"4097", false}, // Just above minimum, not power of 2
{"8192", true}, // Valid power of 2
{"8193", false}, // Just above valid power of 2
{"16384", true}, // Valid power of 2
{"32768", true}, // Maximum valid
{"32769", false}, // Just above maximum
{"65536", false} // Power of 2 but too large
};
for (auto const& [size, shouldWork] : testCases)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
// We test the validation logic by catching exceptions for invalid
// values
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
BEAST_EXPECT(shouldWork);
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size") !=
std::string::npos);
}
}
}
void
testBothConstructorVariants()
{
testcase("Both constructor variants work with custom block size");
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "16384");
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
// Test first constructor (without nudb::context)
{
auto backend1 = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
BEAST_EXPECT(backend1 != nullptr);
BEAST_EXPECT(testBackendFunctionality(params, 16384));
}
// Test second constructor (with nudb::context)
// Note: This would require access to nudb::context, which might not be
// easily testable without more complex setup. For now, we test that
// the factory can create backends with the first constructor.
}
void
testConfigurationParsing()
{
testcase("Configuration parsing edge cases");
// Test that whitespace is handled correctly
std::vector<std::string> validFormats = {
"8192" // Basic valid format
};
// Test whitespace handling separately since lexical_cast behavior may
// vary
std::vector<std::string> whitespaceFormats = {
" 8192", // Leading space - may or may not be handled by
// lexical_cast
"8192 " // Trailing space - may or may not be handled by
// lexical_cast
};
// Test basic valid format
for (auto const& format : validFormats)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), format);
test::StreamSink sink(beast::severities::kInfo);
beast::Journal journal(sink);
DummyScheduler scheduler;
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
// Should log success message for valid values
std::string logOutput = sink.messages().str();
bool hasSuccessMessage =
logOutput.find("Using custom NuDB block size") !=
std::string::npos;
BEAST_EXPECT(hasSuccessMessage);
}
// Test whitespace formats - these should work if lexical_cast handles
// them
for (auto const& format : whitespaceFormats)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), format);
// Use a lower threshold to capture both info and warning messages
test::StreamSink sink(beast::severities::kDebug);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
fail();
}
catch (...)
{
// Fails
BEAST_EXPECT(!testBackendFunctionality(params, 8192));
}
}
}
void
testDataPersistence()
{
testcase("Data persistence with different block sizes");
std::vector<std::string> blockSizes = {
"4096", "8192", "16384", "32768"};
for (auto const& size : blockSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
// Create test data
auto batch = createPredictableBatch(50, 54321);
// Store data
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
backend->open();
storeBatch(*backend, batch);
backend->close();
}
// Retrieve data in new backend instance
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
backend->open();
Batch copy;
fetchCopyOfBatch(*backend, &copy, batch);
BEAST_EXPECT(areBatchesEqual(batch, copy));
backend->close();
}
}
}
void
run() override
{
testDefaultBlockSize();
testValidBlockSizes();
testInvalidBlockSizes();
testLogMessages();
testPowerOfTwoValidation();
testBothConstructorVariants();
testConfigurationParsing();
testDataPersistence();
}
};
BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, ripple);
} // namespace NodeStore
} // namespace ripple

View File

@@ -0,0 +1,168 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2023 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/jtx.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/protocol/jss.h>
namespace ripple {
namespace test {
/// Tests for the `server_definitions` RPC: full output with no params,
/// and the `hash` short-circuit (matching hash omits the definition
/// tables; a different hash returns them in full).
class ServerDefinitions_test : public beast::unit_test::suite
{
public:
    void
    testServerDefinitions()
    {
        testcase("server_definitions");
        using namespace test::jtx;
        {
            // No parameters: all definition tables plus a hash are present.
            Env env(*this);
            auto const result = env.rpc("server_definitions");
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
            // test a random element of each result
            // (testing the whole output would be difficult to maintain)
            {
                auto const firstField = result[jss::result][jss::FIELDS][0u];
                BEAST_EXPECT(firstField[0u].asString() == "Generic");
                BEAST_EXPECT(
                    firstField[1][jss::isSerialized].asBool() == false);
                BEAST_EXPECT(
                    firstField[1][jss::isSigningField].asBool() == false);
                BEAST_EXPECT(firstField[1][jss::isVLEncoded].asBool() == false);
                BEAST_EXPECT(firstField[1][jss::nth].asUInt() == 0);
                BEAST_EXPECT(firstField[1][jss::type].asString() == "Unknown");
            }
            BEAST_EXPECT(
                result[jss::result][jss::LEDGER_ENTRY_TYPES]["AccountRoot"]
                    .asUInt() == 97);
            BEAST_EXPECT(
                result[jss::result][jss::TRANSACTION_RESULTS]["tecDIR_FULL"]
                    .asUInt() == 121);
            BEAST_EXPECT(
                result[jss::result][jss::TRANSACTION_TYPES]["Payment"]
                    .asUInt() == 0);
            BEAST_EXPECT(
                result[jss::result][jss::TYPES]["AccountID"].asUInt() == 8);
            // check exception SFields
            {
                // Linear scan of the FIELDS array for a field by name.
                auto const fieldExists = [&](std::string name) {
                    for (auto& field : result[jss::result][jss::FIELDS])
                    {
                        if (field[0u].asString() == name)
                        {
                            return true;
                        }
                    }
                    return false;
                };
                BEAST_EXPECT(fieldExists("Generic"));
                BEAST_EXPECT(fieldExists("Invalid"));
                BEAST_EXPECT(fieldExists("ObjectEndMarker"));
                BEAST_EXPECT(fieldExists("ArrayEndMarker"));
                BEAST_EXPECT(fieldExists("taker_gets_funded"));
                BEAST_EXPECT(fieldExists("taker_pays_funded"));
                BEAST_EXPECT(fieldExists("hash"));
                BEAST_EXPECT(fieldExists("index"));
            }
            // test that base_uint types are replaced with "Hash" prefix
            {
                auto const types = result[jss::result][jss::TYPES];
                BEAST_EXPECT(types["Hash128"].asUInt() == 4);
                BEAST_EXPECT(types["Hash160"].asUInt() == 17);
                BEAST_EXPECT(types["Hash192"].asUInt() == 21);
                BEAST_EXPECT(types["Hash256"].asUInt() == 5);
                BEAST_EXPECT(types["Hash384"].asUInt() == 22);
                BEAST_EXPECT(types["Hash512"].asUInt() == 23);
            }
        }
        // test providing the same hash
        {
            // When the client already has the current definitions (hash
            // matches), the tables are omitted and only the hash returns.
            Env env(*this);
            auto const firstResult = env.rpc("server_definitions");
            auto const hash = firstResult[jss::result][jss::hash].asString();
            auto const hashParam =
                std::string("{ ") + "\"hash\": \"" + hash + "\"}";
            auto const result =
                env.rpc("json", "server_definitions", hashParam);
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(!result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(
                !result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                !result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(!result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(!result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
        }
        // test providing a different hash
        {
            // A stale/unknown hash means the client needs fresh data, so
            // the full definition tables are returned again.
            Env env(*this);
            std::string const hash =
                "54296160385A27154BFA70A239DD8E8FD4CC2DB7BA32D970BA3A5B132CF749"
                "D1";
            auto const hashParam =
                std::string("{ ") + "\"hash\": \"" + hash + "\"}";
            auto const result =
                env.rpc("json", "server_definitions", hashParam);
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
        }
    }

    void
    run() override
    {
        testServerDefinitions();
    }
};
BEAST_DEFINE_TESTSUITE(ServerDefinitions, rpc, ripple);
} // namespace test
} // namespace ripple

View File

@@ -174,137 +174,10 @@ admin = 127.0.0.1
}
}
void
testServerDefinitions()
{
testcase("server_definitions");
using namespace test::jtx;
{
Env env(*this);
auto const result = env.rpc("server_definitions");
BEAST_EXPECT(!result[jss::result].isMember(jss::error));
BEAST_EXPECT(result[jss::result][jss::status] == "success");
BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
BEAST_EXPECT(
result[jss::result].isMember(jss::TRANSACTION_RESULTS));
BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
BEAST_EXPECT(result[jss::result].isMember(jss::hash));
// test a random element of each result
// (testing the whole output would be difficult to maintain)
{
auto const firstField = result[jss::result][jss::FIELDS][0u];
BEAST_EXPECT(firstField[0u].asString() == "Generic");
BEAST_EXPECT(
firstField[1][jss::isSerialized].asBool() == false);
BEAST_EXPECT(
firstField[1][jss::isSigningField].asBool() == false);
BEAST_EXPECT(firstField[1][jss::isVLEncoded].asBool() == false);
BEAST_EXPECT(firstField[1][jss::nth].asUInt() == 0);
BEAST_EXPECT(firstField[1][jss::type].asString() == "Unknown");
}
BEAST_EXPECT(
result[jss::result][jss::LEDGER_ENTRY_TYPES]["AccountRoot"]
.asUInt() == 97);
BEAST_EXPECT(
result[jss::result][jss::TRANSACTION_RESULTS]["tecDIR_FULL"]
.asUInt() == 121);
BEAST_EXPECT(
result[jss::result][jss::TRANSACTION_TYPES]["Payment"]
.asUInt() == 0);
BEAST_EXPECT(
result[jss::result][jss::TYPES]["AccountID"].asUInt() == 8);
// check exception SFields
{
auto const fieldExists = [&](std::string name) {
for (auto& field : result[jss::result][jss::FIELDS])
{
if (field[0u].asString() == name)
{
return true;
}
}
return false;
};
BEAST_EXPECT(fieldExists("Generic"));
BEAST_EXPECT(fieldExists("Invalid"));
BEAST_EXPECT(fieldExists("ObjectEndMarker"));
BEAST_EXPECT(fieldExists("ArrayEndMarker"));
BEAST_EXPECT(fieldExists("taker_gets_funded"));
BEAST_EXPECT(fieldExists("taker_pays_funded"));
BEAST_EXPECT(fieldExists("hash"));
BEAST_EXPECT(fieldExists("index"));
}
// test that base_uint types are replaced with "Hash" prefix
{
auto const types = result[jss::result][jss::TYPES];
BEAST_EXPECT(types["Hash128"].asUInt() == 4);
BEAST_EXPECT(types["Hash160"].asUInt() == 17);
BEAST_EXPECT(types["Hash192"].asUInt() == 21);
BEAST_EXPECT(types["Hash256"].asUInt() == 5);
BEAST_EXPECT(types["Hash384"].asUInt() == 22);
BEAST_EXPECT(types["Hash512"].asUInt() == 23);
}
}
// test providing the same hash
{
Env env(*this);
auto const firstResult = env.rpc("server_definitions");
auto const hash = firstResult[jss::result][jss::hash].asString();
auto const hashParam =
std::string("{ ") + "\"hash\": \"" + hash + "\"}";
auto const result =
env.rpc("json", "server_definitions", hashParam);
BEAST_EXPECT(!result[jss::result].isMember(jss::error));
BEAST_EXPECT(result[jss::result][jss::status] == "success");
BEAST_EXPECT(!result[jss::result].isMember(jss::FIELDS));
BEAST_EXPECT(
!result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
BEAST_EXPECT(
!result[jss::result].isMember(jss::TRANSACTION_RESULTS));
BEAST_EXPECT(!result[jss::result].isMember(jss::TRANSACTION_TYPES));
BEAST_EXPECT(!result[jss::result].isMember(jss::TYPES));
BEAST_EXPECT(result[jss::result].isMember(jss::hash));
}
// test providing a different hash
{
Env env(*this);
std::string const hash =
"54296160385A27154BFA70A239DD8E8FD4CC2DB7BA32D970BA3A5B132CF749"
"D1";
auto const hashParam =
std::string("{ ") + "\"hash\": \"" + hash + "\"}";
auto const result =
env.rpc("json", "server_definitions", hashParam);
BEAST_EXPECT(!result[jss::result].isMember(jss::error));
BEAST_EXPECT(result[jss::result][jss::status] == "success");
BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
BEAST_EXPECT(
result[jss::result].isMember(jss::TRANSACTION_RESULTS));
BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
BEAST_EXPECT(result[jss::result].isMember(jss::hash));
}
}
void
run() override
{
testServerInfo();
testServerDefinitions();
}
};

View File

@@ -47,7 +47,7 @@ class BookStep : public StepImp<TIn, TOut, BookStep<TIn, TOut, TDerived>>
protected:
enum class OfferType { AMM, CLOB };
uint32_t const maxOffersToConsume_;
static constexpr uint32_t MaxOffersToConsume{1000};
Book book_;
AccountID strandSrc_;
AccountID strandDst_;
@@ -82,18 +82,9 @@ protected:
std::optional<Cache> cache_;
static uint32_t
getMaxOffersToConsume(StrandContext const& ctx)
{
if (ctx.view.rules().enabled(fix1515))
return 1000;
return 2000;
}
public:
BookStep(StrandContext const& ctx, Issue const& in, Issue const& out)
: maxOffersToConsume_(getMaxOffersToConsume(ctx))
, book_(in, out, ctx.domainID)
: book_(in, out, ctx.domainID)
, strandSrc_(ctx.strandSrc)
, strandDst_(ctx.strandDst)
, prevStep_(ctx.prevStep)
@@ -738,7 +729,7 @@ BookStep<TIn, TOut, TDerived>::forEachOffer(
ownerPaysTransferFee_ ? rate(book_.out.account) : QUALITY_ONE;
typename FlowOfferStream<TIn, TOut>::StepCounter counter(
maxOffersToConsume_, j_);
MaxOffersToConsume, j_);
FlowOfferStream<TIn, TOut> offers(
sb, afView, book_, sb.parentCloseTime(), counter, j_);
@@ -1093,18 +1084,9 @@ BookStep<TIn, TOut, TDerived>::revImp(
offersUsed_ = offersConsumed;
SetUnion(ofrsToRm, toRm);
if (offersConsumed >= maxOffersToConsume_)
// Too many iterations, mark this strand as inactive
if (offersConsumed >= MaxOffersToConsume)
{
// Too many iterations, mark this strand as inactive
if (!afView.rules().enabled(fix1515))
{
// Don't use the liquidity
cache_.emplace(beast::zero, beast::zero);
return {beast::zero, beast::zero};
}
// Use the liquidity, but use this to mark the strand as inactive so
// it's not used further
inactive_ = true;
}
}
@@ -1266,18 +1248,9 @@ BookStep<TIn, TOut, TDerived>::fwdImp(
offersUsed_ = offersConsumed;
SetUnion(ofrsToRm, toRm);
if (offersConsumed >= maxOffersToConsume_)
// Too many iterations, mark this strand as inactive (dry)
if (offersConsumed >= MaxOffersToConsume)
{
// Too many iterations, mark this strand as inactive (dry)
if (!afView.rules().enabled(fix1515))
{
// Don't use the liquidity
cache_.emplace(beast::zero, beast::zero);
return {beast::zero, beast::zero};
}
// Use the liquidity, but use this to mark the strand as inactive so
// it's not used further
inactive_ = true;
}
}

View File

@@ -1205,7 +1205,7 @@ EscrowFinish::doApply()
{
// LCOV_EXCL_START
JLOG(j_.fatal()) << "Unable to delete Escrow from recipient.";
return tefBAD_LEDGER; // LCOV_EXCL_LINE
return tefBAD_LEDGER;
// LCOV_EXCL_STOP
}
}

View File

@@ -202,8 +202,7 @@ VaultDeposit::doApply()
else // !vault->isFlag(lsfVaultPrivate) || account_ == vault->at(sfOwner)
{
// No authorization needed, but must ensure there is MPToken
auto sleMpt = view().read(keylet::mptoken(mptIssuanceID, account_));
if (!sleMpt)
if (!view().exists(keylet::mptoken(mptIssuanceID, account_)))
{
if (auto const err = authorizeMPToken(
view(),

View File

@@ -52,12 +52,6 @@ VaultWithdraw::preflight(PreflightContext const& ctx)
return temMALFORMED;
}
}
else if (ctx.tx.isFieldPresent(sfDestinationTag))
{
JLOG(ctx.j.debug()) << "VaultWithdraw: sfDestinationTag is set but "
"sfDestination is not";
return temMALFORMED;
}
return tesSUCCESS;
}
@@ -116,37 +110,28 @@ VaultWithdraw::preclaim(PreclaimContext const& ctx)
}
auto const account = ctx.tx[sfAccount];
auto const dstAcct = [&]() -> AccountID {
if (ctx.tx.isFieldPresent(sfDestination))
return ctx.tx.getAccountID(sfDestination);
return account;
}();
auto const dstAcct = ctx.tx[~sfDestination].value_or(account);
auto const sleDst = ctx.view.read(keylet::account(dstAcct));
if (sleDst == nullptr)
return account == dstAcct ? tecINTERNAL : tecNO_DST;
if (sleDst->isFlag(lsfRequireDestTag) &&
!ctx.tx.isFieldPresent(sfDestinationTag))
return tecDST_TAG_NEEDED; // Cannot send without a tag
// Withdrawal to a 3rd party destination account is essentially a transfer,
// via shares in the vault. Enforce all the usual asset transfer checks.
AuthType authType = AuthType::Legacy;
if (account != dstAcct)
if (account != dstAcct && sleDst->isFlag(lsfDepositAuth))
{
auto const sleDst = ctx.view.read(keylet::account(dstAcct));
if (sleDst == nullptr)
return tecNO_DST;
if (sleDst->isFlag(lsfRequireDestTag) &&
!ctx.tx.isFieldPresent(sfDestinationTag))
return tecDST_TAG_NEEDED; // Cannot send without a tag
if (sleDst->isFlag(lsfDepositAuth))
{
if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account)))
return tecNO_PERMISSION;
}
// The destination account must have consented to receive the asset by
// creating a RippleState or MPToken
authType = AuthType::StrongAuth;
if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account)))
return tecNO_PERMISSION;
}
// Destination MPToken (for an MPT) or trust line (for an IOU) must exist
// if not sending to Account.
// If sending to Account (i.e. not a transfer), we will also create (only
// if authorized) a trust line or MPToken as needed, in doApply().
// Destination MPToken or trust line must exist if _not_ sending to Account.
AuthType const authType =
account == dstAcct ? AuthType::WeakAuth : AuthType::StrongAuth;
if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct, authType);
!isTesSuccess(ter))
return ter;
@@ -307,11 +292,16 @@ VaultWithdraw::doApply()
// else quietly ignore, account balance is not zero
}
auto const dstAcct = [&]() -> AccountID {
if (ctx_.tx.isFieldPresent(sfDestination))
return ctx_.tx.getAccountID(sfDestination);
return account_;
}();
auto const dstAcct = ctx_.tx[~sfDestination].value_or(account_);
if (!vaultAsset.native() && //
dstAcct != vaultAsset.getIssuer() && //
dstAcct == account_)
{
if (auto const ter = addEmptyHolding(
view(), account_, mPriorBalance, vaultAsset, j_);
!isTesSuccess(ter) && ter != tecDUPLICATE)
return ter;
}
// Transfer assets from vault to depositor or destination account.
if (auto const ter = accountSend(

View File

@@ -53,6 +53,14 @@ public:
virtual std::string
getName() = 0;
/** Get the block size for backends that support it.

    @return The backend's block size in bytes, or std::nullopt for
            backends that have no notion of a block size (the default).
*/
virtual std::optional<std::size_t>
getBlockSize() const
{
return std::nullopt;
}
/** Open the backend.
@param createIfMissing Create the database files if necessary.
This allows the caller to catch exceptions.

View File

@@ -24,6 +24,7 @@
#include <xrpld/nodestore/detail/codec.h>
#include <xrpl/basics/contract.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <boost/filesystem.hpp>
@@ -52,6 +53,7 @@ public:
size_t const keyBytes_;
std::size_t const burstSize_;
std::string const name_;
std::size_t const blockSize_;
nudb::store db_;
std::atomic<bool> deletePath_;
Scheduler& scheduler_;
@@ -66,6 +68,7 @@ public:
, keyBytes_(keyBytes)
, burstSize_(burstSize)
, name_(get(keyValues, "path"))
, blockSize_(parseBlockSize(name_, keyValues, journal))
, deletePath_(false)
, scheduler_(scheduler)
{
@@ -85,6 +88,7 @@ public:
, keyBytes_(keyBytes)
, burstSize_(burstSize)
, name_(get(keyValues, "path"))
, blockSize_(parseBlockSize(name_, keyValues, journal))
, db_(context)
, deletePath_(false)
, scheduler_(scheduler)
@@ -114,6 +118,12 @@ public:
return name_;
}
/** @return The NuDB key-file block size in bytes, as resolved at
    construction time (either the "nudb_block_size" config value or
    the NuDB default). */
std::optional<std::size_t>
getBlockSize() const override
{
return blockSize_;
}
void
open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt)
override
@@ -145,7 +155,7 @@ public:
uid,
salt,
keyBytes_,
nudb::block_size(kp),
blockSize_,
0.50,
ec);
if (ec == nudb::errc::file_exists)
@@ -361,6 +371,56 @@ public:
{
return 3;
}
private:
/** Parse and validate the optional "nudb_block_size" configuration value.

    @param name Path of the database folder (used to locate nudb.key).
    @param keyValues Configuration section that may contain a
           "nudb_block_size" entry.
    @param journal Journal used to log a custom block size.
    @return The configured block size in bytes, or NuDB's default for
            the key file when the setting is absent.
    @throws std::runtime_error if the value does not parse as an
            integer, or is not a power of 2 between 4096 and 32768.
*/
static std::size_t
parseBlockSize(
    std::string const& name,
    Section const& keyValues,
    beast::Journal journal)
{
    using namespace boost::filesystem;
    auto const kp = (path(name) / "nudb.key").string();

    std::string blockSizeStr;
    if (!get_if_exists(keyValues, "nudb_block_size", blockSizeStr))
        return nudb::block_size(kp);  // Default 4K from NuDB

    std::size_t parsedBlockSize = 0;
    try
    {
        parsedBlockSize = beast::lexicalCastThrow<std::size_t>(blockSizeStr);
    }
    catch (std::exception const& e)
    {
        std::stringstream s;
        s << "Invalid nudb_block_size value: " << blockSizeStr
          << ". Error: " << e.what();
        Throw<std::runtime_error>(s.str());
    }

    // Validate outside the try block above: previously a failed
    // validation threw inside it and was immediately re-caught and
    // re-wrapped, producing a doubled error message.
    // Requirement: power of 2 between 4K and 32K.
    if (parsedBlockSize < 4096 || parsedBlockSize > 32768 ||
        (parsedBlockSize & (parsedBlockSize - 1)) != 0)
    {
        std::stringstream s;
        s << "Invalid nudb_block_size: " << parsedBlockSize
          << ". Must be power of 2 between 4096 and 32768.";
        Throw<std::runtime_error>(s.str());
    }

    JLOG(journal.info()) << "Using custom NuDB block size: "
                         << parsedBlockSize << " bytes";
    return parsedBlockSize;
}
};
//------------------------------------------------------------------------------

View File

@@ -0,0 +1,320 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2023 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/Role.h>
#include <xrpl/json/json_value.h>
#include <xrpl/json/json_writer.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFormats.h>
#include <xrpl/protocol/digest.h>
#include <xrpl/protocol/jss.h>
#include <boost/algorithm/string.hpp>
#include <unordered_map>
namespace ripple {
namespace detail {
// Builds and caches the JSON "definitions" object served by the
// server_definitions RPC, together with the SHA-512Half hash of its
// serialized form.
class ServerDefinitions
{
private:
// translate e.g. STI_LEDGERENTRY to LedgerEntry
std::string
translate(std::string const& inp);
// SHA-512Half of the serialized defs_ JSON (computed in the ctor).
uint256 defsHash_;
// The assembled definitions object (TYPES, FIELDS, etc.).
Json::Value defs_;
public:
ServerDefinitions();
// True if the caller-supplied hash matches the cached definitions hash.
bool
hashMatches(uint256 hash) const
{
return defsHash_ == hash;
}
// Read-only access to the cached definitions JSON.
Json::Value const&
get() const
{
return defs_;
}
};
// Convert an internal serialized-type name (with the "STI_" prefix
// already removed by the caller) to its public API-facing name,
// e.g. "LEDGERENTRY" -> "LedgerEntry".
std::string
ServerDefinitions::translate(std::string const& inp)
{
    auto replace = [&](char const* oldStr, char const* newStr) -> std::string {
        std::string out = inp;
        boost::replace_all(out, oldStr, newStr);
        return out;
    };
    auto contains = [&](char const* s) -> bool {
        return inp.find(s) != std::string::npos;
    };
    // UINT<bits>: the wide widths are hashes; the rest are plain UInts.
    if (contains("UINT"))
    {
        if (contains("512") || contains("384") || contains("256") ||
            contains("192") || contains("160") || contains("128"))
            return replace("UINT", "Hash");
        else
            return replace("UINT", "UInt");
    }
    // Fixed, irregular renames that don't follow the general rules.
    std::unordered_map<std::string, std::string> replacements{
        {"OBJECT", "STObject"},
        {"ARRAY", "STArray"},
        {"ACCOUNT", "AccountID"},
        {"LEDGERENTRY", "LedgerEntry"},
        {"NOTPRESENT", "NotPresent"},
        {"PATHSET", "PathSet"},
        {"VL", "Blob"},
        {"XCHAIN_BRIDGE", "XChainBridge"},
    };
    if (auto const& it = replacements.find(inp); it != replacements.end())
    {
        return it->second;
    }
    std::string out;
    size_t pos = 0;
    std::string inpToProcess = inp;
    // convert snake_case to CamelCase
    for (;;)
    {
        pos = inpToProcess.find("_");
        if (pos == std::string::npos)
            pos = inpToProcess.size();
        std::string token = inpToProcess.substr(0, pos);
        if (token.size() > 1)
        {
            boost::algorithm::to_lower(token);
            // Capitalize the first character. Guarded so a token that
            // does not start with a lowercase letter (e.g. a digit) is
            // left unchanged instead of being corrupted.
            if (token[0] >= 'a' && token[0] <= 'z')
                token[0] -= ('a' - 'A');
            out += token;
        }
        else
            out += token;
        if (pos == inpToProcess.size())
            break;
        inpToProcess = inpToProcess.substr(pos + 1);
    }
    return out;
}
// Assemble the definitions JSON once: serialized type ids, ledger entry
// types, SField metadata, TER codes, and transaction types; then record
// the SHA-512Half hash of the serialized result in defsHash_ and embed
// it in the object itself.
ServerDefinitions::ServerDefinitions() : defs_{Json::objectValue}
{
// populate SerializedTypeID names and values
defs_[jss::TYPES] = Json::objectValue;
defs_[jss::TYPES]["Done"] = -1;
// typeMap maps each numeric type value back to its translated name, so
// the SField loop below can emit a readable "type" for every field.
std::map<int32_t, std::string> typeMap{{-1, "Done"}};
for (auto const& [rawName, typeValue] : sTypeMap)
{
std::string typeName =
translate(std::string(rawName).substr(4) /* remove STI_ */);
defs_[jss::TYPES][typeName] = typeValue;
typeMap[typeValue] = typeName;
}
// populate LedgerEntryType names and values
defs_[jss::LEDGER_ENTRY_TYPES] = Json::objectValue;
defs_[jss::LEDGER_ENTRY_TYPES][jss::Invalid] = -1;
for (auto const& f : LedgerFormats::getInstance())
{
defs_[jss::LEDGER_ENTRY_TYPES][f.getName()] = f.getType();
}
// populate SField serialization data
defs_[jss::FIELDS] = Json::arrayValue;
uint32_t i = 0;
// The next six entries are synthetic fields that are not part of
// SField::getKnownCodeToField(), so they are added by hand.
{
Json::Value a = Json::arrayValue;
a[0U] = "Generic";
Json::Value v = Json::objectValue;
v[jss::nth] = 0;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = false;
v[jss::isSigningField] = false;
v[jss::type] = "Unknown";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "Invalid";
Json::Value v = Json::objectValue;
v[jss::nth] = -1;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = false;
v[jss::isSigningField] = false;
v[jss::type] = "Unknown";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "ObjectEndMarker";
Json::Value v = Json::objectValue;
v[jss::nth] = 1;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = true;
v[jss::isSigningField] = true;
v[jss::type] = "STObject";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "ArrayEndMarker";
Json::Value v = Json::objectValue;
v[jss::nth] = 1;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = true;
v[jss::isSigningField] = true;
v[jss::type] = "STArray";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "taker_gets_funded";
Json::Value v = Json::objectValue;
v[jss::nth] = 258;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = false;
v[jss::isSigningField] = false;
v[jss::type] = "Amount";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "taker_pays_funded";
Json::Value v = Json::objectValue;
v[jss::nth] = 259;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = false;
v[jss::isSigningField] = false;
v[jss::type] = "Amount";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
// All regular fields, straight from the protocol's field table.
for (auto const& [code, f] : ripple::SField::getKnownCodeToField())
{
if (f->fieldName == "")
continue;
Json::Value innerObj = Json::objectValue;
uint32_t type = f->fieldType;
innerObj[jss::nth] = f->fieldValue;
// whether the field is variable-length encoded
// this means that the length is included before the content
innerObj[jss::isVLEncoded] =
(type == 7U /* Blob */ || type == 8U /* AccountID */ ||
type == 19U /* Vector256 */);
// whether the field is included in serialization
innerObj[jss::isSerialized] =
(type < 10000 && f->fieldName != "hash" &&
f->fieldName != "index"); /* hash, index, TRANSACTION,
LEDGER_ENTRY, VALIDATION, METADATA */
// whether the field is included in serialization when signing
innerObj[jss::isSigningField] = f->shouldInclude(false);
innerObj[jss::type] = typeMap[type];
Json::Value innerArray = Json::arrayValue;
innerArray[0U] = f->fieldName;
innerArray[1U] = innerObj;
defs_[jss::FIELDS][i++] = innerArray;
}
// populate TER code names and values
defs_[jss::TRANSACTION_RESULTS] = Json::objectValue;
for (auto const& [code, terInfo] : transResults())
{
defs_[jss::TRANSACTION_RESULTS][terInfo.first] = code;
}
// populate TxType names and values
defs_[jss::TRANSACTION_TYPES] = Json::objectValue;
defs_[jss::TRANSACTION_TYPES][jss::Invalid] = -1;
for (auto const& f : TxFormats::getInstance())
{
defs_[jss::TRANSACTION_TYPES][f.getName()] = f.getType();
}
// generate hash
// NOTE: the hash covers the object as serialized *before* the hash
// itself is inserted, so clients can cache against it.
{
std::string const out = Json::FastWriter().write(defs_);
defsHash_ = ripple::sha512Half(ripple::Slice{out.data(), out.size()});
defs_[jss::hash] = to_string(defsHash_);
}
}
} // namespace detail
// RPC handler for "server_definitions".
//
// If the request supplies a "hash" equal to the hash of the cached
// definitions, only the hash is echoed back (the client is up to date);
// otherwise the full definitions object is returned.
Json::Value
doServerDefinitions(RPC::JsonContext& context)
{
auto& requestParams = context.params;
uint256 requestedHash;
if (requestParams.isMember(jss::hash))
{
auto const& hashParam = requestParams[jss::hash];
bool const parsedOk =
hashParam.isString() && requestedHash.parseHex(hashParam.asString());
if (!parsedOk)
return RPC::invalid_field_error(jss::hash);
}
// Built once on first use; depends only on compiled-in protocol data.
static detail::ServerDefinitions const defs{};
if (!defs.hashMatches(requestedHash))
return defs.get();
Json::Value reply = Json::objectValue;
reply[jss::hash] = to_string(requestedHash);
return reply;
}
} // namespace ripple

View File

@@ -23,301 +23,10 @@
#include <xrpl/json/json_value.h>
#include <xrpl/json/json_writer.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFormats.h>
#include <xrpl/protocol/digest.h>
#include <xrpl/protocol/jss.h>
#include <boost/algorithm/string.hpp>
#include <unordered_map>
namespace ripple {
namespace detail {
// Builds and caches the JSON "definitions" object served by the
// server_definitions RPC, together with the SHA-512Half hash of its
// serialized form.
class ServerDefinitions
{
private:
// translate e.g. STI_LEDGERENTRY to LedgerEntry
std::string
translate(std::string const& inp);
// SHA-512Half of the serialized defs_ JSON (computed in the ctor).
uint256 defsHash_;
// The assembled definitions object (TYPES, FIELDS, etc.).
Json::Value defs_;
public:
ServerDefinitions();
// True if the caller-supplied hash matches the cached definitions hash.
bool
hashMatches(uint256 hash) const
{
return defsHash_ == hash;
}
// Read-only access to the cached definitions JSON.
Json::Value const&
get() const
{
return defs_;
}
};
// Convert an internal serialized-type name (with the "STI_" prefix
// already removed by the caller) to its public API-facing name,
// e.g. "LEDGERENTRY" -> "LedgerEntry".
std::string
ServerDefinitions::translate(std::string const& inp)
{
    auto replace = [&](char const* oldStr, char const* newStr) -> std::string {
        std::string out = inp;
        boost::replace_all(out, oldStr, newStr);
        return out;
    };
    auto contains = [&](char const* s) -> bool {
        return inp.find(s) != std::string::npos;
    };
    // UINT<bits>: the wide widths are hashes; the rest are plain UInts.
    if (contains("UINT"))
    {
        if (contains("512") || contains("384") || contains("256") ||
            contains("192") || contains("160") || contains("128"))
            return replace("UINT", "Hash");
        else
            return replace("UINT", "UInt");
    }
    // Fixed, irregular renames that don't follow the general rules.
    std::unordered_map<std::string, std::string> replacements{
        {"OBJECT", "STObject"},
        {"ARRAY", "STArray"},
        {"ACCOUNT", "AccountID"},
        {"LEDGERENTRY", "LedgerEntry"},
        {"NOTPRESENT", "NotPresent"},
        {"PATHSET", "PathSet"},
        {"VL", "Blob"},
        {"XCHAIN_BRIDGE", "XChainBridge"},
    };
    if (auto const& it = replacements.find(inp); it != replacements.end())
    {
        return it->second;
    }
    std::string out;
    size_t pos = 0;
    std::string inpToProcess = inp;
    // convert snake_case to CamelCase
    for (;;)
    {
        pos = inpToProcess.find("_");
        if (pos == std::string::npos)
            pos = inpToProcess.size();
        std::string token = inpToProcess.substr(0, pos);
        if (token.size() > 1)
        {
            boost::algorithm::to_lower(token);
            // Capitalize the first character. Guarded so a token that
            // does not start with a lowercase letter (e.g. a digit) is
            // left unchanged instead of being corrupted.
            if (token[0] >= 'a' && token[0] <= 'z')
                token[0] -= ('a' - 'A');
            out += token;
        }
        else
            out += token;
        if (pos == inpToProcess.size())
            break;
        inpToProcess = inpToProcess.substr(pos + 1);
    }
    return out;
}
// Assemble the definitions JSON once: serialized type ids, ledger entry
// types, SField metadata, TER codes, and transaction types; then record
// the SHA-512Half hash of the serialized result in defsHash_ and embed
// it in the object itself.
ServerDefinitions::ServerDefinitions() : defs_{Json::objectValue}
{
// populate SerializedTypeID names and values
defs_[jss::TYPES] = Json::objectValue;
defs_[jss::TYPES]["Done"] = -1;
// typeMap maps each numeric type value back to its translated name, so
// the SField loop below can emit a readable "type" for every field.
std::map<int32_t, std::string> typeMap{{-1, "Done"}};
for (auto const& [rawName, typeValue] : sTypeMap)
{
std::string typeName =
translate(std::string(rawName).substr(4) /* remove STI_ */);
defs_[jss::TYPES][typeName] = typeValue;
typeMap[typeValue] = typeName;
}
// populate LedgerEntryType names and values
defs_[jss::LEDGER_ENTRY_TYPES] = Json::objectValue;
defs_[jss::LEDGER_ENTRY_TYPES][jss::Invalid] = -1;
for (auto const& f : LedgerFormats::getInstance())
{
defs_[jss::LEDGER_ENTRY_TYPES][f.getName()] = f.getType();
}
// populate SField serialization data
defs_[jss::FIELDS] = Json::arrayValue;
uint32_t i = 0;
// The next six entries are synthetic fields that are not part of
// SField::getKnownCodeToField(), so they are added by hand.
{
Json::Value a = Json::arrayValue;
a[0U] = "Generic";
Json::Value v = Json::objectValue;
v[jss::nth] = 0;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = false;
v[jss::isSigningField] = false;
v[jss::type] = "Unknown";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "Invalid";
Json::Value v = Json::objectValue;
v[jss::nth] = -1;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = false;
v[jss::isSigningField] = false;
v[jss::type] = "Unknown";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "ObjectEndMarker";
Json::Value v = Json::objectValue;
v[jss::nth] = 1;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = true;
v[jss::isSigningField] = true;
v[jss::type] = "STObject";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "ArrayEndMarker";
Json::Value v = Json::objectValue;
v[jss::nth] = 1;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = true;
v[jss::isSigningField] = true;
v[jss::type] = "STArray";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "taker_gets_funded";
Json::Value v = Json::objectValue;
v[jss::nth] = 258;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = false;
v[jss::isSigningField] = false;
v[jss::type] = "Amount";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
{
Json::Value a = Json::arrayValue;
a[0U] = "taker_pays_funded";
Json::Value v = Json::objectValue;
v[jss::nth] = 259;
v[jss::isVLEncoded] = false;
v[jss::isSerialized] = false;
v[jss::isSigningField] = false;
v[jss::type] = "Amount";
a[1U] = v;
defs_[jss::FIELDS][i++] = a;
}
// All regular fields, straight from the protocol's field table.
for (auto const& [code, f] : ripple::SField::getKnownCodeToField())
{
if (f->fieldName == "")
continue;
Json::Value innerObj = Json::objectValue;
uint32_t type = f->fieldType;
innerObj[jss::nth] = f->fieldValue;
// whether the field is variable-length encoded
// this means that the length is included before the content
innerObj[jss::isVLEncoded] =
(type == 7U /* Blob */ || type == 8U /* AccountID */ ||
type == 19U /* Vector256 */);
// whether the field is included in serialization
innerObj[jss::isSerialized] =
(type < 10000 && f->fieldName != "hash" &&
f->fieldName != "index"); /* hash, index, TRANSACTION,
LEDGER_ENTRY, VALIDATION, METADATA */
// whether the field is included in serialization when signing
innerObj[jss::isSigningField] = f->shouldInclude(false);
innerObj[jss::type] = typeMap[type];
Json::Value innerArray = Json::arrayValue;
innerArray[0U] = f->fieldName;
innerArray[1U] = innerObj;
defs_[jss::FIELDS][i++] = innerArray;
}
// populate TER code names and values
defs_[jss::TRANSACTION_RESULTS] = Json::objectValue;
for (auto const& [code, terInfo] : transResults())
{
defs_[jss::TRANSACTION_RESULTS][terInfo.first] = code;
}
// populate TxType names and values
defs_[jss::TRANSACTION_TYPES] = Json::objectValue;
defs_[jss::TRANSACTION_TYPES][jss::Invalid] = -1;
for (auto const& f : TxFormats::getInstance())
{
defs_[jss::TRANSACTION_TYPES][f.getName()] = f.getType();
}
// generate hash
// NOTE: the hash covers the object as serialized *before* the hash
// itself is inserted, so clients can cache against it.
{
std::string const out = Json::FastWriter().write(defs_);
defsHash_ = ripple::sha512Half(ripple::Slice{out.data(), out.size()});
defs_[jss::hash] = to_string(defsHash_);
}
}
} // namespace detail
// RPC handler for "server_definitions".
//
// If the request supplies a "hash" equal to the hash of the cached
// definitions, only the hash is echoed back (the client is up to date);
// otherwise the full definitions object is returned.
Json::Value
doServerDefinitions(RPC::JsonContext& context)
{
auto& requestParams = context.params;
uint256 requestedHash;
if (requestParams.isMember(jss::hash))
{
auto const& hashParam = requestParams[jss::hash];
bool const parsedOk =
hashParam.isString() && requestedHash.parseHex(hashParam.asString());
if (!parsedOk)
return RPC::invalid_field_error(jss::hash);
}
// Built once on first use; depends only on compiled-in protocol data.
static detail::ServerDefinitions const defs{};
if (!defs.hashMatches(requestedHash))
return defs.get();
Json::Value reply = Json::objectValue;
reply[jss::hash] = to_string(requestedHash);
return reply;
}
Json::Value
doServerInfo(RPC::JsonContext& context)
{