Mirror of https://github.com/XRPLF/rippled.git, synced 2025-11-19 18:45:52 +00:00

Compare commits (23 commits): a1q123456/...ximinez/tr
Commits: 82c5e5db9b, 8895eb6b5d, 80e3bcabdc, 2f155d9273, 53fcfda242,
540f0aff7f, 20e0fefa2c, 0db558cf83, 6385985ee2, 4d7173b5d9, b3bc48999b,
aea76c8693, 9d20f27a55, 5026b64180, 9fd0f72039, 45f024ddef, ec25b9d24a,
f8687226ea, ec530a9b0c, 086b9f62d4, 1eb4b08592, a7c9c69fbd, 6e11a3f1a3
.github/workflows/build-test.yml (vendored, 15 changes)

@@ -102,16 +102,21 @@ jobs:
          echo 'CMake target: ${{ matrix.cmake_target }}'
          echo 'Config name: ${{ matrix.config_name }}'

      - name: Cleanup workspace
        if: ${{ runner.os == 'macOS' }}
        uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
      - name: Clean workspace (MacOS)
        if: ${{ inputs.os == 'macos' }}
        run: |
          WORKSPACE=${{ github.workspace }}
          echo "Cleaning workspace '${WORKSPACE}'."
          if [ -z "${WORKSPACE}" ] || [ "${WORKSPACE}" = "/" ]; then
            echo "Invalid working directory '${WORKSPACE}'."
            exit 1
          fi
          find "${WORKSPACE}" -depth 1 | xargs rm -rfv

      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
      - name: Prepare runner
        uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
        with:
          disable_ccache: false

      - name: Check configuration (Windows)
        if: ${{ inputs.os == 'windows' }}
.github/workflows/check-format.yml (vendored, 35 changes)

@@ -17,10 +17,41 @@ jobs:
    runs-on: ubuntu-latest
    container: ghcr.io/xrplf/ci/tools-rippled-pre-commit
    steps:
      # The $GITHUB_WORKSPACE and ${{ github.workspace }} might not point to the
      # same directory for jobs running in containers. The actions/checkout step
      # is *supposed* to checkout into $GITHUB_WORKSPACE and then add it to
      # safe.directory (see instructions at https://github.com/actions/checkout)
      # but that is apparently not happening for some container images. We
      # therefore preemptively add both directories to safe.directory. See also
      # https://github.com/actions/runner/issues/2058 for more details.
      - name: Configure git safe.directory
        run: |
          git config --global --add safe.directory $GITHUB_WORKSPACE
          git config --global --add safe.directory ${{ github.workspace }}
      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
      - name: Prepare runner
        uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
      - name: Check configuration
        run: |
          echo 'Checking path.'
          echo ${PATH} | tr ':' '\n'

          echo 'Checking environment variables.'
          env | sort

          echo 'Checking pre-commit version.'
          pre-commit --version

          echo 'Checking clang-format version.'
          clang-format --version

          echo 'Checking NPM version.'
          npm --version

          echo 'Checking Node.js version.'
          node --version

          echo 'Checking prettier version.'
          prettier --version
      - name: Format code
        run: pre-commit run --show-diff-on-failure --color=always --all-files
      - name: Check for differences
.github/workflows/notify-clio.yml (vendored, 13 changes)

@@ -50,10 +50,6 @@ jobs:
          echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}"
          echo 'Extracting version.'
          echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
      - name: Calculate conan reference
        id: conan_ref
        run: |
          echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
      - name: Add Conan remote
        run: |
          echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."

@@ -65,9 +61,10 @@ jobs:
      - name: Upload package
        run: |
          conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
          conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.conan_ref.outputs.conan_ref }}
          conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}
    outputs:
      conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
      channel: ${{ steps.generate.outputs.channel }}
      version: ${{ steps.generate.outputs.version }}

  notify:
    needs: upload

@@ -79,5 +76,5 @@ jobs:
        run: |
          gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
            -F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
            -F "client_payload[pr_url]=${{ github.event.pull_request.html_url }}"
            -F "client_payload[version]=${{ needs.upload.outputs.version }}@${{ needs.upload.outputs.user }}/${{ needs.upload.outputs.channel }}" \
            -F "client_payload[pr]=${{ github.event.pull_request.number }}"
.github/workflows/on-pr.yml (vendored, 1 change)

@@ -75,7 +75,6 @@ jobs:
            tests/**
            CMakeLists.txt
            conanfile.py
            conan.lock
      - name: Check whether to run
        # This step determines whether the rest of the workflow should
        # run. The rest of the workflow will run if this job runs AND at
.github/workflows/on-trigger.yml (vendored, 1 change)

@@ -32,7 +32,6 @@ on:
      - "tests/**"
      - "CMakeLists.txt"
      - "conanfile.py"
      - "conan.lock"

  # Run at 06:32 UTC on every day of the week from Monday through Friday. This
  # will force all dependencies to be rebuilt, which is useful to verify that
@@ -1,5 +1,18 @@
# To run pre-commit hooks, first install pre-commit:
# - `pip install pre-commit==${PRE_COMMIT_VERSION}`
# - `pip install pre-commit-hooks==${PRE_COMMIT_HOOKS_VERSION}`
#
# Depending on your system, you can use `brew install` or `apt install` as well
# for installing the pre-commit package, but `pip` is needed to install the
# hooks; you can also use `pipx` if you prefer.
# Next, install the required formatters:
# - `pip install clang-format==${CLANG_VERSION}`
# - `npm install prettier@${PRETTIER_VERSION}`
#
# See https://github.com/XRPLF/ci/blob/main/.github/workflows/tools-rippled.yml
# for the versions used in the CI pipeline. You will need to have the exact same
# versions of the tools installed on your system to produce the same results as
# the pipeline.
#
# Then, run the following command to install the git hook scripts:
# - `pre-commit install`

@@ -7,33 +20,45 @@
# - `pre-commit run --all-files`
# To manually run a specific hook, use:
# - `pre-commit run <hook_id> --all-files`
# To run the hooks against only the staged files, use:
# To run the hooks against only the files changed in the current commit, use:
# - `pre-commit run`
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: mixed-line-ending
      - id: check-merge-conflict
        args: [--assume-in-merge]

  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: 7d85583be209cb547946c82fbe51f4bc5dd1d017 # frozen: v18.1.8
  - repo: local
    hooks:
      - id: clang-format
        args: [--style=file]
        "types_or": [c++, c, proto]

  - repo: https://github.com/rbubley/mirrors-prettier
    rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
        name: clang-format
        language: system
        entry: clang-format -i
        files: '\.(cpp|hpp|h|ipp|proto)$'
      - id: trailing-whitespace
        name: trailing-whitespace
        entry: trailing-whitespace-fixer
        language: system
        types: [text]
      - id: end-of-file
        name: end-of-file
        entry: end-of-file-fixer
        language: system
        types: [text]
      - id: mixed-line-ending
        name: mixed-line-ending
        entry: mixed-line-ending
        language: system
        types: [text]
      - id: check-merge-conflict
        name: check-merge-conflict
        entry: check-merge-conflict --assume-in-merge
        language: system
        types: [text]
  - repo: local
    hooks:
      - id: prettier
        name: prettier
        language: system
        entry: prettier --ignore-unknown --write

exclude: |
  (?x)^(
    external/.*|
    .github/scripts/levelization/results/.*\.txt|
    conan\.lock
    .github/scripts/levelization/results/.*\.txt
  )$
BUILD.md (22 changes)

@@ -158,10 +158,6 @@ updated dependencies with the newer version. However, if we switch to a newer
version that no longer requires a patch, no action is required on your part, as
the new recipe will be automatically pulled from the official Conan Center.

> [!NOTE]
> You might need to add `--lockfile=""` to your `conan install` command to
> avoid automatic use of the existing `conan.lock` file when you run
> `conan export` manually on your machine.

### Conan profile tweaks

#### Missing compiler version

@@ -470,21 +466,6 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
The location of the `rippled` binary in your build directory depends on your
CMake generator. Pass `--help` to see the rest of the command line options.

#### Conan lockfile

To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).

The `conan.lock` file in the repository contains a "snapshot" of the current
dependencies. It is implicitly used when running `conan` commands; you don't
need to specify it.

You have to update this file every time you add a new dependency or change a
revision or version of an existing dependency.

To do that, run the following command in the repository root:

```bash
conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True'
```

## Coverage report

The coverage report is intended for developers using compilers GCC

@@ -583,8 +564,7 @@ After any updates or changes to dependencies, you may need to do the following:
   ```

3. Re-run [conan export](#patched-recipes) if needed.
4. [Regenerate lockfile](#conan-lockfile).
5. Re-run [conan install](#build-and-test).
4. Re-run [conan install](#build-and-test).

### `protobuf/port_def.inc` file not found
@@ -975,6 +975,47 @@
#           number of ledger records online. Must be greater
#           than or equal to ledger_history.
#
# Optional keys for NuDB only:
#
# nudb_block_size       EXPERIMENTAL: Block size in bytes for NuDB storage.
#           Must be a power of 2 between 4096 and 32768. Default is 4096.
#
#           This parameter controls the fundamental storage unit
#           size for NuDB's internal data structures. The choice
#           of block size can significantly impact performance
#           depending on your storage hardware and filesystem:
#
#           - 4096 bytes: Optimal for most standard SSDs and
#             traditional filesystems (ext4, NTFS, HFS+).
#             Provides good balance of performance and storage
#             efficiency. Recommended for most deployments.
#             Minimizes memory footprint and provides consistent
#             low-latency access patterns across diverse hardware.
#
#           - 8192-16384 bytes: May improve performance on
#             high-end NVMe SSDs and copy-on-write filesystems
#             like ZFS or Btrfs that benefit from larger block
#             alignment. Can reduce metadata overhead for large
#             databases. Offers better sequential throughput and
#             reduced I/O operations at the cost of higher memory
#             usage per operation.
#
#           - 32768 bytes (32K): Maximum supported block size
#             for high-performance scenarios with very fast
#             storage. May increase memory usage and reduce
#             efficiency for smaller databases. Best suited for
#             enterprise environments with abundant RAM.
#
#           Performance testing is recommended before deploying
#           any non-default block size in production environments.
#
#           Note: This setting cannot be changed after database
#           creation without rebuilding the entire database.
#           Choose carefully based on your hardware and expected
#           database size.
#
#           Example: nudb_block_size=4096
#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:
#

@@ -1471,6 +1512,7 @@ secure_gateway = 127.0.0.1
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
nudb_block_size=4096
online_delete=512
advisory_delete=0
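The rule spelled out above, a power of 2 between 4096 and 32768 with a default of 4096, is easy to state in code. The following is a minimal sketch of that constraint, not the actual rippled implementation (the real check lives in the NuDB backend factory and is exercised by the NuDBFactory_test file added later in this diff):

```cpp
#include <cstddef>

// Sketch only: mirrors the documented nudb_block_size constraint.
constexpr bool
validNudbBlockSize(std::size_t n)
{
    // A power of 2 has exactly one bit set, so n & (n - 1) == 0.
    return n >= 4096 && n <= 32768 && (n & (n - 1)) == 0;
}

static_assert(validNudbBlockSize(4096));    // minimum and default
static_assert(validNudbBlockSize(32768));   // maximum
static_assert(!validNudbBlockSize(5000));   // not a power of 2
static_assert(!validNudbBlockSize(65536));  // power of 2, but too large
```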
@@ -218,12 +218,12 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
    set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
  endif()

  check_cxx_compiler_flag(-fprofile-update=atomic HAVE_cxx_fprofile_update)
  check_cxx_compiler_flag(-fprofile-update HAVE_cxx_fprofile_update)
  if(HAVE_cxx_fprofile_update)
    set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic")
  endif()

  check_c_compiler_flag(-fprofile-update=atomic HAVE_c_fprofile_update)
  check_c_compiler_flag(-fprofile-update HAVE_c_fprofile_update)
  if(HAVE_c_fprofile_update)
    set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic")
  endif()
conan.lock (56 changes, file deleted)

@@ -1,56 +0,0 @@
{
  "version": "0.5",
  "requires": [
    "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
    "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
    "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
    "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
    "snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",
    "rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347",
    "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
    "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
    "openssl/3.5.2#0c5a5e15ae569f45dff57adcf1770cf7%1756234259.61",
    "nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
    "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
    "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
    "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
    "libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
    "jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
    "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
    "doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
    "date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
    "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
    "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
    "boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326",
    "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
  ],
  "build_requires": [
    "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
    "strawberryperl/5.32.1.1#707032463aa0620fa17ec0d887f5fe41%1756234281.733",
    "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
    "nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1756234232.901",
    "msys2/cci.latest#5b73b10144f73cc5bfe0572ed9be39e1%1751977009.857",
    "m4/1.4.19#b38ced39a01e31fef5435bc634461fd2%1700758725.451",
    "cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89",
    "b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28",
    "automake/1.16.5#b91b7c384c3deaa9d535be02da14d04f%1755524470.56",
    "autoconf/2.71#51077f068e61700d65bb05541ea1e4b0%1731054366.86"
  ],
  "python_requires": [],
  "overrides": {
    "protobuf/3.21.12": [
      null,
      "protobuf/3.21.12"
    ],
    "lz4/1.9.4": [
      "lz4/1.10.0"
    ],
    "boost/1.83.0": [
      "boost/1.88.0"
    ],
    "sqlite3/3.44.2": [
      "sqlite3/3.49.1"
    ]
  },
  "config_requires": []
}
@@ -150,24 +150,6 @@ public:
        return (mantissa_ < 0) ? -1 : (mantissa_ ? 1 : 0);
    }

    Number
    truncate() const noexcept
    {
        if (exponent_ >= 0 || mantissa_ == 0)
            return *this;

        Number ret = *this;
        while (ret.exponent_ < 0 && ret.mantissa_ != 0)
        {
            ret.exponent_ += 1;
            ret.mantissa_ /= rep(10);
        }
        // We are guaranteed that normalize() will never throw an exception
        // because exponent is either negative or zero at this point.
        ret.normalize();
        return ret;
    }

    friend constexpr bool
    operator>(Number const& x, Number const& y) noexcept
    {
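In short, truncate() rounds toward zero by dividing the mantissa by 10 until the exponent is no longer negative. A small usage sketch (the include path is assumed; the expected values match the test_truncate cases shown further below in this diff):

```cpp
#include <xrpl/basics/Number.h>  // assumed include path

#include <cassert>

int main()
{
    using ripple::Number;
    assert(Number(25, -1).truncate() == Number(2, 0));    // 2.5  -> 2
    assert(Number(-25, -1).truncate() == Number(-2, 0));  // -2.5 -> -2 (toward zero)
    assert(Number(99, -2).truncate() == Number(0, 0));    // 0.99 -> 0
    assert(Number(25, +1).truncate() == Number(250, 0));  // 250 is already integral
}
```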
@@ -122,13 +122,6 @@ std::size_t constexpr maxDataPayloadLength = 256;
/** Vault withdrawal policies */
std::uint8_t constexpr vaultStrategyFirstComeFirstServe = 1;

/** Default IOU scale factor for a Vault */
std::uint8_t constexpr vaultDefaultIOUScale = 6;
/** Maximum scale factor for a Vault. The number is chosen to ensure that
    1 IOU can be always converted to shares.
    10^19 > maxMPTokenAmount (2^64-1) > 10^18 */
std::uint8_t constexpr vaultMaximumIOUScale = 18;

/** Maximum recursion depth for vault shares being put as an asset inside
 * another vault; counted from 0 */
std::uint8_t constexpr maxAssetCheckDepth = 5;
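The choice of 18 follows from the comment's arithmetic: one whole IOU at scale s corresponds to 10^s share units, and 10^18 is the largest power of ten below the quoted maxMPTokenAmount bound. A sketch of that check (illustrative only; the constant name and assertion are not part of the source):

```cpp
#include <cstdint>
#include <limits>

// One whole IOU at the maximum scale of 18 maps to 10^18 share units.
constexpr std::uint64_t sharesPerIOUAtMaxScale = 1'000'000'000'000'000'000ULL;

static_assert(
    sharesPerIOUAtMaxScale <= std::numeric_limits<std::uint64_t>::max(),
    "scale 18 keeps one IOU representable as a 64-bit share amount");
```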
@@ -499,7 +499,6 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({
    {sfLossUnrealized, soeREQUIRED},
    {sfShareMPTID, soeREQUIRED},
    {sfWithdrawalPolicy, soeREQUIRED},
    {sfScale, soeDEFAULT},
    // no SharesTotal ever (use MPTIssuance.sfOutstandingAmount)
    // no PermissionedDomainID ever (use MPTIssuance.sfDomainID)
}))
@@ -483,7 +483,6 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({
    {sfDomainID, soeOPTIONAL},
    {sfWithdrawalPolicy, soeOPTIONAL},
    {sfData, soeOPTIONAL},
    {sfScale, soeOPTIONAL},
}))

/** This transaction updates a single asset vault. */
(File diff suppressed because it is too large.)
@@ -720,30 +720,6 @@ public:
        BEAST_EXPECT(res2 == STAmount{7518784});
    }

    void
    test_truncate()
    {
        BEAST_EXPECT(Number(25, +1).truncate() == Number(250, 0));
        BEAST_EXPECT(Number(25, 0).truncate() == Number(25, 0));
        BEAST_EXPECT(Number(25, -1).truncate() == Number(2, 0));
        BEAST_EXPECT(Number(25, -2).truncate() == Number(0, 0));
        BEAST_EXPECT(Number(99, -2).truncate() == Number(0, 0));

        BEAST_EXPECT(Number(-25, +1).truncate() == Number(-250, 0));
        BEAST_EXPECT(Number(-25, 0).truncate() == Number(-25, 0));
        BEAST_EXPECT(Number(-25, -1).truncate() == Number(-2, 0));
        BEAST_EXPECT(Number(-25, -2).truncate() == Number(0, 0));
        BEAST_EXPECT(Number(-99, -2).truncate() == Number(0, 0));

        BEAST_EXPECT(Number(0, 0).truncate() == Number(0, 0));
        BEAST_EXPECT(Number(0, 30000).truncate() == Number(0, 0));
        BEAST_EXPECT(Number(0, -30000).truncate() == Number(0, 0));
        BEAST_EXPECT(Number(100, -30000).truncate() == Number(0, 0));
        BEAST_EXPECT(Number(100, -30000).truncate() == Number(0, 0));
        BEAST_EXPECT(Number(-100, -30000).truncate() == Number(0, 0));
        BEAST_EXPECT(Number(-100, -30000).truncate() == Number(0, 0));
    }

    void
    run() override
    {

@@ -764,7 +740,6 @@ public:
        test_stream();
        test_inc_dec();
        test_toSTAmount();
        test_truncate();
    }
};
@@ -175,78 +175,12 @@ public:
        BEAST_EXPECT(*lv == -1);
    }

    void
    stopJobQueueWhenCoroutineSuspended()
    {
        using namespace std::chrono_literals;
        using namespace jtx;

        testcase("Stop JobQueue when a coroutine is suspended");

        Env env(*this, envconfig([](std::unique_ptr<Config> cfg) {
            cfg->FORCE_MULTI_THREAD = true;
            return cfg;
        }));

        bool started = false;
        bool finished = false;
        std::optional<bool> shouldStop;
        std::condition_variable cv;
        std::mutex m;
        std::unique_lock<std::mutex> lk(m);
        auto coro = env.app().getJobQueue().postCoro(
            jtCLIENT, "Coroutine-Test", [&](auto const& c) {
                started = true;
                cv.notify_all();
                c->yield();
                finished = true;
                shouldStop = c->shouldStop();
                cv.notify_all();
            });

        cv.wait_for(lk, 5s, [&]() { return started; });
        env.app().getJobQueue().stop();

        cv.wait_for(lk, 5s, [&]() { return finished; });
        BEAST_EXPECT(finished);
        BEAST_EXPECT(shouldStop.has_value() && *shouldStop == true);
    }

    void
    coroutineGetsDestroyedBeforeExecuting()
    {
        using namespace std::chrono_literals;
        using namespace jtx;

        testcase("Coroutine gets destroyed before executing");

        Env env(*this, envconfig([](std::unique_ptr<Config> cfg) {
            cfg->FORCE_MULTI_THREAD = true;
            return cfg;
        }));

        {
            auto coro = std::make_shared<JobQueue::Coro>(
                Coro_create_t{},
                env.app().getJobQueue(),
                JobType::jtCLIENT,
                "test",
                [](auto coro) {

                });
        }

        pass();
    }

    void
    run() override
    {
        correct_order();
        incorrect_order();
        thread_specific_storage();
        stopJobQueueWhenCoroutineSuspended();
        coroutineGetsDestroyedBeforeExecuting();
    }
};
@@ -74,10 +74,6 @@ public:

    /** @} */

    /** Create an Account from an account ID. Should only be used when the
     * secret key is unavailable, such as for pseudo-accounts. */
    explicit Account(std::string name, AccountID const& id);

    enum AcctStringType { base58Seed, other };
    /** Create an account from a base58 seed string. Throws on invalid seed. */
    Account(AcctStringType stringType, std::string base58SeedStr);

@@ -86,14 +86,6 @@ Account::Account(AcctStringType stringType, std::string base58SeedStr)
{
}

Account::Account(std::string name, AccountID const& id)
    : Account(name, randomKeyPair(KeyType::secp256k1), privateCtorTag{})
{
    // override the randomly generated values
    id_ = id;
    human_ = toBase58(id_);
}

IOU
Account::operator[](std::string const& s) const
{
src/test/nodestore/NuDBFactory_test.cpp (new file, 478 lines)

@@ -0,0 +1,478 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2013 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/nodestore/TestBase.h>
#include <test/unit_test/SuiteJournal.h>

#include <xrpld/nodestore/DummyScheduler.h>
#include <xrpld/nodestore/Manager.h>

#include <xrpl/basics/BasicConfig.h>
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/temp_dir.h>

#include <memory>
#include <sstream>

namespace ripple {
namespace NodeStore {

class NuDBFactory_test : public TestBase
{
private:
    // Helper function to create a Section with specified parameters
    Section
    createSection(std::string const& path, std::string const& blockSize = "")
    {
        Section params;
        params.set("type", "nudb");
        params.set("path", path);
        if (!blockSize.empty())
            params.set("nudb_block_size", blockSize);
        return params;
    }

    // Helper function to create a backend and test basic functionality
    bool
    testBackendFunctionality(
        Section const& params,
        std::size_t expectedBlocksize)
    {
        try
        {
            DummyScheduler scheduler;
            test::SuiteJournal journal("NuDBFactory_test", *this);

            auto backend = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);

            if (!BEAST_EXPECT(backend))
                return false;

            if (!BEAST_EXPECT(backend->getBlockSize() == expectedBlocksize))
                return false;

            backend->open();

            if (!BEAST_EXPECT(backend->isOpen()))
                return false;

            // Test basic store/fetch functionality
            auto batch = createPredictableBatch(10, 12345);
            storeBatch(*backend, batch);

            Batch copy;
            fetchCopyOfBatch(*backend, &copy, batch);

            backend->close();

            return areBatchesEqual(batch, copy);
        }
        catch (...)
        {
            return false;
        }
    }

    // Helper function to test log messages
    void
    testLogMessage(
        Section const& params,
        beast::severities::Severity level,
        std::string const& expectedMessage)
    {
        test::StreamSink sink(level);
        beast::Journal journal(sink);

        DummyScheduler scheduler;
        auto backend = Manager::instance().make_Backend(
            params, megabytes(4), scheduler, journal);

        std::string logOutput = sink.messages().str();
        BEAST_EXPECT(logOutput.find(expectedMessage) != std::string::npos);
    }

    // Helper function to test power of two validation
    void
    testPowerOfTwoValidation(std::string const& size, bool shouldWork)
    {
        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), size);

        test::StreamSink sink(beast::severities::kWarning);
        beast::Journal journal(sink);

        DummyScheduler scheduler;
        auto backend = Manager::instance().make_Backend(
            params, megabytes(4), scheduler, journal);

        std::string logOutput = sink.messages().str();
        bool hasWarning =
            logOutput.find("Invalid nudb_block_size") != std::string::npos;

        BEAST_EXPECT(hasWarning == !shouldWork);
    }

public:
    void
    testDefaultBlockSize()
    {
        testcase("Default block size (no nudb_block_size specified)");

        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path());

        // Should work with default 4096 block size
        BEAST_EXPECT(testBackendFunctionality(params, 4096));
    }

    void
    testValidBlockSizes()
    {
        testcase("Valid block sizes");

        std::vector<std::size_t> validSizes = {4096, 8192, 16384, 32768};

        for (auto const& size : validSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), to_string(size));

            BEAST_EXPECT(testBackendFunctionality(params, size));
        }
        // Empty value is ignored by the config parser, so uses the
        // default
        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), "");

        BEAST_EXPECT(testBackendFunctionality(params, 4096));
    }

    void
    testInvalidBlockSizes()
    {
        testcase("Invalid block sizes");

        std::vector<std::string> invalidSizes = {
            "2048",    // Too small
            "1024",    // Too small
            "65536",   // Too large
            "131072",  // Too large
            "5000",    // Not power of 2
            "6000",    // Not power of 2
            "10000",   // Not power of 2
            "0",       // Zero
            "-1",      // Negative
            "abc",     // Non-numeric
            "4k",      // Invalid format
            "4096.5"   // Decimal
        };

        for (auto const& size : invalidSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // Fails
            BEAST_EXPECT(!testBackendFunctionality(params, 4096));
        }

        // Test whitespace cases separately since lexical_cast may handle them
        std::vector<std::string> whitespaceInvalidSizes = {
            "4096 ",  // Trailing space - might be handled by lexical_cast
            " 4096"   // Leading space - might be handled by lexical_cast
        };

        for (auto const& size : whitespaceInvalidSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // Fails
            BEAST_EXPECT(!testBackendFunctionality(params, 4096));
        }
    }

    void
    testLogMessages()
    {
        testcase("Log message verification");

        // Test valid custom block size logging
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "8192");

            testLogMessage(
                params,
                beast::severities::kInfo,
                "Using custom NuDB block size: 8192");
        }

        // Test invalid block size failure
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "5000");

            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                fail();
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size: 5000") !=
                    std::string::npos);
                BEAST_EXPECT(
                    logOutput.find(
                        "Must be power of 2 between 4096 and 32768") !=
                    std::string::npos);
            }
        }

        // Test non-numeric value failure
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "invalid");

            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);

                fail();
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size value: invalid") !=
                    std::string::npos);
            }
        }
    }

    void
    testPowerOfTwoValidation()
    {
        testcase("Power of 2 validation logic");

        // Test edge cases around valid range
        std::vector<std::pair<std::string, bool>> testCases = {
            {"4095", false},   // Just below minimum
            {"4096", true},    // Minimum valid
            {"4097", false},   // Just above minimum, not power of 2
            {"8192", true},    // Valid power of 2
            {"8193", false},   // Just above valid power of 2
            {"16384", true},   // Valid power of 2
            {"32768", true},   // Maximum valid
            {"32769", false},  // Just above maximum
            {"65536", false}   // Power of 2 but too large
        };

        for (auto const& [size, shouldWork] : testCases)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // We test the validation logic by catching exceptions for invalid
            // values
            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                BEAST_EXPECT(shouldWork);
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size") !=
                    std::string::npos);
            }
        }
    }

    void
    testBothConstructorVariants()
    {
        testcase("Both constructor variants work with custom block size");

        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), "16384");

        DummyScheduler scheduler;
        test::SuiteJournal journal("NuDBFactory_test", *this);

        // Test first constructor (without nudb::context)
        {
            auto backend1 = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);
            BEAST_EXPECT(backend1 != nullptr);
            BEAST_EXPECT(testBackendFunctionality(params, 16384));
        }

        // Test second constructor (with nudb::context)
        // Note: This would require access to nudb::context, which might not be
        // easily testable without more complex setup. For now, we test that
        // the factory can create backends with the first constructor.
    }

    void
    testConfigurationParsing()
    {
        testcase("Configuration parsing edge cases");

        // Test that whitespace is handled correctly
        std::vector<std::string> validFormats = {
            "8192"  // Basic valid format
        };

        // Test whitespace handling separately since lexical_cast behavior may
        // vary
        std::vector<std::string> whitespaceFormats = {
            " 8192",  // Leading space - may or may not be handled by
                      // lexical_cast
            "8192 "   // Trailing space - may or may not be handled by
                      // lexical_cast
        };

        // Test basic valid format
        for (auto const& format : validFormats)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), format);

            test::StreamSink sink(beast::severities::kInfo);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            auto backend = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);

            // Should log success message for valid values
            std::string logOutput = sink.messages().str();
            bool hasSuccessMessage =
                logOutput.find("Using custom NuDB block size") !=
                std::string::npos;
            BEAST_EXPECT(hasSuccessMessage);
        }

        // Test whitespace formats - these should work if lexical_cast handles
        // them
        for (auto const& format : whitespaceFormats)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), format);

            // Use a lower threshold to capture both info and warning messages
            test::StreamSink sink(beast::severities::kDebug);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                fail();
            }
            catch (...)
            {
                // Fails
                BEAST_EXPECT(!testBackendFunctionality(params, 8192));
            }
        }
    }

    void
    testDataPersistence()
    {
        testcase("Data persistence with different block sizes");

        std::vector<std::string> blockSizes = {
            "4096", "8192", "16384", "32768"};

        for (auto const& size : blockSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            DummyScheduler scheduler;
            test::SuiteJournal journal("NuDBFactory_test", *this);

            // Create test data
            auto batch = createPredictableBatch(50, 54321);

            // Store data
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                backend->open();
                storeBatch(*backend, batch);
                backend->close();
            }

            // Retrieve data in new backend instance
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                backend->open();

                Batch copy;
                fetchCopyOfBatch(*backend, &copy, batch);

                BEAST_EXPECT(areBatchesEqual(batch, copy));
                backend->close();
            }
        }
    }

    void
    run() override
    {
        testDefaultBlockSize();
        testValidBlockSizes();
        testInvalidBlockSizes();
        testLogMessages();
        testPowerOfTwoValidation();
        testBothConstructorVariants();
        testConfigurationParsing();
        testDataPersistence();
    }
};

BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, ripple);

}  // namespace NodeStore
}  // namespace ripple
@@ -166,7 +166,7 @@ private:
        int addFlags,
        std::function<bool(void)> const& continueCallback);

    // Compute the liquidity for a path. Return tesSUCCESS if it has enough
    // Compute the liquidity for a path. Return tesSUCCESS if it has has enough
    // liquidity to be worth keeping, otherwise an error.
    TER
    getPathLiquidity(
@@ -1510,12 +1510,6 @@ ValidMPTIssuance::finalize(

        if (tx.getTxnType() == ttESCROW_FINISH)
            return true;

        if ((tx.getTxnType() == ttVAULT_CLAWBACK ||
             tx.getTxnType() == ttVAULT_WITHDRAW) &&
            mptokensDeleted_ == 1 && mptokensCreated_ == 0 &&
            mptIssuancesCreated_ == 0 && mptIssuancesDeleted_ == 0)
            return true;
    }

    if (mptIssuancesCreated_ != 0)
@@ -21,10 +21,8 @@
#include <xrpld/ledger/View.h>

#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/MPTIssue.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STAmount.h>
#include <xrpl/protocol/STNumber.h>
#include <xrpl/protocol/TER.h>

@@ -153,7 +151,7 @@ VaultClawback::doApply()
    if (!vault)
        return tefINTERNAL;  // LCOV_EXCL_LINE

    auto const mptIssuanceID = *((*vault)[sfShareMPTID]);
    auto const mptIssuanceID = (*vault)[sfShareMPTID];
    auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID));
    if (!sleIssuance)
    {

@@ -163,169 +161,68 @@ VaultClawback::doApply()
        // LCOV_EXCL_STOP
    }

    Asset const vaultAsset = vault->at(sfAsset);
    Asset const asset = vault->at(sfAsset);
    STAmount const amount = [&]() -> STAmount {
        auto const maybeAmount = tx[~sfAmount];
        if (maybeAmount)
            return *maybeAmount;
        return {sfAmount, vaultAsset, 0};
        return {sfAmount, asset, 0};
    }();
    XRPL_ASSERT(
        amount.asset() == vaultAsset,
        amount.asset() == asset,
        "ripple::VaultClawback::doApply : matching asset");

    auto assetsAvailable = vault->at(sfAssetsAvailable);
    auto assetsTotal = vault->at(sfAssetsTotal);
    [[maybe_unused]] auto const lossUnrealized = vault->at(sfLossUnrealized);
    XRPL_ASSERT(
        lossUnrealized <= (assetsTotal - assetsAvailable),
        "ripple::VaultClawback::doApply : loss and assets do balance");

    AccountID holder = tx[sfHolder];
    MPTIssue const share{mptIssuanceID};
    STAmount sharesDestroyed = {share};
    STAmount assetsRecovered;
    try
    STAmount assets, shares;
    if (amount == beast::zero)
    {
        if (amount == beast::zero)
        {
            sharesDestroyed = accountHolds(
                view(),
                holder,
                share,
                FreezeHandling::fhIGNORE_FREEZE,
                AuthHandling::ahIGNORE_AUTH,
                j_);

            auto const maybeAssets =
                sharesToAssetsWithdraw(vault, sleIssuance, sharesDestroyed);
            if (!maybeAssets)
                return tecINTERNAL;  // LCOV_EXCL_LINE
            assetsRecovered = *maybeAssets;
        }
        else
        {
            assetsRecovered = amount;
            {
                auto const maybeShares =
                    assetsToSharesWithdraw(vault, sleIssuance, assetsRecovered);
                if (!maybeShares)
                    return tecINTERNAL;  // LCOV_EXCL_LINE
                sharesDestroyed = *maybeShares;
            }

            auto const maybeAssets =
                sharesToAssetsWithdraw(vault, sleIssuance, sharesDestroyed);
            if (!maybeAssets)
                return tecINTERNAL;  // LCOV_EXCL_LINE
            assetsRecovered = *maybeAssets;
        }

        // Clamp to maximum.
        if (assetsRecovered > *assetsAvailable)
        {
            assetsRecovered = *assetsAvailable;
            // Note, it is important to truncate the number of shares, otherwise
            // the corresponding assets might breach the AssetsAvailable
            {
                auto const maybeShares = assetsToSharesWithdraw(
                    vault, sleIssuance, assetsRecovered, TruncateShares::yes);
                if (!maybeShares)
                    return tecINTERNAL;  // LCOV_EXCL_LINE
                sharesDestroyed = *maybeShares;
            }

            auto const maybeAssets =
                sharesToAssetsWithdraw(vault, sleIssuance, sharesDestroyed);
            if (!maybeAssets)
                return tecINTERNAL;  // LCOV_EXCL_LINE
            assetsRecovered = *maybeAssets;
            if (assetsRecovered > *assetsAvailable)
            {
                // LCOV_EXCL_START
                JLOG(j_.error())
                    << "VaultClawback: invalid rounding of shares.";
                return tecINTERNAL;
                // LCOV_EXCL_STOP
            }
        }
        Asset share = *(*vault)[sfShareMPTID];
        shares = accountHolds(
            view(),
            holder,
            share,
            FreezeHandling::fhIGNORE_FREEZE,
            AuthHandling::ahIGNORE_AUTH,
            j_);
        assets = sharesToAssetsWithdraw(vault, sleIssuance, shares);
    }
    catch (std::overflow_error const&)
    else
    {
        // It's easy to hit this exception from Number with large enough Scale
        // so we avoid spamming the log and only use debug here.
        JLOG(j_.debug())  //
            << "VaultClawback: overflow error with"
            << " scale=" << (int)vault->at(sfScale).value()  //
            << ", assetsTotal=" << vault->at(sfAssetsTotal).value()
            << ", sharesTotal=" << sleIssuance->at(sfOutstandingAmount)
            << ", amount=" << amount.value();
        return tecPATH_DRY;
        assets = amount;
        shares = assetsToSharesWithdraw(vault, sleIssuance, assets);
    }

    if (sharesDestroyed == beast::zero)
        return tecPRECISION_LOSS;
    // Clamp to maximum.
    Number maxAssets = *vault->at(sfAssetsAvailable);
    if (assets > maxAssets)
    {
        assets = maxAssets;
        shares = assetsToSharesWithdraw(vault, sleIssuance, assets);
    }

    assetsTotal -= assetsRecovered;
    assetsAvailable -= assetsRecovered;
    if (shares == beast::zero)
        return tecINSUFFICIENT_FUNDS;

    vault->at(sfAssetsTotal) -= assets;
    vault->at(sfAssetsAvailable) -= assets;
    view().update(vault);

    auto const& vaultAccount = vault->at(sfAccount);
    // Transfer shares from holder to vault.
    if (auto const ter = accountSend(
            view(),
            holder,
            vaultAccount,
            sharesDestroyed,
            j_,
            WaiveTransferFee::Yes);
        !isTesSuccess(ter))
    if (auto ter = accountSend(
            view(), holder, vaultAccount, shares, j_, WaiveTransferFee::Yes))
        return ter;

    // Try to remove MPToken for shares, if the holder balance is zero. Vault
    // pseudo-account will never set lsfMPTAuthorized, so we ignore flags.
    // Keep MPToken if holder is the vault owner.
    if (holder != vault->at(sfOwner))
    {
        if (auto const ter =
                removeEmptyHolding(view(), holder, sharesDestroyed.asset(), j_);
            isTesSuccess(ter))
        {
            JLOG(j_.debug())  //
                << "VaultClawback: removed empty MPToken for vault shares"
                << " MPTID=" << to_string(mptIssuanceID)  //
                << " account=" << toBase58(holder);
        }
        else if (ter != tecHAS_OBLIGATIONS)
        {
            // LCOV_EXCL_START
            JLOG(j_.error())  //
                << "VaultClawback: failed to remove MPToken for vault shares"
                << " MPTID=" << to_string(mptIssuanceID)  //
                << " account=" << toBase58(holder)  //
                << " with result: " << transToken(ter);
            return ter;
            // LCOV_EXCL_STOP
        }
        // else quietly ignore, holder balance is not zero
    }

    // Transfer assets from vault to issuer.
    if (auto const ter = accountSend(
            view(),
            vaultAccount,
            account_,
            assetsRecovered,
            j_,
            WaiveTransferFee::Yes);
        !isTesSuccess(ter))
    if (auto ter = accountSend(
            view(), vaultAccount, account_, assets, j_, WaiveTransferFee::Yes))
        return ter;

    // Sanity check
    if (accountHolds(
            view(),
            vaultAccount,
            assetsRecovered.asset(),
            assets.asset(),
            FreezeHandling::fhIGNORE_FREEZE,
            AuthHandling::ahIGNORE_AUTH,
            j_) < beast::zero)
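The comment about truncation in the clamping branch deserves a concrete illustration: after clamping to AssetsAvailable, the share count must round down, because rounding it up could map back to more assets than the vault holds. A toy sketch with a hypothetical integer exchange rate (not the real vault math, which goes through Number and the assetsToSharesWithdraw/sharesToAssetsWithdraw helpers):

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    std::uint64_t const assetsPerShare = 3;    // hypothetical rate
    std::uint64_t const assetsAvailable = 10;  // vault's clamped maximum

    // Truncating: 10 / 3 = 3 shares, which redeem 9 assets <= 10. Safe.
    std::uint64_t const shares = assetsAvailable / assetsPerShare;
    assert(shares * assetsPerShare <= assetsAvailable);

    // Rounding up to 4 shares would redeem 12 assets, breaching the
    // AssetsAvailable limit -- exactly what TruncateShares::yes prevents.
    assert((shares + 1) * assetsPerShare > assetsAvailable);
}
```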
@@ -25,10 +25,8 @@
#include <xrpl/protocol/Asset.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/Issue.h>
#include <xrpl/protocol/MPTIssue.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STNumber.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFlags.h>

@@ -86,16 +84,6 @@ VaultCreate::preflight(PreflightContext const& ctx)
        return temMALFORMED;
    }

    if (auto const scale = ctx.tx[~sfScale])
    {
        auto const vaultAsset = ctx.tx[sfAsset];
        if (vaultAsset.holds<MPTIssue>() || vaultAsset.native())
            return temMALFORMED;

        if (scale > vaultMaximumIOUScale)
            return temMALFORMED;
    }

    return preflight2(ctx);
}

@@ -109,8 +97,8 @@ VaultCreate::calculateBaseFee(ReadView const& view, STTx const& tx)
TER
VaultCreate::preclaim(PreclaimContext const& ctx)
{
    auto const vaultAsset = ctx.tx[sfAsset];
    auto const account = ctx.tx[sfAccount];
    auto vaultAsset = ctx.tx[sfAsset];
    auto account = ctx.tx[sfAccount];

    if (vaultAsset.native())
        ;  // No special checks for XRP

@@ -160,7 +148,7 @@ VaultCreate::preclaim(PreclaimContext const& ctx)
        return tecOBJECT_NOT_FOUND;
    }

    auto const sequence = ctx.tx.getSeqValue();
    auto sequence = ctx.tx.getSeqValue();
    if (auto const accountId = pseudoAccountAddress(
            ctx.view, keylet::vault(account, sequence).key);
        accountId == beast::zero)

@@ -177,8 +165,8 @@ VaultCreate::doApply()
    // we can consider downgrading them to `tef` or `tem`.

    auto const& tx = ctx_.tx;
    auto const sequence = tx.getSeqValue();
    auto const owner = view().peek(keylet::account(account_));
    auto sequence = tx.getSeqValue();
    auto owner = view().peek(keylet::account(account_));
    if (owner == nullptr)
        return tefINTERNAL;  // LCOV_EXCL_LINE

@@ -202,10 +190,6 @@ VaultCreate::doApply()
        !isTesSuccess(ter))
        return ter;

    std::uint8_t const scale = (asset.holds<MPTIssue>() || asset.native())
        ? 0
        : ctx_.tx[~sfScale].value_or(vaultDefaultIOUScale);

    auto txFlags = tx.getFlags();
    std::uint32_t mptFlags = 0;
    if ((txFlags & tfVaultShareNonTransferable) == 0)

@@ -225,13 +209,12 @@ VaultCreate::doApply()
            .account = pseudoId->value(),
            .sequence = 1,
            .flags = mptFlags,
            .assetScale = scale,
            .metadata = tx[~sfMPTokenMetadata],
            .domainId = tx[~sfDomainID],
        });
    if (!maybeShare)
        return maybeShare.error();  // LCOV_EXCL_LINE
    auto const& mptIssuanceID = *maybeShare;
    auto& share = *maybeShare;

    vault->setFieldIssue(sfAsset, STIssue{sfAsset, asset});
    vault->at(sfFlags) = txFlags & tfVaultPrivate;

@@ -244,7 +227,7 @@ VaultCreate::doApply()
    // Leave default values for AssetTotal and AssetAvailable, both zero.
    if (auto value = tx[~sfAssetsMaximum])
        vault->at(sfAssetsMaximum) = *value;
    vault->at(sfShareMPTID) = mptIssuanceID;
    vault->at(sfShareMPTID) = share;
    if (auto value = tx[~sfData])
        vault->at(sfData) = *value;
    // Required field, default to vaultStrategyFirstComeFirstServe

@@ -252,31 +235,9 @@ VaultCreate::doApply()
        vault->at(sfWithdrawalPolicy) = *value;
    else
        vault->at(sfWithdrawalPolicy) = vaultStrategyFirstComeFirstServe;
    if (scale)
        vault->at(sfScale) = scale;
    // No `LossUnrealized`.
    view().insert(vault);

    // Explicitly create MPToken for the vault owner
    if (auto const err = authorizeMPToken(
            view(), mPriorBalance, mptIssuanceID, account_, ctx_.journal);
        !isTesSuccess(err))
        return err;

    // If the vault is private, set the authorized flag for the vault owner
    if (txFlags & tfVaultPrivate)
    {
        if (auto const err = authorizeMPToken(
                view(),
                mPriorBalance,
                mptIssuanceID,
                pseudoId,
                ctx_.journal,
                {},
                account_);
            !isTesSuccess(err))
            return err;
    }

    return tesSUCCESS;
}
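The scale-defaulting rule in doApply is compact enough to restate: XRP and MPT assets always get scale 0 (preflight already rejects an explicit sfScale for them), while an IOU asset takes the transaction's sfScale, or vaultDefaultIOUScale (6) when it is absent. A hedged sketch of just that rule, with an illustrative helper name that is not part of the source:

```cpp
#include <cstdint>
#include <optional>

constexpr std::uint8_t vaultDefaultIOUScale = 6;  // from Protocol.h above

// Illustrative helper, not part of the source.
constexpr std::uint8_t
effectiveScale(bool isIOUAsset, std::optional<std::uint8_t> txScale)
{
    return isIOUAsset ? txScale.value_or(vaultDefaultIOUScale) : 0;
}

static_assert(effectiveScale(false, 12) == 0);           // XRP/MPT: always 0
static_assert(effectiveScale(true, std::nullopt) == 6);  // IOU: default
static_assert(effectiveScale(true, 12) == 12);           // IOU: explicit
```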
@@ -21,7 +21,6 @@
#include <xrpld/ledger/View.h>

#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/MPTIssue.h>
#include <xrpl/protocol/STNumber.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFlags.h>

@@ -129,8 +128,7 @@ VaultDelete::doApply()

    // Destroy the share issuance. Do not use MPTokenIssuanceDestroy for this,
    // no special logic needed. First run few checks, duplicated from preclaim.
    auto const shareMPTID = *vault->at(sfShareMPTID);
    auto const mpt = view().peek(keylet::mptIssuance(shareMPTID));
    auto const mpt = view().peek(keylet::mptIssuance(vault->at(sfShareMPTID)));
    if (!mpt)
    {
        // LCOV_EXCL_START

@@ -139,24 +137,6 @@ VaultDelete::doApply()
        // LCOV_EXCL_STOP
    }

    // Try to remove MPToken for vault shares for the vault owner if it exists.
    if (auto const mptoken = view().peek(keylet::mptoken(shareMPTID, account_)))
    {
        if (auto const ter =
                removeEmptyHolding(view(), account_, MPTIssue(shareMPTID), j_);
            !isTesSuccess(ter))
        {
            // LCOV_EXCL_START
            JLOG(j_.error())  //
                << "VaultDelete: failed to remove vault owner's MPToken"
                << " MPTID=" << to_string(shareMPTID)  //
                << " account=" << toBase58(account_)  //
                << " with result: " << transToken(ter);
            return ter;
            // LCOV_EXCL_STOP
        }
    }

    if (!view().dirRemove(
            keylet::ownerDir(pseudoID), (*mpt)[sfOwnerNode], mpt->key(), false))
    {
@@ -26,7 +26,6 @@
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
#include <xrpl/protocol/LedgerFormats.h>
|
||||
#include <xrpl/protocol/MPTIssue.h>
|
||||
#include <xrpl/protocol/SField.h>
|
||||
#include <xrpl/protocol/STNumber.h>
|
||||
#include <xrpl/protocol/TER.h>
|
||||
#include <xrpl/protocol/TxFlags.h>
|
||||
@@ -139,7 +138,7 @@ VaultDeposit::preclaim(PreclaimContext const& ctx)
|
||||
if (isFrozen(ctx.view, account, vaultShare))
|
||||
return tecLOCKED;
|
||||
|
||||
if (vault->isFlag(lsfVaultPrivate) && account != vault->at(sfOwner))
|
||||
if (vault->isFlag(tfVaultPrivate) && account != vault->at(sfOwner))
|
||||
{
|
||||
auto const maybeDomainID = sleIssuance->at(~sfDomainID);
|
||||
// Since this is a private vault and the account is not its owner, we
|
||||
@@ -184,7 +183,7 @@ VaultDeposit::doApply()
|
||||
if (!vault)
|
||||
return tefINTERNAL; // LCOV_EXCL_LINE
|
||||
|
||||
auto const amount = ctx_.tx[sfAmount];
|
||||
auto const assets = ctx_.tx[sfAmount];
|
||||
// Make sure the depositor can hold shares.
|
||||
auto const mptIssuanceID = (*vault)[sfShareMPTID];
|
||||
auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID));
|
||||
@@ -198,14 +197,14 @@ VaultDeposit::doApply()
|
||||
|
||||
auto const& vaultAccount = vault->at(sfAccount);
|
||||
// Note, vault owner is always authorized
|
||||
if (vault->isFlag(lsfVaultPrivate) && account_ != vault->at(sfOwner))
|
||||
if ((vault->getFlags() & tfVaultPrivate) && account_ != vault->at(sfOwner))
|
||||
{
|
||||
if (auto const err = enforceMPTokenAuthorization(
|
||||
ctx_.view(), mptIssuanceID, account_, mPriorBalance, j_);
|
||||
!isTesSuccess(err))
|
||||
return err;
|
||||
}
|
||||
else // !vault->isFlag(lsfVaultPrivate) || account_ == vault->at(sfOwner)
|
||||
else
|
||||
{
|
||||
// No authorization needed, but must ensure there is MPToken
|
||||
auto sleMpt = view().read(keylet::mptoken(mptIssuanceID, account_));
|
||||
@@ -222,12 +221,8 @@ VaultDeposit::doApply()
|
||||
}
|
||||
|
||||
// If the vault is private, set the authorized flag for the vault owner
|
||||
if (vault->isFlag(lsfVaultPrivate))
|
||||
if (vault->isFlag(tfVaultPrivate))
|
||||
{
|
||||
// This follows from the reverse of the outer enclosing if condition
|
||||
XRPL_ASSERT(
|
||||
account_ == vault->at(sfOwner),
|
||||
"ripple::VaultDeposit::doApply : account is owner");
|
||||
if (auto const err = authorizeMPToken(
|
||||
view(),
|
||||
mPriorBalance, // priorBalance
|
||||
@@ -242,52 +237,14 @@ VaultDeposit::doApply()
 }
 }

-STAmount sharesCreated = {vault->at(sfShareMPTID)}, assetsDeposited;
-try
-{
-// Compute exchange before transferring any amounts.
-{
-auto const maybeShares =
-assetsToSharesDeposit(vault, sleIssuance, amount);
-if (!maybeShares)
-return tecINTERNAL; // LCOV_EXCL_LINE
-sharesCreated = *maybeShares;
-}
-if (sharesCreated == beast::zero)
-return tecPRECISION_LOSS;
-
-auto const maybeAssets =
-sharesToAssetsDeposit(vault, sleIssuance, sharesCreated);
-if (!maybeAssets)
-return tecINTERNAL; // LCOV_EXCL_LINE
-else if (*maybeAssets > amount)
-{
-// LCOV_EXCL_START
-JLOG(j_.error()) << "VaultDeposit: would take more than offered.";
-return tecINTERNAL;
-// LCOV_EXCL_STOP
-}
-assetsDeposited = *maybeAssets;
-}
-catch (std::overflow_error const&)
-{
-// It's easy to hit this exception from Number with large enough Scale
-// so we avoid spamming the log and only use debug here.
-JLOG(j_.debug()) //
-<< "VaultDeposit: overflow error with"
-<< " scale=" << (int)vault->at(sfScale).value() //
-<< ", assetsTotal=" << vault->at(sfAssetsTotal).value()
-<< ", sharesTotal=" << sleIssuance->at(sfOutstandingAmount)
-<< ", amount=" << amount;
-return tecPATH_DRY;
-}
-
+// Compute exchange before transferring any amounts.
+auto const shares = assetsToSharesDeposit(vault, sleIssuance, assets);
 XRPL_ASSERT(
-sharesCreated.asset() != assetsDeposited.asset(),
+shares.asset() != assets.asset(),
 "ripple::VaultDeposit::doApply : assets are not shares");

-vault->at(sfAssetsTotal) += assetsDeposited;
-vault->at(sfAssetsAvailable) += assetsDeposited;
+vault->at(sfAssetsTotal) += assets;
+vault->at(sfAssetsAvailable) += assets;
 view().update(vault);

 // A deposit must not push the vault over its limit.

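The arithmetic restored here is plain pro-rata issuance. A minimal standalone sketch of the rule, with double standing in for ripple::Number and an integer share count (names are hypothetical, not repo code):

    #include <cstdint>
    #include <iostream>

    // Shares minted for a deposit: pro-rata against the vault's current
    // holdings, truncated toward zero.
    std::int64_t
    sharesForDeposit(double assets, double assetsTotal, std::int64_t sharesTotal)
    {
        if (assetsTotal == 0)
            return static_cast<std::int64_t>(assets);  // empty vault: 1:1
        return static_cast<std::int64_t>(sharesTotal * (assets / assetsTotal));
    }

    int main()
    {
        // 1000 shares backed by 1000 assets: a 250-asset deposit mints 250.
        std::cout << sharesForDeposit(250, 1000, 1000) << '\n';
        // After appreciation (2000 assets behind 1000 shares) it mints 125.
        std::cout << sharesForDeposit(250, 2000, 1000) << '\n';
    }
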
@@ -296,21 +253,15 @@ VaultDeposit::doApply()
 return tecLIMIT_EXCEEDED;

 // Transfer assets from depositor to vault.
-if (auto const ter = accountSend(
-view(),
-account_,
-vaultAccount,
-assetsDeposited,
-j_,
-WaiveTransferFee::Yes);
-!isTesSuccess(ter))
+if (auto ter = accountSend(
+view(), account_, vaultAccount, assets, j_, WaiveTransferFee::Yes))
 return ter;

 // Sanity check
 if (accountHolds(
 view(),
 account_,
-assetsDeposited.asset(),
+assets.asset(),
 FreezeHandling::fhIGNORE_FREEZE,
 AuthHandling::ahIGNORE_AUTH,
 j_) < beast::zero)

@@ -322,14 +273,8 @@ VaultDeposit::doApply()
 }

 // Transfer shares from vault to depositor.
-if (auto const ter = accountSend(
-view(),
-vaultAccount,
-account_,
-sharesCreated,
-j_,
-WaiveTransferFee::Yes);
-!isTesSuccess(ter))
+if (auto ter = accountSend(
+view(), vaultAccount, account_, shares, j_, WaiveTransferFee::Yes))
 return ter;

 return tesSUCCESS;

@@ -108,7 +108,7 @@ VaultSet::preclaim(PreclaimContext const& ctx)
 if (auto const domain = ctx.tx[~sfDomainID])
 {
 // We can only set domain if private flag was originally set
-if (!vault->isFlag(lsfVaultPrivate))
+if ((vault->getFlags() & tfVaultPrivate) == 0)
 {
 JLOG(ctx.j.debug()) << "VaultSet: vault is not private";
 return tecNO_PERMISSION;

@@ -175,9 +175,9 @@ VaultSet::doApply()
 {
 if (*domainId != beast::zero)
 {
-// In VaultSet::preclaim we enforce that lsfVaultPrivate must have
+// In VaultSet::preclaim we enforce that tfVaultPrivate must have
 // been set in the vault. We currently do not support making such a
-// vault public (i.e. removal of lsfVaultPrivate flag). The
+// vault public (i.e. removal of tfVaultPrivate flag). The
 // sfDomainID flag must be set in the MPTokenIssuance object and can
 // be freely updated.
 sleIssuance->setFieldH256(sfDomainID, *domainId);

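The rewritten preclaim check is a plain bit test on the vault's flags word. A tiny sketch of the gating logic (the flag value below is illustrative only, not the real tfVaultPrivate constant):

    #include <cstdint>
    #include <iostream>

    constexpr std::uint32_t tfVaultPrivate = 0x0001;  // illustrative value only

    // Mirrors the preclaim rule: a DomainID may only be set on a private vault.
    bool canSetDomain(std::uint32_t flags)
    {
        return (flags & tfVaultPrivate) != 0;
    }

    int main()
    {
        std::cout << canSetDomain(0) << '\n';               // 0: tecNO_PERMISSION
        std::cout << canSetDomain(tfVaultPrivate) << '\n';  // 1: allowed
    }
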
@@ -177,7 +177,7 @@ VaultWithdraw::doApply()
 if (!vault)
 return tefINTERNAL; // LCOV_EXCL_LINE

-auto const mptIssuanceID = *((*vault)[sfShareMPTID]);
+auto const mptIssuanceID = (*vault)[sfShareMPTID];
 auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID));
 if (!sleIssuance)
 {

@@ -192,57 +192,24 @@ VaultWithdraw::doApply()
 // to deposit into it, and this means you are also indefinitely authorized
 // to withdraw from it.

-auto const amount = ctx_.tx[sfAmount];
-Asset const vaultAsset = vault->at(sfAsset);
-MPTIssue const share{mptIssuanceID};
-STAmount sharesRedeemed = {share};
-STAmount assetsWithdrawn;
-try
+auto amount = ctx_.tx[sfAmount];
+auto const asset = vault->at(sfAsset);
+auto const share = MPTIssue(mptIssuanceID);
+STAmount shares, assets;
+if (amount.asset() == asset)
 {
-if (amount.asset() == vaultAsset)
-{
-// Fixed assets, variable shares.
-{
-auto const maybeShares =
-assetsToSharesWithdraw(vault, sleIssuance, amount);
-if (!maybeShares)
-return tecINTERNAL; // LCOV_EXCL_LINE
-sharesRedeemed = *maybeShares;
-}
-
-if (sharesRedeemed == beast::zero)
-return tecPRECISION_LOSS;
-auto const maybeAssets =
-sharesToAssetsWithdraw(vault, sleIssuance, sharesRedeemed);
-if (!maybeAssets)
-return tecINTERNAL; // LCOV_EXCL_LINE
-assetsWithdrawn = *maybeAssets;
-}
-else if (amount.asset() == share)
-{
-// Fixed shares, variable assets.
-sharesRedeemed = amount;
-auto const maybeAssets =
-sharesToAssetsWithdraw(vault, sleIssuance, sharesRedeemed);
-if (!maybeAssets)
-return tecINTERNAL; // LCOV_EXCL_LINE
-assetsWithdrawn = *maybeAssets;
-}
-else
-return tefINTERNAL; // LCOV_EXCL_LINE
+// Fixed assets, variable shares.
+assets = amount;
+shares = assetsToSharesWithdraw(vault, sleIssuance, assets);
 }
-catch (std::overflow_error const&)
+else if (amount.asset() == share)
 {
-// It's easy to hit this exception from Number with large enough Scale
-// so we avoid spamming the log and only use debug here.
-JLOG(j_.debug()) //
-<< "VaultWithdraw: overflow error with"
-<< " scale=" << (int)vault->at(sfScale).value() //
-<< ", assetsTotal=" << vault->at(sfAssetsTotal).value()
-<< ", sharesTotal=" << sleIssuance->at(sfOutstandingAmount)
-<< ", amount=" << amount.value();
-return tecPATH_DRY;
+// Fixed shares, variable assets.
+shares = amount;
+assets = sharesToAssetsWithdraw(vault, sleIssuance, shares);
 }
+else
+return tefINTERNAL; // LCOV_EXCL_LINE

 if (accountHolds(
 view(),

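Both branches apply the same exchange rate; the transaction merely fixes one side of it. A standalone sketch, again with double in place of ripple::Number (hypothetical names, not repo code):

    #include <iostream>

    struct Vault { double assetsTotal; double sharesTotal; };

    // Fixed assets, variable shares.
    double assetsToShares(Vault const& v, double assets)
    { return v.assetsTotal == 0 ? 0 : v.sharesTotal * (assets / v.assetsTotal); }

    // Fixed shares, variable assets.
    double sharesToAssets(Vault const& v, double shares)
    { return v.sharesTotal == 0 ? 0 : v.assetsTotal * (shares / v.sharesTotal); }

    int main()
    {
        Vault v{2000, 1000};  // each share backed by 2 assets
        std::cout << assetsToShares(v, 500) << '\n';  // withdraw 500 assets: 250 shares
        std::cout << sharesToAssets(v, 250) << '\n';  // redeem 250 shares: 500 assets
    }
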
@@ -250,72 +217,31 @@ VaultWithdraw::doApply()
 share,
 FreezeHandling::fhZERO_IF_FROZEN,
 AuthHandling::ahIGNORE_AUTH,
-j_) < sharesRedeemed)
+j_) < shares)
 {
 JLOG(j_.debug()) << "VaultWithdraw: account doesn't hold enough shares";
 return tecINSUFFICIENT_FUNDS;
 }

-auto assetsAvailable = vault->at(sfAssetsAvailable);
-auto assetsTotal = vault->at(sfAssetsTotal);
-[[maybe_unused]] auto const lossUnrealized = vault->at(sfLossUnrealized);
-XRPL_ASSERT(
-lossUnrealized <= (assetsTotal - assetsAvailable),
-"ripple::VaultWithdraw::doApply : loss and assets do balance");
-
-// The vault must have enough assets on hand. The vault may hold assets
-// that it has already pledged. That is why we look at AssetAvailable
-// instead of the pseudo-account balance.
-if (*assetsAvailable < assetsWithdrawn)
+// The vault must have enough assets on hand. The vault may hold assets that
+// it has already pledged. That is why we look at AssetAvailable instead of
+// the pseudo-account balance.
+if (*vault->at(sfAssetsAvailable) < assets)
 {
 JLOG(j_.debug()) << "VaultWithdraw: vault doesn't hold enough assets";
 return tecINSUFFICIENT_FUNDS;
 }

-assetsTotal -= assetsWithdrawn;
-assetsAvailable -= assetsWithdrawn;
+vault->at(sfAssetsTotal) -= assets;
+vault->at(sfAssetsAvailable) -= assets;
 view().update(vault);

 auto const& vaultAccount = vault->at(sfAccount);
 // Transfer shares from depositor to vault.
-if (auto const ter = accountSend(
-view(),
-account_,
-vaultAccount,
-sharesRedeemed,
-j_,
-WaiveTransferFee::Yes);
-!isTesSuccess(ter))
+if (auto ter = accountSend(
+view(), account_, vaultAccount, shares, j_, WaiveTransferFee::Yes))
 return ter;

-// Try to remove MPToken for shares, if the account balance is zero. Vault
-// pseudo-account will never set lsfMPTAuthorized, so we ignore flags.
-// Keep MPToken if holder is the vault owner.
-if (account_ != vault->at(sfOwner))
-{
-if (auto const ter = removeEmptyHolding(
-view(), account_, sharesRedeemed.asset(), j_);
-isTesSuccess(ter))
-{
-JLOG(j_.debug()) //
-<< "VaultWithdraw: removed empty MPToken for vault shares"
-<< " MPTID=" << to_string(mptIssuanceID) //
-<< " account=" << toBase58(account_);
-}
-else if (ter != tecHAS_OBLIGATIONS)
-{
-// LCOV_EXCL_START
-JLOG(j_.error()) //
-<< "VaultWithdraw: failed to remove MPToken for vault shares"
-<< " MPTID=" << to_string(mptIssuanceID) //
-<< " account=" << toBase58(account_) //
-<< " with result: " << transToken(ter);
-return ter;
-// LCOV_EXCL_STOP
-}
-// else quietly ignore, account balance is not zero
-}
-
 auto const dstAcct = [&]() -> AccountID {
 if (ctx_.tx.isFieldPresent(sfDestination))
 return ctx_.tx.getAccountID(sfDestination);

@@ -323,21 +249,15 @@ VaultWithdraw::doApply()
 }();

 // Transfer assets from vault to depositor or destination account.
-if (auto const ter = accountSend(
-view(),
-vaultAccount,
-dstAcct,
-assetsWithdrawn,
-j_,
-WaiveTransferFee::Yes);
-!isTesSuccess(ter))
+if (auto ter = accountSend(
+view(), vaultAccount, dstAcct, assets, j_, WaiveTransferFee::Yes))
 return ter;

 // Sanity check
 if (accountHolds(
 view(),
 vaultAccount,
-assetsWithdrawn.asset(),
+assets.asset(),
 FreezeHandling::fhIGNORE_FREEZE,
 AuthHandling::ahIGNORE_AUTH,
 j_) < beast::zero)

@@ -34,21 +34,17 @@ JobQueue::Coro::Coro(
 : jq_(jq)
 , type_(type)
 , name_(name)
+, running_(false)
 , coro_(
 [this, fn = std::forward<F>(f)](
 boost::coroutines::asymmetric_coroutine<void>::push_type&
 do_yield) {
 yield_ = &do_yield;
 yield();
-// self makes Coro alive until this function returns
-std::shared_ptr<Coro> self;
-if (!shouldStop())
-{
-self = shared_from_this();
-fn(self);
-}
-state_ = CoroState::Finished;
-cv_.notify_all();
+fn(shared_from_this());
+#ifndef NDEBUG
+finished_ = true;
+#endif
 },
 boost::coroutines::attributes(megabytes(1)))
 {

@@ -56,36 +52,17 @@ JobQueue::Coro::Coro(

 inline JobQueue::Coro::~Coro()
 {
-XRPL_ASSERT(
-state_ != CoroState::Running,
-"ripple::JobQueue::Coro::~Coro : is not running");
-exiting_ = true;
-// Resume the coroutine so that it has a chance to clean things up
-if (state_ == CoroState::Suspended)
-{
-resume();
-}
-
 #ifndef NDEBUG
-XRPL_ASSERT(
-state_ == CoroState::Finished,
-"ripple::JobQueue::Coro::~Coro : is finished");
+XRPL_ASSERT(finished_, "ripple::JobQueue::Coro::~Coro : is finished");
 #endif
 }

 inline void
-JobQueue::Coro::yield()
+JobQueue::Coro::yield() const
 {
 {
 std::lock_guard lock(jq_.m_mutex);
-if (shouldStop())
-{
-return;
-}
-state_ = CoroState::Suspended;
 ++jq_.nSuspend_;
-jq_.m_suspendedCoros[this] = weak_from_this();
-jq_.cv_.notify_all();
 }
 (*yield_)();
 }

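For readers unfamiliar with Boost's asymmetric coroutines: constructing the pull_type runs the body until its first call through the push_type, which is exactly the yield_/coro_ handshake above. A minimal standalone sketch (link against boost_coroutine and boost_context):

    #include <boost/coroutine/asymmetric_coroutine.hpp>

    #include <iostream>

    int main()
    {
        using coro_t = boost::coroutines::asymmetric_coroutine<void>;
        coro_t::push_type* yield = nullptr;
        // The body runs on construction, up to its first (*yield)().
        coro_t::pull_type coro([&yield](coro_t::push_type& do_yield) {
            yield = &do_yield;
            std::cout << "started\n";
            (*yield)();  // suspend, like Coro::yield()
            std::cout << "resumed\n";
        });
        std::cout << "suspended\n";
        coro();  // re-enter the body, like Coro::resume() invoking coro_()
    }
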
@@ -93,6 +70,11 @@ JobQueue::Coro::yield()
 inline bool
 JobQueue::Coro::post()
 {
+{
+std::lock_guard lk(mutex_run_);
+running_ = true;
+}
+
 // sp keeps 'this' alive
 if (jq_.addJob(
 type_, name_, [this, sp = shared_from_this()]() { resume(); }))

@@ -100,6 +82,9 @@ JobQueue::Coro::post()
 return true;
 }

+// The coroutine will not run. Clean up running_.
+std::lock_guard lk(mutex_run_);
+running_ = false;
 cv_.notify_all();
 return false;
 }

@@ -109,17 +94,11 @@ JobQueue::Coro::resume()
 {
 {
 std::lock_guard lk(mutex_run_);
-if (state_ != CoroState::Suspended)
-{
-return;
-}
-state_ = CoroState::Running;
+running_ = true;
 }
 {
 std::lock_guard lock(jq_.m_mutex);
-jq_.m_suspendedCoros.erase(this);
 --jq_.nSuspend_;
-jq_.cv_.notify_all();
 }
 auto saved = detail::getLocalValues().release();
 detail::getLocalValues().reset(&lvs_);

@@ -130,6 +109,9 @@ JobQueue::Coro::resume()
 coro_();
 detail::getLocalValues().release();
 detail::getLocalValues().reset(saved);
+std::lock_guard lk(mutex_run_);
+running_ = false;
+cv_.notify_all();
 }

 inline bool

@@ -138,11 +120,32 @@ JobQueue::Coro::runnable() const
 return static_cast<bool>(coro_);
 }

+inline void
+JobQueue::Coro::expectEarlyExit()
+{
+#ifndef NDEBUG
+if (!finished_)
+#endif
+{
+// expectEarlyExit() must only ever be called from outside the
+// Coro's stack. If you're inside the stack you can simply return
+// and be done.
+//
+// That said, since we're outside the Coro's stack, we need to
+// decrement the nSuspend that the Coro's call to yield caused.
+std::lock_guard lock(jq_.m_mutex);
+--jq_.nSuspend_;
+#ifndef NDEBUG
+finished_ = true;
+#endif
+}
+}
+
 inline void
 JobQueue::Coro::join()
 {
 std::unique_lock<std::mutex> lk(mutex_run_);
-cv_.wait(lk, [this]() { return state_ != CoroState::Running; });
+cv_.wait(lk, [this]() { return running_ == false; });
 }

 } // namespace ripple

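The members and methods being restored form a small handshake: resume() sets running_, the tail of resume() clears it and notifies, and join() waits for it to clear. A standalone model of that handshake with std::thread (not the repo classes):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    struct Work
    {
        std::mutex mutex_run_;
        std::condition_variable cv_;
        bool running_ = true;  // set before the worker is launched

        void run()  // plays the role of Coro::resume()
        {
            // ... the body of the job would execute here ...
            std::lock_guard lk(mutex_run_);
            running_ = false;
            cv_.notify_all();
        }

        void join()  // same predicate as JobQueue::Coro::join()
        {
            std::unique_lock lk(mutex_run_);
            cv_.wait(lk, [this] { return running_ == false; });
        }
    };

    int main()
    {
        Work w;
        std::thread t([&] { w.run(); });
        w.join();  // blocks until run() clears running_
        t.join();
        std::cout << "joined\n";
    }
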
@@ -60,22 +60,20 @@ public:
 /** Coroutines must run to completion. */
 class Coro : public std::enable_shared_from_this<Coro>
 {
 friend class JobQueue;

 private:
-enum class CoroState { None, Suspended, Running, Finished };
-
-std::atomic_bool exiting_ = false;
 detail::LocalValues lvs_;
 JobQueue& jq_;
 JobType type_;
 std::string name_;
-std::atomic<CoroState> state_ = CoroState::None;
-std::mutex mutex_;
+bool running_;
+std::mutex mutex_run_;
 std::condition_variable cv_;
 boost::coroutines::asymmetric_coroutine<void>::pull_type coro_;
 boost::coroutines::asymmetric_coroutine<void>::push_type* yield_;
+#ifndef NDEBUG
+bool finished_ = false;
+#endif

 public:
 // Private: Used in the implementation

@@ -99,7 +97,7 @@ public:
 post.
 */
 void
-yield();
+yield() const;

 /** Schedule coroutine execution.
 Effects:

@@ -133,13 +131,13 @@ public:
 bool
 runnable() const;

+/** Once called, the Coro allows early exit without an assert. */
+void
+expectEarlyExit();
+
 /** Waits until coroutine returns from the user function. */
 void
 join();
-
-/** Returns true if the coroutine should stop executing */
-bool
-shouldStop() const;
 };

 using JobFunction = std::function<void()>;

@@ -169,10 +167,6 @@ public:
 bool
 addJob(JobType type, std::string const& name, JobHandler&& jobHandler)
 {
-if (!accepting_)
-{
-return false;
-}
 if (auto optionalCountedJob =
 jobCounter_.wrap(std::forward<JobHandler>(jobHandler)))
 {

@@ -255,7 +249,6 @@ private:
 std::uint64_t m_lastJob;
 std::set<Job> m_jobSet;
 JobCounter jobCounter_;
-std::atomic_bool accepting_ = true;
 std::atomic_bool stopping_{false};
 std::atomic_bool stopped_{false};
 JobDataMap m_jobData;

@@ -267,8 +260,6 @@ private:
 // The number of suspended coroutines
 int nSuspend_ = 0;

-std::map<void*, std::weak_ptr<Coro>> m_suspendedCoros;
-
 Workers m_workers;

 // Statistics tracking

@@ -279,25 +270,6 @@ private:

 std::condition_variable cv_;

-void
-onStopResumeCoros(std::map<void*, std::weak_ptr<Coro>>& coros)
-{
-for (auto& [_, coro] : coros)
-{
-if (auto coroPtr = coro.lock())
-{
-if (auto optionalCountedJob =
-jobCounter_.wrap([=]() { coroPtr->resume(); }))
-{
-addRefCountedJob(
-coroPtr->type_,
-coroPtr->name_,
-std::move(*optionalCountedJob));
-}
-}
-}
-}
-
 void
 collect();
 JobTypeData&

@@ -440,10 +412,6 @@ template <class F>
 std::shared_ptr<JobQueue::Coro>
 JobQueue::postCoro(JobType t, std::string const& name, F&& f)
 {
-if (!accepting_)
-{
-return nullptr;
-}
 /* First param is a detail type to make construction private.
 Last param is the function the coroutine runs. Signature of
 void(std::shared_ptr<Coro>).

@@ -454,6 +422,7 @@ JobQueue::postCoro(JobType t, std::string const& name, F&& f)
 {
 // The Coro was not successfully posted. Disable it so its destructor
 // can run with no negative side effects. Then destroy it.
+coro->expectEarlyExit();
 coro.reset();
 }
 return coro;

@@ -26,12 +26,6 @@

 namespace ripple {

-bool
-JobQueue::Coro::shouldStop() const
-{
-return jq_.stopping_ || jq_.stopped_ || !jq_.accepting_ || exiting_;
-}
-
 JobQueue::JobQueue(
 int threadCount,
 beast::insight::Collector::ptr const& collector,

@@ -301,22 +295,6 @@ JobQueue::getJobTypeData(JobType type)
 void
 JobQueue::stop()
 {
-// Once we stop accepting new jobs, all running coroutines won't be able to
-// get suspended and yield() will return immediately, so we can safely
-// move m_suspendedCoros, and we can assume that no coroutine will be
-// suspended in the future.
-std::map<void*, std::weak_ptr<Coro>> suspendedCoros;
-{
-std::unique_lock lock(m_mutex);
-accepting_ = false;
-suspendedCoros = std::move(m_suspendedCoros);
-}
-if (!suspendedCoros.empty())
-{
-// We should resume the suspended coroutines so that the coroutines
-// get a chance to exit cleanly.
-onStopResumeCoros(suspendedCoros);
-}
 stopping_ = true;
 using namespace std::chrono_literals;
 jobCounter_.join("JobQueue", 1s, m_journal);

@@ -327,9 +305,8 @@ JobQueue::stop()
 // `Job::doJob` and the return of `JobQueue::processTask`. That is why
 // we must wait on the condition variable to make these assertions.
 std::unique_lock<std::mutex> lock(m_mutex);
-cv_.wait(lock, [this] {
-return m_processCount == 0 && nSuspend_ == 0 && m_jobSet.empty();
-});
+cv_.wait(
+lock, [this] { return m_processCount == 0 && m_jobSet.empty(); });
 XRPL_ASSERT(
 m_processCount == 0,
 "ripple::JobQueue::stop : all processes completed");

@@ -912,41 +912,28 @@ deleteAMMTrustLine(
 std::optional<AccountID> const& ammAccountID,
 beast::Journal j);

-// From the perspective of a vault, return the number of shares to give the
-// depositor when they deposit a fixed amount of assets. Since shares are MPT
-// this number is integral and always truncated in this calculation.
-[[nodiscard]] std::optional<STAmount>
+// From the perspective of a vault,
+// return the number of shares to give the depositor
+// when they deposit a fixed amount of assets.
+[[nodiscard]] STAmount
 assetsToSharesDeposit(
 std::shared_ptr<SLE const> const& vault,
 std::shared_ptr<SLE const> const& issuance,
 STAmount const& assets);

-// From the perspective of a vault, return the number of assets to take from
-// depositor when they receive a fixed amount of shares. Note, since shares are
-// MPT, they are always an integral number.
-[[nodiscard]] std::optional<STAmount>
-sharesToAssetsDeposit(
-std::shared_ptr<SLE const> const& vault,
-std::shared_ptr<SLE const> const& issuance,
-STAmount const& shares);
-
-enum class TruncateShares : bool { no = false, yes = true };
-
-// From the perspective of a vault, return the number of shares to demand from
-// the depositor when they ask to withdraw a fixed amount of assets. Since
-// shares are MPT this number is integral, and it will be rounded to nearest
-// unless explicitly requested to be truncated instead.
-[[nodiscard]] std::optional<STAmount>
+// From the perspective of a vault,
+// return the number of shares to demand from the depositor
+// when they ask to withdraw a fixed amount of assets.
+[[nodiscard]] STAmount
 assetsToSharesWithdraw(
 std::shared_ptr<SLE const> const& vault,
 std::shared_ptr<SLE const> const& issuance,
-STAmount const& assets,
-TruncateShares truncate = TruncateShares::no);
+STAmount const& assets);

-// From the perspective of a vault, return the number of assets to give the
-// depositor when they redeem a fixed amount of shares. Note, since shares are
-// MPT, they are always an integral number.
-[[nodiscard]] std::optional<STAmount>
+// From the perspective of a vault,
+// return the number of assets to give the depositor
+// when they redeem a fixed amount of shares.
+[[nodiscard]] STAmount
 sharesToAssetsWithdraw(
 std::shared_ptr<SLE const> const& vault,
 std::shared_ptr<SLE const> const& issuance,

@@ -2793,113 +2793,58 @@ rippleCredit(
 saAmount.asset().value());
 }

-[[nodiscard]] std::optional<STAmount>
+[[nodiscard]] STAmount
 assetsToSharesDeposit(
 std::shared_ptr<SLE const> const& vault,
 std::shared_ptr<SLE const> const& issuance,
 STAmount const& assets)
 {
-XRPL_ASSERT(
-!assets.negative(),
-"ripple::assetsToSharesDeposit : non-negative assets");
-XRPL_ASSERT(
-assets.asset() == vault->at(sfAsset),
-"ripple::assetsToSharesDeposit : assets and vault match");
-if (assets.negative() || assets.asset() != vault->at(sfAsset))
-return std::nullopt; // LCOV_EXCL_LINE
-
-Number const assetTotal = vault->at(sfAssetsTotal);
-STAmount shares{vault->at(sfShareMPTID)};
+Number assetTotal = vault->at(sfAssetsTotal);
+STAmount shares{vault->at(sfShareMPTID), static_cast<Number>(assets)};
 if (assetTotal == 0)
-return STAmount{
-shares.asset(),
-Number(assets.mantissa(), assets.exponent() + vault->at(sfScale))
-.truncate()};
-
-Number const shareTotal = issuance->at(sfOutstandingAmount);
-shares = (shareTotal * (assets / assetTotal)).truncate();
+return shares;
+Number shareTotal = issuance->at(sfOutstandingAmount);
+shares = shareTotal * (assets / assetTotal);
 return shares;
 }

-[[nodiscard]] std::optional<STAmount>
-sharesToAssetsDeposit(
-std::shared_ptr<SLE const> const& vault,
-std::shared_ptr<SLE const> const& issuance,
-STAmount const& shares)
-{
-XRPL_ASSERT(
-!shares.negative(),
-"ripple::sharesToAssetsDeposit : non-negative shares");
-XRPL_ASSERT(
-shares.asset() == vault->at(sfShareMPTID),
-"ripple::sharesToAssetsDeposit : shares and vault match");
-if (shares.negative() || shares.asset() != vault->at(sfShareMPTID))
-return std::nullopt; // LCOV_EXCL_LINE
-
-Number const assetTotal = vault->at(sfAssetsTotal);
-STAmount assets{vault->at(sfAsset)};
-if (assetTotal == 0)
-return STAmount{
-assets.asset(),
-shares.mantissa(),
-shares.exponent() - vault->at(sfScale),
-false};
-
-Number const shareTotal = issuance->at(sfOutstandingAmount);
-assets = assetTotal * (shares / shareTotal);
-return assets;
-}
-
-[[nodiscard]] std::optional<STAmount>
+[[nodiscard]] STAmount
 assetsToSharesWithdraw(
 std::shared_ptr<SLE const> const& vault,
 std::shared_ptr<SLE const> const& issuance,
-STAmount const& assets,
-TruncateShares truncate)
+STAmount const& assets)
 {
-XRPL_ASSERT(
-!assets.negative(),
-"ripple::assetsToSharesDeposit : non-negative assets");
-XRPL_ASSERT(
-assets.asset() == vault->at(sfAsset),
-"ripple::assetsToSharesWithdraw : assets and vault match");
-if (assets.negative() || assets.asset() != vault->at(sfAsset))
-return std::nullopt; // LCOV_EXCL_LINE
-
 Number assetTotal = vault->at(sfAssetsTotal);
 assetTotal -= vault->at(sfLossUnrealized);
 STAmount shares{vault->at(sfShareMPTID)};
 if (assetTotal == 0)
 return shares;
-Number const shareTotal = issuance->at(sfOutstandingAmount);
-Number result = shareTotal * (assets / assetTotal);
-if (truncate == TruncateShares::yes)
-result = result.truncate();
-shares = result;
+Number shareTotal = issuance->at(sfOutstandingAmount);
+shares = shareTotal * (assets / assetTotal);
 return shares;
 }

-[[nodiscard]] std::optional<STAmount>
+[[nodiscard]] STAmount
 sharesToAssetsWithdraw(
 std::shared_ptr<SLE const> const& vault,
 std::shared_ptr<SLE const> const& issuance,
 STAmount const& shares)
 {
-XRPL_ASSERT(
-!shares.negative(),
-"ripple::sharesToAssetsDeposit : non-negative shares");
-XRPL_ASSERT(
-shares.asset() == vault->at(sfShareMPTID),
-"ripple::sharesToAssetsWithdraw : shares and vault match");
-if (shares.negative() || shares.asset() != vault->at(sfShareMPTID))
-return std::nullopt; // LCOV_EXCL_LINE
-
 Number assetTotal = vault->at(sfAssetsTotal);
 assetTotal -= vault->at(sfLossUnrealized);
 STAmount assets{vault->at(sfAsset)};
 if (assetTotal == 0)
 return assets;
-Number const shareTotal = issuance->at(sfOutstandingAmount);
+Number shareTotal = issuance->at(sfOutstandingAmount);
 assets = assetTotal * (shares / shareTotal);
 return assets;
 }

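The substantive numeric difference between the two versions is rounding: the deleted code truncated minted shares (and optionally truncated on withdrawal), while the restored code keeps the raw quotient. A toy comparison, with double standing in for ripple::Number:

    #include <cmath>
    #include <iostream>

    int main()
    {
        double const sharesTotal = 1000, assetsTotal = 3000, assets = 110;
        double const raw = sharesTotal * (assets / assetsTotal);
        std::cout << raw << '\n';              // 36.666..., the raw quotient
        std::cout << std::trunc(raw) << '\n';  // 36, like Number::truncate()
        std::cout << std::round(raw) << '\n';  // 37, round to nearest
    }
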
@@ -53,6 +53,14 @@ public:
 virtual std::string
 getName() = 0;

+/** Get the block size for backends that support it
+*/
+virtual std::optional<std::size_t>
+getBlockSize() const
+{
+return std::nullopt;
+}
+
 /** Open the backend.
 @param createIfMissing Create the database files if necessary.
 This allows the caller to catch exceptions.

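The new accessor is the classic "virtual with a safe default" extension: returning std::nullopt keeps every existing backend source-compatible while letting NuDB report its configured size. A compilable sketch of the pattern (simplified names, not the repo interface):

    #include <cstddef>
    #include <iostream>
    #include <optional>

    struct Backend
    {
        virtual ~Backend() = default;
        // Backends without a block-size notion inherit this default.
        virtual std::optional<std::size_t> getBlockSize() const
        {
            return std::nullopt;
        }
    };

    struct NuDBLike : Backend
    {
        std::size_t blockSize_ = 4096;
        std::optional<std::size_t> getBlockSize() const override
        {
            return blockSize_;
        }
    };

    int main()
    {
        NuDBLike db;
        Backend const& b = db;
        if (auto const size = b.getBlockSize())
            std::cout << "block size: " << *size << '\n';
    }
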
@@ -24,6 +24,7 @@
 #include <xrpld/nodestore/detail/codec.h>

 #include <xrpl/basics/contract.h>
+#include <xrpl/beast/core/LexicalCast.h>
 #include <xrpl/beast/utility/instrumentation.h>

 #include <boost/filesystem.hpp>

@@ -52,6 +53,7 @@ public:
 size_t const keyBytes_;
 std::size_t const burstSize_;
 std::string const name_;
+std::size_t const blockSize_;
 nudb::store db_;
 std::atomic<bool> deletePath_;
 Scheduler& scheduler_;

@@ -66,6 +68,7 @@ public:
 , keyBytes_(keyBytes)
 , burstSize_(burstSize)
 , name_(get(keyValues, "path"))
+, blockSize_(parseBlockSize(name_, keyValues, journal))
 , deletePath_(false)
 , scheduler_(scheduler)
 {

@@ -85,6 +88,7 @@ public:
 , keyBytes_(keyBytes)
 , burstSize_(burstSize)
 , name_(get(keyValues, "path"))
+, blockSize_(parseBlockSize(name_, keyValues, journal))
 , db_(context)
 , deletePath_(false)
 , scheduler_(scheduler)

@@ -114,6 +118,12 @@ public:
 return name_;
 }

+std::optional<std::size_t>
+getBlockSize() const override
+{
+return blockSize_;
+}
+
 void
 open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt)
 override

@@ -143,7 +153,7 @@ public:
 uid,
 salt,
 keyBytes_,
-nudb::block_size(kp),
+blockSize_,
 0.50,
 ec);
 if (ec == nudb::errc::file_exists)

@@ -359,6 +369,56 @@ public:
 {
 return 3;
 }

+private:
+static std::size_t
+parseBlockSize(
+std::string const& name,
+Section const& keyValues,
+beast::Journal journal)
+{
+using namespace boost::filesystem;
+auto const folder = path(name);
+auto const kp = (folder / "nudb.key").string();
+
+std::size_t const defaultSize =
+nudb::block_size(kp); // Default 4K from NuDB
+std::size_t blockSize = defaultSize;
+std::string blockSizeStr;
+
+if (!get_if_exists(keyValues, "nudb_block_size", blockSizeStr))
+{
+return blockSize; // Early return with default
+}
+
+try
+{
+std::size_t const parsedBlockSize =
+beast::lexicalCastThrow<std::size_t>(blockSizeStr);
+
+// Validate: must be power of 2 between 4K and 32K
+if (parsedBlockSize < 4096 || parsedBlockSize > 32768 ||
+(parsedBlockSize & (parsedBlockSize - 1)) != 0)
+{
+std::stringstream s;
+s << "Invalid nudb_block_size: " << parsedBlockSize
+<< ". Must be power of 2 between 4096 and 32768.";
+Throw<std::runtime_error>(s.str());
+}
+
+JLOG(journal.info())
+<< "Using custom NuDB block size: " << parsedBlockSize
+<< " bytes";
+return parsedBlockSize;
+}
+catch (std::exception const& e)
+{
+std::stringstream s;
+s << "Invalid nudb_block_size value: " << blockSizeStr
+<< ". Error: " << e.what();
+Throw<std::runtime_error>(s.str());
+}
+}
 };

 //------------------------------------------------------------------------------

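Assuming the usual [node_db] stanza in rippled.cfg (only the nudb_block_size key comes from this diff; the other lines are illustrative), the new option would be set like this, and must be a power of two between 4096 and 32768:

    [node_db]
    type=NuDB
    path=/var/lib/rippled/db/nudb
    nudb_block_size=8192

The power-of-two test in parseBlockSize relies on the standard bit trick that n & (n - 1) clears the lowest set bit, so the expression is zero exactly when n has a single bit set.
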
@@ -128,17 +128,21 @@ doRipplePathFind(RPC::JsonContext& context)
 // May 2017
 jvResult = context.app.getPathRequests().makeLegacyPathRequest(
 request,
-[coro = context.coro]() {
-// Capturing the shared_ptr keeps the coroutine alive up
+[&context]() {
+// Copying the shared_ptr keeps the coroutine alive up
 // through the return. Otherwise the storage under the
 // captured reference could evaporate when we return from
-// coro->post().
-// When post() failed, we won't get a thread to let
-// the Coro finish. We should ignore the coroutine and
-// let it destruct, as the JobQueue has been signaled to
-// close, and resuming it manually messes up the internal
-// state in JobQueue.
-coro->post();
+// coroCopy->resume(). This is not strictly necessary, but
+// will make maintenance easier.
+std::shared_ptr<JobQueue::Coro> coroCopy{context.coro};
+if (!coroCopy->post())
+{
+// The post() failed, so we won't get a thread to let
+// the Coro finish. We'll call Coro::resume() so the
+// Coro can finish on our thread. Otherwise the
+// application will hang on shutdown.
+coroCopy->resume();
+}
 },
 context.consumer,
 lpLedger,

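The added lines implement a post-or-run-inline fallback: hand the continuation to the job queue, and if the queue refuses (it is shutting down), run it on the current thread so nothing is left waiting. A standalone model of that shape (hypothetical Queue type, not the repo's JobQueue):

    #include <functional>
    #include <iostream>

    struct Queue
    {
        bool accepting = false;  // e.g. the queue was told to stop

        bool post(std::function<void()> f)
        {
            if (!accepting)
                return false;  // no worker will ever run f
            f();               // stand-in for scheduling on a worker thread
            return true;
        }
    };

    int main()
    {
        Queue q;
        auto finish = [] { std::cout << "finished\n"; };
        if (!q.post(finish))
            finish();  // run inline, mirroring coroCopy->resume()
    }
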
@@ -146,14 +150,6 @@ doRipplePathFind(RPC::JsonContext& context)
 if (request)
 {
 context.coro->yield();
-// Each time after we resume from yield(), we should
-// check if cancellation has been requested. It would
-// be a lot more elegant if we replaced boost coroutines
-// with C++ standard coroutines.
-if (context.coro->shouldStop())
-{
-return jvResult;
-}
 jvResult = request->doStatus(context.params);
 }