mirror of https://github.com/XRPLF/rippled.git
synced 2026-01-12 18:55:25 +00:00

Compare commits: pratik/Mig ... bthomee/fi (11 commits)

| Author | SHA1 | Date |
|---|---|---|
| | ad2fa4f79e | |
| | 44d21b8f6d | |
| | 3d1b3a49b3 | |
| | 0b87a26f04 | |
| | 0f23ad820c | |
| | b7139da4d0 | |
| | 40198d9792 | |
| | f059f0beda | |
| | 41c1be2bac | |
| | f816ffa55f | |
| | cf748702af | |
.github/actions/build-deps/action.yml (vendored, 9 changed lines)

@@ -4,9 +4,6 @@ description: "Install Conan dependencies, optionally forcing a rebuild of all de
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
inputs:
build_dir:
description: "The directory where to build."
required: true
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
@@ -28,17 +25,13 @@ runs:
- name: Install Conan dependencies
shell: bash
env:
BUILD_DIR: ${{ inputs.build_dir }}
BUILD_NPROC: ${{ inputs.build_nproc }}
BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
BUILD_TYPE: ${{ inputs.build_type }}
LOG_VERBOSITY: ${{ inputs.log_verbosity }}
run: |
echo 'Installing dependencies.'
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
conan install \
--output-folder . \
--build="${BUILD_OPTION}" \
--options:host='&:tests=True' \
--options:host='&:xrpld=True' \
@@ -46,4 +39,4 @@ runs:
--conf:all tools.build:jobs=${BUILD_NPROC} \
--conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
--conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
..
.
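The install step above can be reproduced outside CI. A minimal sketch, run from the repository root with a working Conan 2 profile; the concrete values stand in for the action's inputs (BUILD_OPTION resolves to "missing" unless a rebuild is forced, and the verbosity is either "verbose" or "quiet"):

```bash
# Local equivalent of the "Install Conan dependencies" step (assumed values).
conan install \
  --build=missing \
  --options:host='&:tests=True' \
  --options:host='&:xrpld=True' \
  --conf:all tools.build:jobs="$(nproc)" \
  --conf:all tools.build:verbosity=verbose \
  --conf:all tools.compilation:verbosity=verbose \
  .
```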
.github/scripts/rename/README.md (vendored, 4 changed lines)

@@ -31,6 +31,9 @@ run from the repository root.
the `xrpld` binary.
5. `.github/scripts/rename/namespace.sh`: This script will rename the C++
namespaces from `ripple` to `xrpl`.
6. `.github/scripts/rename/config.sh`: This script will rename the config from
`rippled.cfg` to `xrpld.cfg`, and update the code accordingly. The old
filename will still be accepted.

You can run all these scripts from the repository root as follows:

@@ -40,4 +43,5 @@ You can run all these scripts from the repository root as follows:
./.github/scripts/rename/cmake.sh .
./.github/scripts/rename/binary.sh .
./.github/scripts/rename/namespace.sh .
./.github/scripts/rename/config.sh .
```
.github/scripts/rename/config.sh (vendored, new executable file, 72 lines)

@@ -0,0 +1,72 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script renames the config from `rippled.cfg` to `xrpld.cfg`, and updates
# the code accordingly. The old filename will still be accepted.
# Usage: .github/scripts/rename/config.sh <repository directory>

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <repository directory>"
    exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
    echo "Error: Directory '${DIRECTORY}' does not exist."
    exit 1
fi
pushd ${DIRECTORY}

# Add the xrpld.cfg to the .gitignore.
if ! grep -q 'xrpld.cfg' .gitignore; then
    ${SED_COMMAND} -i '/rippled.cfg/a\
/xrpld.cfg' .gitignore
fi

# Rename the files.
if [ -e rippled.cfg ]; then
    mv rippled.cfg xrpld.cfg
fi
if [ -e cfg/rippled-example.cfg ]; then
    mv cfg/rippled-example.cfg cfg/xrpld-example.cfg
fi

# Rename inside the files.
DIRECTORIES=("cfg" "cmake" "include" "src")
for DIRECTORY in "${DIRECTORIES[@]}"; do
    echo "Processing directory: ${DIRECTORY}"

    find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.cmake" -o -name "*.txt" -o -name "*.cfg" -o -name "*.md" \) | while read -r FILE; do
        echo "Processing file: ${FILE}"
        ${SED_COMMAND} -i -E 's/rippled(-example)?[ .]cfg/xrpld\1.cfg/g' "${FILE}"
    done
done
${SED_COMMAND} -i 's/rippled/xrpld/g' cfg/xrpld-example.cfg
${SED_COMMAND} -i 's/rippled/xrpld/g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's/ripplevalidators/xrplvalidators/g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's/rippleConfig/xrpldConfig/g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's@ripple/@xrpld/@g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's/Rippled/File/g' src/test/core/Config_test.cpp


# Restore the old config file name in the code that maintains support for now.
${SED_COMMAND} -i 's/configLegacyName = "xrpld.cfg"/configLegacyName = "rippled.cfg"/g' src/xrpld/core/detail/Config.cpp

# Restore an URL.
${SED_COMMAND} -i 's/connect-your-xrpld-to-the-xrp-test-net.html/connect-your-rippled-to-the-xrp-test-net.html/g' cfg/xrpld-example.cfg

popd
echo "Renaming complete."
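The heavy lifting is the extended regex applied in the loop above; a quick way to sanity-check what it rewrites (GNU sed assumed, as in the script):

```bash
# The pattern maps rippled.cfg, rippled-example.cfg, and "rippled cfg" to the xrpld names.
printf 'rippled.cfg\nrippled-example.cfg\nrippled cfg\n' |
    sed -E 's/rippled(-example)?[ .]cfg/xrpld\1.cfg/g'
# xrpld.cfg
# xrpld-example.cfg
# xrpld.cfg
```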
.github/workflows/publish-docs.yml (vendored, 4 changed lines)

@@ -22,7 +22,7 @@ defaults:
shell: bash

env:
BUILD_DIR: .build
BUILD_DIR: build
NPROC_SUBTRACT: 2

jobs:
@@ -36,7 +36,7 @@ jobs:
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
uses: XRPLF/actions/get-nproc@2ece4ec6ab7de266859a6f053571425b2bd684b6
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
.github/workflows/reusable-build-test-config.yml (vendored, 35 changed lines)

@@ -3,11 +3,6 @@ name: Build and test configuration
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string

build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: true
@@ -59,6 +54,11 @@ defaults:
run:
shell: bash

env:
# Conan installs the generators in the build/generators directory, see the
# layout() method in conanfile.py. We then run CMake from the build directory.
BUILD_DIR: build

jobs:
build-and-test:
name: ${{ inputs.config_name }}
@@ -71,13 +71,13 @@ jobs:
steps:
- name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@01b244d2718865d427b499822fbd3f15e7197fcc
uses: XRPLF/actions/cleanup-workspace@2ece4ec6ab7de266859a6f053571425b2bd684b6

- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
with:
disable_ccache: false

@@ -85,7 +85,7 @@ jobs:
uses: ./.github/actions/print-env

- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
uses: XRPLF/actions/get-nproc@2ece4ec6ab7de266859a6f053571425b2bd684b6
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
@@ -96,7 +96,6 @@ jobs:
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ inputs.build_type }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
@@ -104,7 +103,7 @@ jobs:
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}

- name: Configure CMake
working-directory: ${{ inputs.build_dir }}
working-directory: ${{ env.BUILD_DIR }}
env:
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_ARGS: ${{ inputs.cmake_args }}
@@ -117,7 +116,7 @@ jobs:
..

- name: Build the binary
working-directory: ${{ inputs.build_dir }}
working-directory: ${{ env.BUILD_DIR }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
@@ -132,8 +131,6 @@ jobs:
- name: Upload the binary (Linux)
if: ${{ github.repository_owner == 'XRPLF' && runner.os == 'Linux' }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
env:
BUILD_DIR: ${{ inputs.build_dir }}
with:
name: xrpld-${{ inputs.config_name }}
path: ${{ env.BUILD_DIR }}/xrpld
@@ -142,7 +139,7 @@ jobs:

- name: Check linking (Linux)
if: ${{ runner.os == 'Linux' }}
working-directory: ${{ inputs.build_dir }}
working-directory: ${{ env.BUILD_DIR }}
run: |
ldd ./xrpld
if [ "$(ldd ./xrpld | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
@@ -154,13 +151,13 @@ jobs:

- name: Verify presence of instrumentation (Linux)
if: ${{ runner.os == 'Linux' && env.ENABLED_VOIDSTAR == 'true' }}
working-directory: ${{ inputs.build_dir }}
working-directory: ${{ env.BUILD_DIR }}
run: |
./xrpld --version | grep libvoidstar

- name: Run the separate tests
if: ${{ !inputs.build_only }}
working-directory: ${{ inputs.build_dir }}
working-directory: ${{ env.BUILD_DIR }}
# Windows locks some of the build files while running tests, and parallel jobs can collide
env:
BUILD_TYPE: ${{ inputs.build_type }}
@@ -173,7 +170,7 @@ jobs:

- name: Run the embedded tests
if: ${{ !inputs.build_only }}
working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', inputs.build_dir, inputs.build_type) || inputs.build_dir }}
working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', env.BUILD_DIR, inputs.build_type) || env.BUILD_DIR }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
@@ -189,7 +186,7 @@ jobs:

- name: Prepare coverage report
if: ${{ !inputs.build_only && env.ENABLED_COVERAGE == 'true' }}
working-directory: ${{ inputs.build_dir }}
working-directory: ${{ env.BUILD_DIR }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
@@ -207,7 +204,7 @@ jobs:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
files: ${{ env.BUILD_DIR }}/coverage.xml
plugins: noop
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
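The linking check above is cut off at the conditional; the intent is to fail when the binary picks up libstdc++ or libgcc as shared libraries. A sketch of an equivalent standalone check (messages and branch bodies are illustrative, not the workflow's exact text):

```bash
# Fail if ./xrpld dynamically links libstdc++ or libgcc.
ldd ./xrpld
if [ "$(ldd ./xrpld | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
    echo 'The binary does not dynamically link libstdc++ or libgcc.'
else
    echo 'Error: the binary dynamically links libstdc++ or libgcc.' >&2
    exit 1
fi
```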
.github/workflows/reusable-build-test.yml (vendored, 6 changed lines)

@@ -8,11 +8,6 @@ name: Build and test
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
@@ -46,7 +41,6 @@ jobs:
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
.github/workflows/reusable-check-rename.yml (vendored, 2 changed lines)

@@ -29,6 +29,8 @@ jobs:
run: .github/scripts/rename/binary.sh .
- name: Check namespaces
run: .github/scripts/rename/namespace.sh .
- name: Check config name
run: .github/scripts/rename/config.sh .
- name: Check for differences
env:
MESSAGE: |
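The body of the "Check for differences" step is not shown above; a hypothetical equivalent is to fail the job whenever the rename scripts leave tracked files modified, printing the configured message:

```bash
# Hypothetical check: the rename scripts should produce no diff on an already-renamed tree.
if ! git diff --quiet; then
    echo "${MESSAGE}"
    git --no-pager diff --stat
    exit 1
fi
```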
.github/workflows/upload-conan-deps.yml (vendored, 7 changed lines)

@@ -64,13 +64,13 @@ jobs:
steps:
- name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@01b244d2718865d427b499822fbd3f15e7197fcc
uses: XRPLF/actions/cleanup-workspace@2ece4ec6ab7de266859a6f053571425b2bd684b6

- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6
with:
disable_ccache: false

@@ -78,7 +78,7 @@ jobs:
uses: ./.github/actions/print-env

- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
uses: XRPLF/actions/get-nproc@2ece4ec6ab7de266859a6f053571425b2bd684b6
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
@@ -92,7 +92,6 @@ jobs:
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: .build
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ matrix.build_type }}
force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
.gitignore (vendored, 1 changed line)

@@ -35,6 +35,7 @@ gmon.out

# Customized configs.
/rippled.cfg
/xrpld.cfg
/validators.txt

# Locally patched Conan recipes
@@ -85,34 +85,18 @@ endif()
###

include(deps/Boost)
find_package(OpenSSL 1.1.1 REQUIRED)
set_target_properties(OpenSSL::SSL PROPERTIES
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)

add_subdirectory(external/antithesis-sdk)
find_package(gRPC REQUIRED)
find_package(lz4 REQUIRED)
# Target names with :: are not allowed in a generator expression.
# We need to pull the include directories and imported location properties
# from separate targets.
find_package(LibArchive REQUIRED)
find_package(SOCI REQUIRED)
find_package(SQLite3 REQUIRED)

option(rocksdb "Enable RocksDB" ON)
if(rocksdb)
find_package(RocksDB REQUIRED)
set_target_properties(RocksDB::rocksdb PROPERTIES
INTERFACE_COMPILE_DEFINITIONS XRPL_ROCKSDB_AVAILABLE=1
)
target_link_libraries(xrpl_libs INTERFACE RocksDB::rocksdb)
endif()

find_package(date REQUIRED)
find_package(ed25519 REQUIRED)
find_package(gRPC REQUIRED)
find_package(LibArchive REQUIRED)
find_package(lz4 REQUIRED)
find_package(nudb REQUIRED)
find_package(OpenSSL REQUIRED)
find_package(secp256k1 REQUIRED)
find_package(SOCI REQUIRED)
find_package(SQLite3 REQUIRED)
find_package(xxHash REQUIRED)

target_link_libraries(xrpl_libs INTERFACE
@@ -125,6 +109,15 @@ target_link_libraries(xrpl_libs INTERFACE
SQLite::SQLite3
)

option(rocksdb "Enable RocksDB" ON)
if(rocksdb)
find_package(RocksDB REQUIRED)
set_target_properties(RocksDB::rocksdb PROPERTIES
INTERFACE_COMPILE_DEFINITIONS XRPL_ROCKSDB_AVAILABLE=1
)
target_link_libraries(xrpl_libs INTERFACE RocksDB::rocksdb)
endif()

# Work around changes to Conan recipe for now.
if(TARGET nudb::core)
set(nudb nudb::core)
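RocksDB stays behind the `rocksdb` CMake option shown above, so XRPL_ROCKSDB_AVAILABLE is only defined when that option is ON. A sketch of disabling it at configure time, assuming dependencies were installed into `build` as in the workflows; the toolchain path is an assumption based on the generators-layout comment above:

```bash
# Configure without the optional RocksDB backend.
cd build
cmake \
    -DCMAKE_TOOLCHAIN_FILE=generators/conan_toolchain.cmake \
    -DCMAKE_BUILD_TYPE=Release \
    -Drocksdb=OFF \
    ..
```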
@@ -1,7 +1,7 @@
#
# Default validators.txt
#
# This file is located in the same folder as your rippled.cfg file
# This file is located in the same folder as your xrpld.cfg file
# and defines which validators your server trusts not to collude.
#
# This file is UTF-8 with DOS, UNIX, or Mac style line endings.
@@ -29,18 +29,18 @@
|
||||
#
|
||||
# Purpose
|
||||
#
|
||||
# This file documents and provides examples of all rippled server process
|
||||
# configuration options. When the rippled server instance is launched, it
|
||||
# This file documents and provides examples of all xrpld server process
|
||||
# configuration options. When the xrpld server instance is launched, it
|
||||
# looks for a file with the following name:
|
||||
#
|
||||
# rippled.cfg
|
||||
# xrpld.cfg
|
||||
#
|
||||
# For more information on where the rippled server instance searches for the
|
||||
# For more information on where the xrpld server instance searches for the
|
||||
# file, visit:
|
||||
#
|
||||
# https://xrpl.org/commandline-usage.html#generic-options
|
||||
#
|
||||
# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX,
|
||||
# This file should be named xrpld.cfg. This file is UTF-8 with DOS, UNIX,
|
||||
# or Mac style end of lines. Blank lines and lines beginning with '#' are
|
||||
# ignored. Undefined sections are reserved. No escapes are currently defined.
|
||||
#
|
||||
@@ -89,8 +89,8 @@
|
||||
#
|
||||
#
|
||||
#
|
||||
# rippled offers various server protocols to clients making inbound
|
||||
# connections. The listening ports rippled uses are "universal" ports
|
||||
# xrpld offers various server protocols to clients making inbound
|
||||
# connections. The listening ports xrpld uses are "universal" ports
|
||||
# which may be configured to handshake in one or more of the available
|
||||
# supported protocols. These universal ports simplify administration:
|
||||
# A single open port can be used for multiple protocols.
|
||||
@@ -103,7 +103,7 @@
|
||||
#
|
||||
# A list of port names and key/value pairs. A port name must start with a
|
||||
# letter and contain only letters and numbers. The name is not case-sensitive.
|
||||
# For each name in this list, rippled will look for a configuration file
|
||||
# For each name in this list, xrpld will look for a configuration file
|
||||
# section with the same name and use it to create a listening port. The
|
||||
# name is informational only; the choice of name does not affect the function
|
||||
# of the listening port.
|
||||
@@ -134,7 +134,7 @@
|
||||
# ip = 127.0.0.1
|
||||
# protocol = http
|
||||
#
|
||||
# When rippled is used as a command line client (for example, issuing a
|
||||
# When xrpld is used as a command line client (for example, issuing a
|
||||
# server stop command), the first port advertising the http or https
|
||||
# protocol will be used to make the connection.
|
||||
#
|
||||
@@ -175,7 +175,7 @@
|
||||
# same time. It is possible have both Websockets and Secure Websockets
|
||||
# together in one port.
|
||||
#
|
||||
# NOTE If no ports support the peer protocol, rippled cannot
|
||||
# NOTE If no ports support the peer protocol, xrpld cannot
|
||||
# receive incoming peer connections or become a superpeer.
|
||||
#
|
||||
# limit = <number>
|
||||
@@ -194,7 +194,7 @@
|
||||
# required. IP address restrictions, if any, will be checked in addition
|
||||
# to the credentials specified here.
|
||||
#
|
||||
# When acting in the client role, rippled will supply these credentials
|
||||
# When acting in the client role, xrpld will supply these credentials
|
||||
# using HTTP's Basic Authentication headers when making outbound HTTP/S
|
||||
# requests.
|
||||
#
|
||||
@@ -237,7 +237,7 @@
|
||||
# WS, or WSS protocol interfaces. If administrative commands are
|
||||
# disabled for a port, these credentials have no effect.
|
||||
#
|
||||
# When acting in the client role, rippled will supply these credentials
|
||||
# When acting in the client role, xrpld will supply these credentials
|
||||
# in the submitted JSON for any administrative command requests when
|
||||
# invoking JSON-RPC commands on remote servers.
|
||||
#
|
||||
@@ -258,7 +258,7 @@
|
||||
# resource controls will default to those for non-administrative users.
|
||||
#
|
||||
# The secure_gateway IP addresses are intended to represent
|
||||
# proxies. Since rippled trusts these hosts, they must be
|
||||
# proxies. Since xrpld trusts these hosts, they must be
|
||||
# responsible for properly authenticating the remote user.
|
||||
#
|
||||
# If some IP addresses are included for both "admin" and
|
||||
@@ -272,7 +272,7 @@
|
||||
# Use the specified files when configuring SSL on the port.
|
||||
#
|
||||
# NOTE If no files are specified and secure protocols are selected,
|
||||
# rippled will generate an internal self-signed certificate.
|
||||
# xrpld will generate an internal self-signed certificate.
|
||||
#
|
||||
# The files have these meanings:
|
||||
#
|
||||
@@ -297,12 +297,12 @@
|
||||
# Control the ciphers which the server will support over SSL on the port,
|
||||
# specified using the OpenSSL "cipher list format".
|
||||
#
|
||||
# NOTE If unspecified, rippled will automatically configure a modern
|
||||
# NOTE If unspecified, xrpld will automatically configure a modern
|
||||
# cipher suite. This default suite should be widely supported.
|
||||
#
|
||||
# You should not modify this string unless you have a specific
|
||||
# reason and cryptographic expertise. Incorrect modification may
|
||||
# keep rippled from connecting to other instances of rippled or
|
||||
# keep xrpld from connecting to other instances of xrpld or
|
||||
# prevent RPC and WebSocket clients from connecting.
|
||||
#
|
||||
# send_queue_limit = [1..65535]
|
||||
@@ -382,7 +382,7 @@
|
||||
#-----------------
|
||||
#
|
||||
# These settings control security and access attributes of the Peer to Peer
|
||||
# server section of the rippled process. Peer Protocol implements the
|
||||
# server section of the xrpld process. Peer Protocol implements the
|
||||
# Ripple Payment protocol. It is over peer connections that transactions
|
||||
# and validations are passed from to machine to machine, to determine the
|
||||
# contents of validated ledgers.
|
||||
@@ -396,7 +396,7 @@
|
||||
# true - enables compression
|
||||
# false - disables compression [default].
|
||||
#
|
||||
# The rippled server can save bandwidth by compressing its peer-to-peer communications,
|
||||
# The xrpld server can save bandwidth by compressing its peer-to-peer communications,
|
||||
# at a cost of greater CPU usage. If you enable link compression,
|
||||
# the server automatically compresses communications with peer servers
|
||||
# that also have link compression enabled.
|
||||
@@ -432,7 +432,7 @@
|
||||
#
|
||||
# [ips_fixed]
|
||||
#
|
||||
# List of IP addresses or hostnames to which rippled should always attempt to
|
||||
# List of IP addresses or hostnames to which xrpld should always attempt to
|
||||
# maintain peer connections with. This is useful for manually forming private
|
||||
# networks, for example to configure a validation server that connects to the
|
||||
# Ripple network through a public-facing server, or for building a set
|
||||
@@ -573,7 +573,7 @@
|
||||
#
|
||||
# minimum_txn_in_ledger_standalone = <number>
|
||||
#
|
||||
# Like minimum_txn_in_ledger when rippled is running in standalone
|
||||
# Like minimum_txn_in_ledger when xrpld is running in standalone
|
||||
# mode. Default: 1000.
|
||||
#
|
||||
# target_txn_in_ledger = <number>
|
||||
@@ -710,7 +710,7 @@
|
||||
#
|
||||
# [validator_token]
|
||||
#
|
||||
# This is an alternative to [validation_seed] that allows rippled to perform
|
||||
# This is an alternative to [validation_seed] that allows xrpld to perform
|
||||
# validation without having to store the validator keys on the network
|
||||
# connected server. The field should contain a single token in the form of a
|
||||
# base64-encoded blob.
|
||||
@@ -745,7 +745,7 @@
|
||||
#
|
||||
# Specify the file by its name or path.
|
||||
# Unless an absolute path is specified, it will be considered relative to
|
||||
# the folder in which the rippled.cfg file is located.
|
||||
# the folder in which the xrpld.cfg file is located.
|
||||
#
|
||||
# Examples:
|
||||
# /home/ripple/validators.txt
|
||||
@@ -840,7 +840,7 @@
|
||||
#
|
||||
# 0: Disable the ledger replay feature [default]
|
||||
# 1: Enable the ledger replay feature. With this feature enabled, when
|
||||
# acquiring a ledger from the network, a rippled node only downloads
|
||||
# acquiring a ledger from the network, a xrpld node only downloads
|
||||
# the ledger header and the transactions instead of the whole ledger.
|
||||
# And the ledger is built by applying the transactions to the parent
|
||||
# ledger.
|
||||
@@ -851,7 +851,7 @@
|
||||
#
|
||||
#----------------
|
||||
#
|
||||
# The rippled server instance uses HTTPS GET requests in a variety of
|
||||
# The xrpld server instance uses HTTPS GET requests in a variety of
|
||||
# circumstances, including but not limited to contacting trusted domains to
|
||||
# fetch information such as mapping an email address to a Ripple Payment
|
||||
# Network address.
|
||||
@@ -891,7 +891,7 @@
|
||||
#
|
||||
#------------
|
||||
#
|
||||
# rippled creates 4 SQLite database to hold bookkeeping information
|
||||
# xrpld creates 4 SQLite database to hold bookkeeping information
|
||||
# about transactions, local credentials, and various other things.
|
||||
# It also creates the NodeDB, which holds all the objects that
|
||||
# make up the current and historical ledgers.
|
||||
@@ -902,7 +902,7 @@
|
||||
# the performance of the server.
|
||||
#
|
||||
# Partial pathnames will be considered relative to the location of
|
||||
# the rippled.cfg file.
|
||||
# the xrpld.cfg file.
|
||||
#
|
||||
# [node_db] Settings for the Node Database (required)
|
||||
#
|
||||
@@ -920,11 +920,11 @@
|
||||
# type = NuDB
|
||||
#
|
||||
# NuDB is a high-performance database written by Ripple Labs and optimized
|
||||
# for rippled and solid-state drives.
|
||||
# for xrpld and solid-state drives.
|
||||
#
|
||||
# NuDB maintains its high speed regardless of the amount of history
|
||||
# stored. Online delete may be selected, but is not required. NuDB is
|
||||
# available on all platforms that rippled runs on.
|
||||
# available on all platforms that xrpld runs on.
|
||||
#
|
||||
# type = RocksDB
|
||||
#
|
||||
@@ -1049,7 +1049,7 @@
|
||||
#
|
||||
# recovery_wait_seconds
|
||||
# The online delete process checks periodically
|
||||
# that rippled is still in sync with the network,
|
||||
# that xrpld is still in sync with the network,
|
||||
# and that the validated ledger is less than
|
||||
# 'age_threshold_seconds' old. If not, then continue
|
||||
# sleeping for this number of seconds and
|
||||
@@ -1069,8 +1069,8 @@
|
||||
# The server creates and maintains 4 to 5 bookkeeping SQLite databases in
|
||||
# the 'database_path' location. If you omit this configuration setting,
|
||||
# the server creates a directory called "db" located in the same place as
|
||||
# your rippled.cfg file.
|
||||
# Partial pathnames are relative to the location of the rippled executable.
|
||||
# your xrpld.cfg file.
|
||||
# Partial pathnames are relative to the location of the xrpld executable.
|
||||
#
|
||||
# [sqlite] Tuning settings for the SQLite databases (optional)
|
||||
#
|
||||
@@ -1120,7 +1120,7 @@
|
||||
# The default is "wal", which uses a write-ahead
|
||||
# log to implement database transactions.
|
||||
# Alternately, "memory" saves disk I/O, but if
|
||||
# rippled crashes during a transaction, the
|
||||
# xrpld crashes during a transaction, the
|
||||
# database is likely to be corrupted.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_journal_mode
|
||||
# for more details about the available options.
|
||||
@@ -1130,7 +1130,7 @@
|
||||
# synchronous Valid values: off, normal, full, extra
|
||||
# The default is "normal", which works well with
|
||||
# the "wal" journal mode. Alternatively, "off"
|
||||
# allows rippled to continue as soon as data is
|
||||
# allows xrpld to continue as soon as data is
|
||||
# passed to the OS, which can significantly
|
||||
# increase speed, but risks data corruption if
|
||||
# the host computer crashes before writing that
|
||||
@@ -1144,7 +1144,7 @@
|
||||
# The default is "file", which will use files
|
||||
# for temporary database tables and indices.
|
||||
# Alternatively, "memory" may save I/O, but
|
||||
# rippled does not currently use many, if any,
|
||||
# xrpld does not currently use many, if any,
|
||||
# of these temporary objects.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_temp_store
|
||||
# for more details about the available options.
|
||||
@@ -1173,7 +1173,7 @@
|
||||
#
|
||||
# These settings are designed to help server administrators diagnose
|
||||
# problems, and obtain detailed information about the activities being
|
||||
# performed by the rippled process.
|
||||
# performed by the xrpld process.
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -1190,7 +1190,7 @@
|
||||
#
|
||||
# Configuration parameters for the Beast. Insight stats collection module.
|
||||
#
|
||||
# Insight is a module that collects information from the areas of rippled
|
||||
# Insight is a module that collects information from the areas of xrpld
|
||||
# that have instrumentation. The configuration parameters control where the
|
||||
# collection metrics are sent. The parameters are expressed as key = value
|
||||
# pairs with no white space. The main parameter is the choice of server:
|
||||
@@ -1199,7 +1199,7 @@
|
||||
#
|
||||
# Choice of server to send metrics to. Currently the only choice is
|
||||
# "statsd" which sends UDP packets to a StatsD daemon, which must be
|
||||
# running while rippled is running. More information on StatsD is
|
||||
# running while xrpld is running. More information on StatsD is
|
||||
# available here:
|
||||
# https://github.com/b/statsd_spec
|
||||
#
|
||||
@@ -1209,7 +1209,7 @@
|
||||
# in the format, n.n.n.n:port.
|
||||
#
|
||||
# "prefix" A string prepended to each collected metric. This is used
|
||||
# to distinguish between different running instances of rippled.
|
||||
# to distinguish between different running instances of xrpld.
|
||||
#
|
||||
# If this section is missing, or the server type is unspecified or unknown,
|
||||
# statistics are not collected or reported.
|
||||
@@ -1236,7 +1236,7 @@
|
||||
#
|
||||
# Example:
|
||||
# [perf]
|
||||
# perf_log=/var/log/rippled/perf.log
|
||||
# perf_log=/var/log/xrpld/perf.log
|
||||
# log_interval=2
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
@@ -1246,7 +1246,7 @@
|
||||
#----------
|
||||
#
|
||||
# The vote settings configure settings for the entire Ripple network.
|
||||
# While a single instance of rippled cannot unilaterally enforce network-wide
|
||||
# While a single instance of xrpld cannot unilaterally enforce network-wide
|
||||
# settings, these choices become part of the instance's vote during the
|
||||
# consensus process for each voting ledger.
|
||||
#
|
||||
@@ -1260,7 +1260,7 @@
|
||||
# The reference transaction is the simplest form of transaction.
|
||||
# It represents an XRP payment between two parties.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xrpld will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
@@ -1272,7 +1272,7 @@
|
||||
# account's XRP balance that is at or below the reserve may only be
|
||||
# spent on transaction fees, and not transferred out of the account.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xrpld will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
@@ -1284,7 +1284,7 @@
|
||||
# each ledger item owned by the account. Ledger items an account may
|
||||
# own include trust lines, open orders, and tickets.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xrpld will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
@@ -1326,7 +1326,7 @@
|
||||
# tool instead.
|
||||
#
|
||||
# This flag has no effect on the "sign" and "sign_for" command line options
|
||||
# that rippled makes available.
|
||||
# that xrpld makes available.
|
||||
#
|
||||
# The default value of this field is "false"
|
||||
#
|
||||
@@ -1405,7 +1405,7 @@
|
||||
#--------------------
|
||||
#
|
||||
# Administrators can use these values as a starting point for configuring
|
||||
# their instance of rippled, but each value should be checked to make sure
|
||||
# their instance of xrpld, but each value should be checked to make sure
|
||||
# it meets the business requirements for the organization.
|
||||
#
|
||||
# Server
|
||||
@@ -1415,7 +1415,7 @@
|
||||
# "peer"
|
||||
#
|
||||
# Peer protocol open to everyone. This is required to accept
|
||||
# incoming rippled connections. This does not affect automatic
|
||||
# incoming xrpld connections. This does not affect automatic
|
||||
# or manual outgoing Peer protocol connections.
|
||||
#
|
||||
# "rpc"
|
||||
@@ -1432,7 +1432,7 @@
|
||||
#
|
||||
# ETL commands for Clio. We recommend setting secure_gateway
|
||||
# in this section to a comma-separated list of the addresses
|
||||
# of your Clio servers, in order to bypass rippled's rate limiting.
|
||||
# of your Clio servers, in order to bypass xrpld's rate limiting.
|
||||
#
|
||||
# This port is commented out but can be enabled by removing
|
||||
# the '#' from each corresponding line including the entry under [server]
|
||||
@@ -1449,8 +1449,8 @@
|
||||
# NOTE
|
||||
#
|
||||
# To accept connections on well known ports such as 80 (HTTP) or
|
||||
# 443 (HTTPS), most operating systems will require rippled to
|
||||
# run with administrator privileges, or else rippled will not start.
|
||||
# 443 (HTTPS), most operating systems will require xrpld to
|
||||
# run with administrator privileges, or else xrpld will not start.
|
||||
|
||||
[server]
|
||||
port_rpc_admin_local
|
||||
@@ -1496,7 +1496,7 @@ secure_gateway = 127.0.0.1
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
# This is primary persistent datastore for rippled. This includes transaction
|
||||
# This is primary persistent datastore for xrpld. This includes transaction
|
||||
# metadata, account states, and ledger headers. Helpful information can be
|
||||
# found at https://xrpl.org/capacity-planning.html#node-db-type
|
||||
# type=NuDB is recommended for non-validators with fast SSDs. Validators or
|
||||
@@ -1511,19 +1511,19 @@ secure_gateway = 127.0.0.1
|
||||
# deletion.
|
||||
[node_db]
|
||||
type=NuDB
|
||||
path=/var/lib/rippled/db/nudb
|
||||
path=/var/lib/xrpld/db/nudb
|
||||
nudb_block_size=4096
|
||||
online_delete=512
|
||||
advisory_delete=0
|
||||
|
||||
[database_path]
|
||||
/var/lib/rippled/db
|
||||
/var/lib/xrpld/db
|
||||
|
||||
|
||||
# This needs to be an absolute directory reference, not a relative one.
|
||||
# Modify this value as required.
|
||||
[debug_logfile]
|
||||
/var/log/rippled/debug.log
|
||||
/var/log/xrpld/debug.log
|
||||
|
||||
# To use the XRP test network
|
||||
# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
|
||||
@@ -1533,7 +1533,7 @@ advisory_delete=0
|
||||
|
||||
# File containing trusted validator keys or validator list publishers.
|
||||
# Unless an absolute path is specified, it will be considered relative to the
|
||||
# folder in which the rippled.cfg file is located.
|
||||
# folder in which the xrpld.cfg file is located.
|
||||
[validators_file]
|
||||
validators.txt
|
||||
|
||||
@@ -62,7 +62,7 @@ if (is_root_project AND TARGET xrpld)
message (\"-- Skipping : \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/\${DEST}/\${NEWNAME}\")
endif ()
endmacro()
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/rippled-example.cfg\" etc rippled.cfg)
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/xrpld-example.cfg\" etc xrpld.cfg)
copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/validators-example.txt\" etc validators.txt)
")
install(CODE "
@@ -1,4 +1,4 @@
find_package(Boost 1.82 REQUIRED
find_package(Boost REQUIRED
COMPONENTS
chrono
container
@@ -182,12 +182,10 @@ class Xrpl(ConanFile):
libxrpl.libs = [
"xrpl",
"xrpl.libpb",
"ed25519",
"secp256k1",
]
# TODO: Fix the protobufs to include each other relative to
# `include/`, not `include/ripple/proto/`.
libxrpl.includedirs = ["include", "include/ripple/proto"]
# `include/`, not `include/xrpl/proto/`.
libxrpl.includedirs = ["include", "include/xrpl/proto"]
libxrpl.requires = [
"boost::headers",
"boost::chrono",
@@ -40,7 +40,7 @@ public:
using microseconds = std::chrono::microseconds;

/**
* Configuration from [perf] section of rippled.cfg.
* Configuration from [perf] section of xrpld.cfg.
*/
struct Setup
{
@@ -1,445 +0,0 @@
|
||||
#ifndef XRPL_JSON_OBJECT_H_INCLUDED
|
||||
#define XRPL_JSON_OBJECT_H_INCLUDED
|
||||
|
||||
#include <xrpl/json/Writer.h>
|
||||
|
||||
#include <memory>
|
||||
|
||||
namespace Json {
|
||||
|
||||
/**
|
||||
Collection is a base class for Array and Object, classes which provide the
|
||||
facade of JSON collections for the O(1) JSON writer, while still using no
|
||||
heap memory and only a very small amount of stack.
|
||||
|
||||
From http://json.org, JSON has two types of collection: array, and object.
|
||||
Everything else is a *scalar* - a number, a string, a boolean, the special
|
||||
value null, or a legacy Json::Value.
|
||||
|
||||
Collections must write JSON "as-it-goes" in order to get the strong
|
||||
performance guarantees. This puts restrictions upon API users:
|
||||
|
||||
1. Only one collection can be open for change at any one time.
|
||||
|
||||
This condition is enforced automatically and a std::logic_error thrown if
|
||||
it is violated.
|
||||
|
||||
2. A tag may only be used once in an Object.
|
||||
|
||||
Some objects have many tags, so this condition might be a little
|
||||
expensive. Enforcement of this condition is turned on in debug builds and
|
||||
a std::logic_error is thrown when the tag is added for a second time.
|
||||
|
||||
Code samples:
|
||||
|
||||
Writer writer;
|
||||
|
||||
// An empty object.
|
||||
{
|
||||
Object::Root (writer);
|
||||
}
|
||||
// Outputs {}
|
||||
|
||||
// An object with one scalar value.
|
||||
{
|
||||
Object::Root root (writer);
|
||||
write["hello"] = "world";
|
||||
}
|
||||
// Outputs {"hello":"world"}
|
||||
|
||||
// Same, using chaining.
|
||||
{
|
||||
Object::Root (writer)["hello"] = "world";
|
||||
}
|
||||
// Output is the same.
|
||||
|
||||
// Add several scalars, with chaining.
|
||||
{
|
||||
Object::Root (writer)
|
||||
.set ("hello", "world")
|
||||
.set ("flag", false)
|
||||
.set ("x", 42);
|
||||
}
|
||||
// Outputs {"hello":"world","flag":false,"x":42}
|
||||
|
||||
// Add an array.
|
||||
{
|
||||
Object::Root root (writer);
|
||||
{
|
||||
auto array = root.setArray ("hands");
|
||||
array.append ("left");
|
||||
array.append ("right");
|
||||
}
|
||||
}
|
||||
// Outputs {"hands":["left", "right"]}
|
||||
|
||||
// Same, using chaining.
|
||||
{
|
||||
Object::Root (writer)
|
||||
.setArray ("hands")
|
||||
.append ("left")
|
||||
.append ("right");
|
||||
}
|
||||
// Output is the same.
|
||||
|
||||
// Add an object.
|
||||
{
|
||||
Object::Root root (writer);
|
||||
{
|
||||
auto object = root.setObject ("hands");
|
||||
object["left"] = false;
|
||||
object["right"] = true;
|
||||
}
|
||||
}
|
||||
// Outputs {"hands":{"left":false,"right":true}}
|
||||
|
||||
// Same, using chaining.
|
||||
{
|
||||
Object::Root (writer)
|
||||
.setObject ("hands")
|
||||
.set ("left", false)
|
||||
.set ("right", true);
|
||||
}
|
||||
}
|
||||
// Outputs {"hands":{"left":false,"right":true}}
|
||||
|
||||
|
||||
Typical ways to make mistakes and get a std::logic_error:
|
||||
|
||||
Writer writer;
|
||||
Object::Root root (writer);
|
||||
|
||||
// Repeat a tag.
|
||||
{
|
||||
root ["hello"] = "world";
|
||||
root ["hello"] = "there"; // THROWS! in a debug build.
|
||||
}
|
||||
|
||||
// Open a subcollection, then set something else.
|
||||
{
|
||||
auto object = root.setObject ("foo");
|
||||
root ["hello"] = "world"; // THROWS!
|
||||
}
|
||||
|
||||
// Open two subcollections at a time.
|
||||
{
|
||||
auto object = root.setObject ("foo");
|
||||
auto array = root.setArray ("bar"); // THROWS!!
|
||||
}
|
||||
|
||||
For more examples, check the unit tests.
|
||||
*/
|
||||
|
||||
class Collection
|
||||
{
|
||||
public:
|
||||
Collection(Collection&& c) noexcept;
|
||||
Collection&
|
||||
operator=(Collection&& c) noexcept;
|
||||
Collection() = delete;
|
||||
|
||||
~Collection();
|
||||
|
||||
protected:
|
||||
// A null parent means "no parent at all".
|
||||
// Writers cannot be null.
|
||||
Collection(Collection* parent, Writer*);
|
||||
void
|
||||
checkWritable(std::string const& label);
|
||||
|
||||
Collection* parent_;
|
||||
Writer* writer_;
|
||||
bool enabled_;
|
||||
};
|
||||
|
||||
class Array;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/** Represents a JSON object being written to a Writer. */
|
||||
class Object : protected Collection
|
||||
{
|
||||
public:
|
||||
/** Object::Root is the only Collection that has a public constructor. */
|
||||
class Root;
|
||||
|
||||
/** Set a scalar value in the Object for a key.
|
||||
|
||||
A JSON scalar is a single value - a number, string, boolean, nullptr or
|
||||
a Json::Value.
|
||||
|
||||
`set()` throws an exception if this object is disabled (which means that
|
||||
one of its children is enabled).
|
||||
|
||||
In a debug build, `set()` also throws an exception if the key has
|
||||
already been set() before.
|
||||
|
||||
An operator[] is provided to allow writing `object["key"] = scalar;`.
|
||||
*/
|
||||
template <typename Scalar>
|
||||
void
|
||||
set(std::string const& key, Scalar const&);
|
||||
|
||||
void
|
||||
set(std::string const& key, Json::Value const&);
|
||||
|
||||
// Detail class and method used to implement operator[].
|
||||
class Proxy;
|
||||
|
||||
Proxy
|
||||
operator[](std::string const& key);
|
||||
Proxy
|
||||
operator[](Json::StaticString const& key);
|
||||
|
||||
/** Make a new Object at a key and return it.
|
||||
|
||||
This Object is disabled until that sub-object is destroyed.
|
||||
Throws an exception if this Object was already disabled.
|
||||
*/
|
||||
Object
|
||||
setObject(std::string const& key);
|
||||
|
||||
/** Make a new Array at a key and return it.
|
||||
|
||||
This Object is disabled until that sub-array is destroyed.
|
||||
Throws an exception if this Object was already disabled.
|
||||
*/
|
||||
Array
|
||||
setArray(std::string const& key);
|
||||
|
||||
protected:
|
||||
friend class Array;
|
||||
Object(Collection* parent, Writer* w) : Collection(parent, w)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
class Object::Root : public Object
|
||||
{
|
||||
public:
|
||||
/** Each Object::Root must be constructed with its own unique Writer. */
|
||||
Root(Writer&);
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/** Represents a JSON array being written to a Writer. */
|
||||
class Array : private Collection
|
||||
{
|
||||
public:
|
||||
/** Append a scalar to the Arrary.
|
||||
|
||||
Throws an exception if this array is disabled (which means that one of
|
||||
its sub-collections is enabled).
|
||||
*/
|
||||
template <typename Scalar>
|
||||
void
|
||||
append(Scalar const&);
|
||||
|
||||
/**
|
||||
Appends a Json::Value to an array.
|
||||
Throws an exception if this Array was disabled.
|
||||
*/
|
||||
void
|
||||
append(Json::Value const&);
|
||||
|
||||
/** Append a new Object and return it.
|
||||
|
||||
This Array is disabled until that sub-object is destroyed.
|
||||
Throws an exception if this Array was disabled.
|
||||
*/
|
||||
Object
|
||||
appendObject();
|
||||
|
||||
/** Append a new Array and return it.
|
||||
|
||||
This Array is disabled until that sub-array is destroyed.
|
||||
Throws an exception if this Array was already disabled.
|
||||
*/
|
||||
Array
|
||||
appendArray();
|
||||
|
||||
protected:
|
||||
friend class Object;
|
||||
Array(Collection* parent, Writer* w) : Collection(parent, w)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Generic accessor functions to allow Json::Value and Collection to
|
||||
// interoperate.
|
||||
|
||||
/** Add a new subarray at a named key in a Json object. */
|
||||
Json::Value&
|
||||
setArray(Json::Value&, Json::StaticString const& key);
|
||||
|
||||
/** Add a new subarray at a named key in a Json object. */
|
||||
Array
|
||||
setArray(Object&, Json::StaticString const& key);
|
||||
|
||||
/** Add a new subobject at a named key in a Json object. */
|
||||
Json::Value&
|
||||
addObject(Json::Value&, Json::StaticString const& key);
|
||||
|
||||
/** Add a new subobject at a named key in a Json object. */
|
||||
Object
|
||||
addObject(Object&, Json::StaticString const& key);
|
||||
|
||||
/** Append a new subarray to a Json array. */
|
||||
Json::Value&
|
||||
appendArray(Json::Value&);
|
||||
|
||||
/** Append a new subarray to a Json array. */
|
||||
Array
|
||||
appendArray(Array&);
|
||||
|
||||
/** Append a new subobject to a Json object. */
|
||||
Json::Value&
|
||||
appendObject(Json::Value&);
|
||||
|
||||
/** Append a new subobject to a Json object. */
|
||||
Object
|
||||
appendObject(Array&);
|
||||
|
||||
/** Copy all the keys and values from one object into another. */
|
||||
void
|
||||
copyFrom(Json::Value& to, Json::Value const& from);
|
||||
|
||||
/** Copy all the keys and values from one object into another. */
|
||||
void
|
||||
copyFrom(Object& to, Json::Value const& from);
|
||||
|
||||
/** An Object that contains its own Writer. */
|
||||
class WriterObject
|
||||
{
|
||||
public:
|
||||
WriterObject(Output const& output)
|
||||
: writer_(std::make_unique<Writer>(output))
|
||||
, object_(std::make_unique<Object::Root>(*writer_))
|
||||
{
|
||||
}
|
||||
|
||||
WriterObject(WriterObject&& other) = default;
|
||||
|
||||
Object*
|
||||
operator->()
|
||||
{
|
||||
return object_.get();
|
||||
}
|
||||
|
||||
Object&
|
||||
operator*()
|
||||
{
|
||||
return *object_;
|
||||
}
|
||||
|
||||
private:
|
||||
std::unique_ptr<Writer> writer_;
|
||||
std::unique_ptr<Object::Root> object_;
|
||||
};
|
||||
|
||||
WriterObject
|
||||
stringWriterObject(std::string&);
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// Implementation details.
|
||||
|
||||
// Detail class for Object::operator[].
|
||||
class Object::Proxy
|
||||
{
|
||||
private:
|
||||
Object& object_;
|
||||
std::string const key_;
|
||||
|
||||
public:
|
||||
Proxy(Object& object, std::string const& key);
|
||||
|
||||
template <class T>
|
||||
void
|
||||
operator=(T const& t)
|
||||
{
|
||||
object_.set(key_, t);
|
||||
// Note: This function shouldn't return *this, because it's a trap.
|
||||
//
|
||||
// In Json::Value, foo[jss::key] returns a reference to a
|
||||
// mutable Json::Value contained _inside_ foo. But in the case of
|
||||
// Json::Object, where we write once only, there isn't any such
|
||||
// reference that can be returned. Returning *this would return an
|
||||
// object "a level higher" than in Json::Value, leading to obscure bugs,
|
||||
// particularly in generic code.
|
||||
}
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
template <typename Scalar>
|
||||
void
|
||||
Array::append(Scalar const& value)
|
||||
{
|
||||
checkWritable("append");
|
||||
if (writer_)
|
||||
writer_->append(value);
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
void
|
||||
Object::set(std::string const& key, Scalar const& value)
|
||||
{
|
||||
checkWritable("set");
|
||||
if (writer_)
|
||||
writer_->set(key, value);
|
||||
}
|
||||
|
||||
inline Json::Value&
|
||||
setArray(Json::Value& json, Json::StaticString const& key)
|
||||
{
|
||||
return (json[key] = Json::arrayValue);
|
||||
}
|
||||
|
||||
inline Array
|
||||
setArray(Object& json, Json::StaticString const& key)
|
||||
{
|
||||
return json.setArray(std::string(key));
|
||||
}
|
||||
|
||||
inline Json::Value&
|
||||
addObject(Json::Value& json, Json::StaticString const& key)
|
||||
{
|
||||
return (json[key] = Json::objectValue);
|
||||
}
|
||||
|
||||
inline Object
|
||||
addObject(Object& object, Json::StaticString const& key)
|
||||
{
|
||||
return object.setObject(std::string(key));
|
||||
}
|
||||
|
||||
inline Json::Value&
|
||||
appendArray(Json::Value& json)
|
||||
{
|
||||
return json.append(Json::arrayValue);
|
||||
}
|
||||
|
||||
inline Array
|
||||
appendArray(Array& json)
|
||||
{
|
||||
return json.appendArray();
|
||||
}
|
||||
|
||||
inline Json::Value&
|
||||
appendObject(Json::Value& json)
|
||||
{
|
||||
return json.append(Json::objectValue);
|
||||
}
|
||||
|
||||
inline Object
|
||||
appendObject(Array& json)
|
||||
{
|
||||
return json.appendObject();
|
||||
}
|
||||
|
||||
} // namespace Json
|
||||
|
||||
#endif
|
||||
@@ -130,7 +130,7 @@ newer versions of RocksDB (TBD).
## Discussion

RocksDBQuickFactory is intended to provide a testbed for comparing potential
rocksdb performance with the existing recommended configuration in rippled.cfg.
rocksdb performance with the existing recommended configuration in xrpld.cfg.
Through various executions and profiling some conclusions are presented below.

- If the write ahead log is enabled, insert speed soon clogs up under load. The
@@ -58,14 +58,14 @@ static_assert(apiMaximumSupportedVersion >= apiMinimumSupportedVersion);
static_assert(apiBetaVersion >= apiMaximumSupportedVersion);
static_assert(apiMaximumValidVersion >= apiMaximumSupportedVersion);

template <class JsonObject>
void
setVersion(JsonObject& parent, unsigned int apiVersion, bool betaEnabled)
inline void
setVersion(Json::Value& parent, unsigned int apiVersion, bool betaEnabled)
{
XRPL_ASSERT(
apiVersion != apiInvalidVersion,
"xrpl::RPC::setVersion : input is valid");
auto& retObj = addObject(parent, jss::version);

auto& retObj = parent[jss::version] = Json::objectValue;

if (apiVersion == apiVersionIfUnspecified)
{
@@ -209,33 +209,11 @@ get_error_info(error_code_i code);

/** Add or update the json update to reflect the error code. */
/** @{ */
template <class JsonValue>
void
inject_error(error_code_i code, JsonValue& json)
{
ErrorInfo const& info(get_error_info(code));
json[jss::error] = info.token;
json[jss::error_code] = info.code;
json[jss::error_message] = info.message;
}
inject_error(error_code_i code, Json::Value& json);

template <class JsonValue>
void
inject_error(int code, JsonValue& json)
{
inject_error(error_code_i(code), json);
}

template <class JsonValue>
void
inject_error(error_code_i code, std::string const& message, JsonValue& json)
{
ErrorInfo const& info(get_error_info(code));
json[jss::error] = info.token;
json[jss::error_code] = info.code;
json[jss::error_message] = message;
}

inject_error(error_code_i code, std::string const& message, Json::Value& json);
/** @} */

/** Returns a new json object that reflects the error code. */
@@ -9,7 +9,7 @@ namespace xrpl {
bool
isRpcError(Json::Value jvResult);
Json::Value
rpcError(int iError);
rpcError(error_code_i iError);

} // namespace xrpl
@@ -1,233 +0,0 @@
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/beast/utility/instrumentation.h>
|
||||
#include <xrpl/json/Object.h>
|
||||
#include <xrpl/json/Output.h>
|
||||
#include <xrpl/json/Writer.h>
|
||||
#include <xrpl/json/json_value.h>
|
||||
|
||||
#include <stdexcept>
|
||||
#include <utility>
|
||||
|
||||
namespace Json {
|
||||
|
||||
Collection::Collection(Collection* parent, Writer* writer)
|
||||
: parent_(parent), writer_(writer), enabled_(true)
|
||||
{
|
||||
checkWritable("Collection::Collection()");
|
||||
if (parent_)
|
||||
{
|
||||
check(parent_->enabled_, "Parent not enabled in constructor");
|
||||
parent_->enabled_ = false;
|
||||
}
|
||||
}
|
||||
|
||||
Collection::~Collection()
|
||||
{
|
||||
if (writer_)
|
||||
writer_->finish();
|
||||
if (parent_)
|
||||
parent_->enabled_ = true;
|
||||
}
|
||||
|
||||
Collection&
|
||||
Collection::operator=(Collection&& that) noexcept
|
||||
{
|
||||
parent_ = that.parent_;
|
||||
writer_ = that.writer_;
|
||||
enabled_ = that.enabled_;
|
||||
|
||||
that.parent_ = nullptr;
|
||||
that.writer_ = nullptr;
|
||||
that.enabled_ = false;
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
Collection::Collection(Collection&& that) noexcept
|
||||
{
|
||||
*this = std::move(that);
|
||||
}
|
||||
|
||||
void
|
||||
Collection::checkWritable(std::string const& label)
|
||||
{
|
||||
if (!enabled_)
|
||||
xrpl::Throw<std::logic_error>(label + ": not enabled");
|
||||
if (!writer_)
|
||||
xrpl::Throw<std::logic_error>(label + ": not writable");
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
Object::Root::Root(Writer& w) : Object(nullptr, &w)
|
||||
{
|
||||
writer_->startRoot(Writer::object);
|
||||
}
|
||||
|
||||
Object
|
||||
Object::setObject(std::string const& key)
|
||||
{
|
||||
checkWritable("Object::setObject");
|
||||
if (writer_)
|
||||
writer_->startSet(Writer::object, key);
|
||||
return Object(this, writer_);
|
||||
}
|
||||
|
||||
Array
|
||||
Object::setArray(std::string const& key)
|
||||
{
|
||||
checkWritable("Object::setArray");
|
||||
if (writer_)
|
||||
writer_->startSet(Writer::array, key);
|
||||
return Array(this, writer_);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
Object
|
||||
Array::appendObject()
|
||||
{
|
||||
checkWritable("Array::appendObject");
|
||||
if (writer_)
|
||||
writer_->startAppend(Writer::object);
|
||||
return Object(this, writer_);
|
||||
}
|
||||
|
||||
Array
|
||||
Array::appendArray()
|
||||
{
|
||||
checkWritable("Array::makeArray");
|
||||
if (writer_)
|
||||
writer_->startAppend(Writer::array);
|
||||
return Array(this, writer_);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
Object::Proxy::Proxy(Object& object, std::string const& key)
|
||||
: object_(object), key_(key)
|
||||
{
|
||||
}
|
||||
|
||||
Object::Proxy
|
||||
Object::operator[](std::string const& key)
|
||||
{
|
||||
return Proxy(*this, key);
|
||||
}
|
||||
|
||||
Object::Proxy
|
||||
Object::operator[](Json::StaticString const& key)
|
||||
{
|
||||
return Proxy(*this, std::string(key));
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
void
|
||||
Array::append(Json::Value const& v)
|
||||
{
|
||||
auto t = v.type();
|
||||
switch (t)
|
||||
{
|
||||
case Json::nullValue:
|
||||
return append(nullptr);
|
||||
case Json::intValue:
|
||||
return append(v.asInt());
|
||||
case Json::uintValue:
|
||||
return append(v.asUInt());
|
||||
case Json::realValue:
|
||||
return append(v.asDouble());
|
||||
case Json::stringValue:
|
||||
return append(v.asString());
|
||||
case Json::booleanValue:
|
||||
return append(v.asBool());
|
||||
|
||||
case Json::objectValue: {
|
||||
auto object = appendObject();
|
||||
copyFrom(object, v);
|
||||
return;
|
||||
}
|
||||
|
||||
case Json::arrayValue: {
|
||||
auto array = appendArray();
|
||||
for (auto& item : v)
|
||||
array.append(item);
|
||||
return;
|
||||
}
|
||||
}
|
||||
UNREACHABLE("Json::Array::append : invalid type"); // LCOV_EXCL_LINE
|
||||
}
|
||||
|
||||
void
|
||||
Object::set(std::string const& k, Json::Value const& v)
|
||||
{
|
||||
auto t = v.type();
|
||||
switch (t)
|
||||
{
|
||||
case Json::nullValue:
|
||||
return set(k, nullptr);
|
||||
case Json::intValue:
|
||||
return set(k, v.asInt());
|
||||
case Json::uintValue:
|
||||
return set(k, v.asUInt());
|
||||
case Json::realValue:
|
||||
return set(k, v.asDouble());
|
||||
case Json::stringValue:
|
||||
return set(k, v.asString());
|
||||
case Json::booleanValue:
|
||||
return set(k, v.asBool());
|
||||
|
||||
case Json::objectValue: {
|
||||
auto object = setObject(k);
|
||||
copyFrom(object, v);
|
||||
return;
|
||||
}
|
||||
|
||||
case Json::arrayValue: {
|
||||
auto array = setArray(k);
|
||||
for (auto& item : v)
|
||||
array.append(item);
|
||||
return;
|
||||
}
|
||||
}
|
||||
UNREACHABLE("Json::Object::set : invalid type"); // LCOV_EXCL_LINE
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
namespace {
|
||||
|
||||
template <class Object>
|
||||
void
|
||||
doCopyFrom(Object& to, Json::Value const& from)
|
||||
{
|
||||
XRPL_ASSERT(from.isObjectOrNull(), "Json::doCopyFrom : valid input type");
|
||||
auto members = from.getMemberNames();
|
||||
for (auto& m : members)
|
||||
to[m] = from[m];
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void
|
||||
copyFrom(Json::Value& to, Json::Value const& from)
|
||||
{
|
||||
if (!to) // Short circuit this very common case.
|
||||
to = from;
|
||||
else
|
||||
doCopyFrom(to, from);
|
||||
}
|
||||
|
||||
void
|
||||
copyFrom(Object& to, Json::Value const& from)
|
||||
{
|
||||
doCopyFrom(to, from);
|
||||
}
|
||||
|
||||
WriterObject
|
||||
stringWriterObject(std::string& s)
|
||||
{
|
||||
return WriterObject(stringOutput(s));
|
||||
}
|
||||
|
||||
} // namespace Json
|
||||
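The file removed above implemented the streaming Json::Object / Json::Array wrappers around Json::Writer. For context, a minimal usage sketch follows, modelled on the JsonObject_test suite that is deleted later in this change set; the function name is made up for illustration.

```
#include <xrpl/json/Object.h>

#include <string>

// Minimal sketch of the streaming API implemented by the deleted file above,
// modelled on the JsonObject_test cases removed later in this change set.
// JSON text is emitted incrementally into `out` as each scope closes.
std::string
writeExample()
{
    std::string out;
    {
        auto writer = Json::stringWriterObject(out);
        auto& root = *writer;                  // Json::Object& for the document root
        root["hello"] = "world";               // scalar member
        {
            auto array = root.setArray("ar");  // nested array; root is locked while it is alive
            array.append(23);
            array.append(false);
        }
        root.setObject("obj")["key"] = 1;      // nested object, closed at end of statement
    }                                          // destructors finish the JSON document
    return out;  // e.g. {"hello":"world","ar":[23,false],"obj":{"key":1}}
}
```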
@@ -18,8 +18,8 @@ void
ManagerImp::missing_backend()
{
Throw<std::runtime_error>(
"Your rippled.cfg is missing a [node_db] entry, "
"please see the rippled-example.cfg file!");
"Your xrpld.cfg is missing a [node_db] entry, "
"please see the xrpld-example.cfg file!");
}

// We shouldn't rely on global variables for lifetime management because their
|
||||
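The message above is thrown when the loaded configuration has no [node_db] stanza. For illustration only, a minimal stanza and how the unit tests in this diff load such content; the header path and file-system path are placeholders, and a production xrpld.cfg would normally use a persistent backend rather than "memory".

```
#include <xrpld/core/Config.h>  // header path assumed; Config is exercised by the tests below

// Illustrative only: the smallest [node_db] stanza that satisfies
// ManagerImp::missing_backend(). The path is a placeholder; the test config
// template elsewhere in this diff also uses type=memory.
static char const* minimalNodeDb = R"xrpldConfig(
[node_db]
type=memory
path=/var/lib/xrpld/db
)xrpldConfig";

void
loadMinimalConfig(xrpl::Config& config)
{
    // Same entry point the configuration unit tests in this diff use.
    config.loadFromString(minimalNodeDb);
}
```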
@@ -17,7 +17,7 @@ namespace BuildInfo {
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "3.1.0-b0"
char const* const versionString = "3.2.0-b0"
// clang-format on

#if defined(DEBUG) || defined(SANITIZER)

@@ -160,6 +160,24 @@ constexpr ErrorInfo unknownError;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
void
|
||||
inject_error(error_code_i code, Json::Value& json)
|
||||
{
|
||||
ErrorInfo const& info(get_error_info(code));
|
||||
json[jss::error] = info.token;
|
||||
json[jss::error_code] = info.code;
|
||||
json[jss::error_message] = info.message;
|
||||
}
|
||||
|
||||
void
|
||||
inject_error(error_code_i code, std::string const& message, Json::Value& json)
|
||||
{
|
||||
ErrorInfo const& info(get_error_info(code));
|
||||
json[jss::error] = info.token;
|
||||
json[jss::error_code] = info.code;
|
||||
json[jss::error_message] = message;
|
||||
}
|
||||
|
||||
ErrorInfo const&
|
||||
get_error_info(error_code_i code)
|
||||
{
|
||||
|
||||
@@ -9,7 +9,7 @@ struct RPCErr;
|
||||
|
||||
// VFALCO NOTE Deprecated function
|
||||
Json::Value
|
||||
rpcError(int iError)
|
||||
rpcError(error_code_i iError)
|
||||
{
|
||||
Json::Value jvResult(Json::objectValue);
|
||||
RPC::inject_error(iError, jvResult);
|
||||
|
||||
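The two hunks above standardize the error helpers on error_code_i: inject_error fills the jss::error, error_code, and error_message fields from the ErrorInfo table, and rpcError wraps that in a fresh object. A hedged sketch of the composition follows; the enumerator chosen is only an example, and the xrpl namespace follows the rename applied elsewhere in this change set.

```
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/ErrorCodes.h>

// Sketch of how the functions above compose after this change: rpcError now
// takes an error_code_i, and inject_error fills the standard error fields
// from the ErrorInfo table. rpcINVALID_PARAMS is only an example code.
Json::Value
makeInvalidParamsError()
{
    Json::Value result(Json::objectValue);
    xrpl::RPC::inject_error(xrpl::rpcINVALID_PARAMS, result);
    // result now holds "error", "error_code" and "error_message".
    return result;
}
```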
@@ -303,7 +303,7 @@ public:
// offers unfunded.
// b. Carol's remaining 800 offers are consumed as unfunded.
// c. 199 of alice's XRP(1) to USD(3) offers are consumed.
// A book step is allowed to consume a maxium of 1000 offers
// A book step is allowed to consume a maximum of 1000 offers
// at a given quality, and that limit is now reached.
// d. Now the strand is dry, even though there are still funded
// XRP(1) to USD(3) offers available.
@@ -384,7 +384,7 @@ public:
// offers unfunded.
// b. Carol's remaining 800 offers are consumed as unfunded.
// c. 199 of alice's XRP(1) to USD(3) offers are consumed.
// A book step is allowed to consume a maxium of 1000 offers
// A book step is allowed to consume a maximum of 1000 offers
// at a given quality, and that limit is now reached.
// d. Now the strand is dry, even though there are still funded
// XRP(1) to USD(3) offers available. Bob has spent 400 EUR and

@@ -1298,7 +1298,7 @@ public:
testNegativeBalance(FeatureBitset features)
{
// This test creates an offer test for negative balance
// with transfer fees and miniscule funds.
// with transfer fees and minuscule funds.
testcase("Negative Balance");

using namespace jtx;

@@ -254,7 +254,7 @@ class RCLValidations_test : public beast::unit_test::suite
|
||||
BEAST_EXPECT(trie.branchSupport(ledg_258) == 4);
|
||||
|
||||
// Move three of the s258 ledgers to s259, which splits the trie
|
||||
// due to the 256 ancestory limit
|
||||
// due to the 256 ancestry limit
|
||||
BEAST_EXPECT(trie.remove(ledg_258, 3));
|
||||
trie.insert(ledg_259, 3);
|
||||
trie.getPreferred(1);
|
||||
@@ -275,7 +275,7 @@ class RCLValidations_test : public beast::unit_test::suite
|
||||
// then verify the remove call works
|
||||
// past bug: remove had assumed the first child of a node in the trie
|
||||
// which matches is the *only* child in the trie which matches.
|
||||
// This is **NOT** true with the limited 256 ledger ancestory
|
||||
// This is **NOT** true with the limited 256 ledger ancestry
|
||||
// quirk of RCLValidation and prevents deleting the old support
|
||||
// for ledger 257
|
||||
|
||||
|
||||
@@ -3821,7 +3821,7 @@ public:
|
||||
return result;
|
||||
};
|
||||
|
||||
testcase("straightfoward positive case");
|
||||
testcase("straightforward positive case");
|
||||
{
|
||||
// Queue up some transactions at a too-low fee.
|
||||
auto aliceSeq = env.seq(alice);
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include <xrpld/core/ConfigSections.h>
|
||||
|
||||
#include <xrpl/beast/unit_test/suite.h>
|
||||
#include <xrpl/beast/utility/temp_dir.h>
|
||||
#include <xrpl/server/Port.h>
|
||||
|
||||
#include <boost/filesystem.hpp>
|
||||
@@ -18,7 +19,7 @@ namespace detail {
|
||||
std::string
|
||||
configContents(std::string const& dbPath, std::string const& validatorsFile)
|
||||
{
|
||||
static boost::format configContentsTemplate(R"rippleConfig(
|
||||
static boost::format configContentsTemplate(R"xrpldConfig(
|
||||
[server]
|
||||
port_rpc
|
||||
port_peer
|
||||
@@ -51,14 +52,14 @@ protocol = wss
|
||||
[node_size]
|
||||
medium
|
||||
|
||||
# This is primary persistent datastore for rippled. This includes transaction
|
||||
# This is primary persistent datastore for xrpld. This includes transaction
|
||||
# metadata, account states, and ledger headers. Helpful information can be
|
||||
# found on https://xrpl.org/capacity-planning.html#node-db-type
|
||||
# delete old ledgers while maintaining at least 2000. Do not require an
|
||||
# external administrative command to initiate deletion.
|
||||
[node_db]
|
||||
type=memory
|
||||
path=/Users/dummy/ripple/config/db/rocksdb
|
||||
path=/Users/dummy/xrpld/config/db/rocksdb
|
||||
open_files=2000
|
||||
filter_bits=12
|
||||
cache_mb=256
|
||||
@@ -72,7 +73,7 @@ file_size_mult=2
|
||||
# This needs to be an absolute directory reference, not a relative one.
|
||||
# Modify this value as required.
|
||||
[debug_logfile]
|
||||
/Users/dummy/ripple/config/log/debug.log
|
||||
/Users/dummy/xrpld/config/log/debug.log
|
||||
|
||||
[sntp_servers]
|
||||
time.windows.com
|
||||
@@ -97,7 +98,7 @@ r.ripple.com 51235
|
||||
|
||||
[sqdb]
|
||||
backend=sqlite
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
|
||||
std::string dbPathSection =
|
||||
dbPath.empty() ? "" : "[database_path]\n" + dbPath;
|
||||
@@ -107,9 +108,9 @@ backend=sqlite
|
||||
}
|
||||
|
||||
/**
|
||||
Write a rippled config file and remove when done.
|
||||
Write a xrpld config file and remove when done.
|
||||
*/
|
||||
class RippledCfgGuard : public xrpl::detail::FileDirGuard
|
||||
class FileCfgGuard : public xrpl::detail::FileDirGuard
|
||||
{
|
||||
private:
|
||||
path dataDir_;
|
||||
@@ -119,17 +120,18 @@ private:
|
||||
Config config_;
|
||||
|
||||
public:
|
||||
RippledCfgGuard(
|
||||
FileCfgGuard(
|
||||
beast::unit_test::suite& test,
|
||||
path subDir,
|
||||
path const& dbPath,
|
||||
path const& configFile,
|
||||
path const& validatorsFile,
|
||||
bool useCounter = true,
|
||||
std::string confContents = "")
|
||||
: FileDirGuard(
|
||||
test,
|
||||
std::move(subDir),
|
||||
path(Config::configFileName),
|
||||
configFile,
|
||||
confContents.empty()
|
||||
? configContents(dbPath.string(), validatorsFile.string())
|
||||
: confContents,
|
||||
@@ -171,7 +173,7 @@ public:
|
||||
return fileExists();
|
||||
}
|
||||
|
||||
~RippledCfgGuard()
|
||||
~FileCfgGuard()
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -182,7 +184,7 @@ public:
|
||||
catch (std::exception& e)
|
||||
{
|
||||
// if we throw here, just let it die.
|
||||
test_.log << "Error in ~RippledCfgGuard: " << e.what() << std::endl;
|
||||
test_.log << "Error in ~FileCfgGuard: " << e.what() << std::endl;
|
||||
};
|
||||
}
|
||||
};
|
||||
@@ -190,7 +192,7 @@ public:
|
||||
std::string
|
||||
valFileContents()
|
||||
{
|
||||
std::string configContents(R"rippleConfig(
|
||||
std::string configContents(R"xrpldConfig(
|
||||
[validators]
|
||||
n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7
|
||||
n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj
|
||||
@@ -204,8 +206,8 @@ nHBu9PTL9dn2GuZtdW4U2WzBwffyX9qsQCd9CNU4Z5YG3PQfViM8
|
||||
nHUPDdcdb2Y5DZAJne4c2iabFuAP3F34xZUgYQT2NH7qfkdapgnz
|
||||
|
||||
[validator_list_sites]
|
||||
recommendedripplevalidators.com
|
||||
moreripplevalidators.net
|
||||
recommendedxrplvalidators.com
|
||||
morexrplvalidators.net
|
||||
|
||||
[validator_list_keys]
|
||||
03E74EE14CB525AFBB9F1B7D86CD58ECC4B91452294B42AB4E78F260BD905C091D
|
||||
@@ -213,7 +215,7 @@ moreripplevalidators.net
|
||||
|
||||
[validator_list_threshold]
|
||||
2
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
return configContents;
|
||||
}
|
||||
|
||||
@@ -270,7 +272,7 @@ public:
|
||||
|
||||
Config c;
|
||||
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[server]
|
||||
port_rpc
|
||||
port_peer
|
||||
@@ -278,7 +280,7 @@ port_wss_admin
|
||||
|
||||
[ssl_verify]
|
||||
0
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
|
||||
c.loadFromString(toLoad);
|
||||
|
||||
@@ -291,6 +293,126 @@ port_wss_admin
|
||||
BEAST_EXPECT(c.legacy("not_in_file") == "new_value");
|
||||
}
|
||||
void
|
||||
testConfigFile()
|
||||
{
|
||||
testcase("config_file");
|
||||
|
||||
using namespace boost::filesystem;
|
||||
auto const cwd = current_path();
|
||||
|
||||
// Test both config file names.
|
||||
char const* configFiles[] = {
|
||||
Config::configFileName, Config::configLegacyName};
|
||||
|
||||
// Config file in current directory.
|
||||
for (auto const& configFile : configFiles)
|
||||
{
|
||||
// Use a temporary directory for testing.
|
||||
beast::temp_dir td;
|
||||
current_path(td.path());
|
||||
path const f = td.file(configFile);
|
||||
std::ofstream o(f.string());
|
||||
o << detail::configContents("", "");
|
||||
o.close();
|
||||
|
||||
// Load the config file from the current directory and verify it.
|
||||
Config c;
|
||||
c.setup("", true, false, true);
|
||||
BEAST_EXPECT(c.section(SECTION_DEBUG_LOGFILE).values().size() == 1);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_DEBUG_LOGFILE).values()[0] ==
|
||||
"/Users/dummy/xrpld/config/log/debug.log");
|
||||
}
|
||||
|
||||
// Config file in HOME or XDG_CONFIG_HOME directory.
|
||||
#if BOOST_OS_LINUX || BOOST_OS_MACOS
|
||||
for (auto const& configFile : configFiles)
|
||||
{
|
||||
// Point the current working directory to a temporary directory, so
|
||||
// we don't pick up an actual config file from the repository root.
|
||||
beast::temp_dir td;
|
||||
current_path(td.path());
|
||||
|
||||
// The XDG config directory is set: the config file must be in a
|
||||
// subdirectory named after the system.
|
||||
{
|
||||
beast::temp_dir tc;
|
||||
|
||||
// Set the HOME and XDG_CONFIG_HOME environment variables. The
|
||||
// HOME variable is not used when XDG_CONFIG_HOME is set, but
|
||||
// must be set.
|
||||
char const* h = getenv("HOME");
|
||||
setenv("HOME", tc.path().c_str(), 1);
|
||||
char const* x = getenv("XDG_CONFIG_HOME");
|
||||
setenv("XDG_CONFIG_HOME", tc.path().c_str(), 1);
|
||||
|
||||
// Create the config file in '${XDG_CONFIG_HOME}/[systemName]'.
|
||||
path p = tc.file(systemName());
|
||||
create_directory(p);
|
||||
p = tc.file(systemName() + "/" + configFile);
|
||||
std::ofstream o(p.string());
|
||||
o << detail::configContents("", "");
|
||||
o.close();
|
||||
|
||||
// Load the config file from the config directory and verify it.
|
||||
Config c;
|
||||
c.setup("", true, false, true);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_DEBUG_LOGFILE).values().size() == 1);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_DEBUG_LOGFILE).values()[0] ==
|
||||
"/Users/dummy/xrpld/config/log/debug.log");
|
||||
|
||||
// Restore the environment variables.
|
||||
h ? setenv("HOME", h, 1) : unsetenv("HOME");
|
||||
x ? setenv("XDG_CONFIG_HOME", x, 1)
|
||||
: unsetenv("XDG_CONFIG_HOME");
|
||||
}
|
||||
|
||||
// The XDG config directory is not set: the config file must be in a
|
||||
// subdirectory named .config followed by the system name.
|
||||
{
|
||||
beast::temp_dir tc;
|
||||
|
||||
// Set only the HOME environment variable.
|
||||
char const* h = getenv("HOME");
|
||||
setenv("HOME", tc.path().c_str(), 1);
|
||||
char const* x = getenv("XDG_CONFIG_HOME");
|
||||
unsetenv("XDG_CONFIG_HOME");
|
||||
|
||||
// Create the config file in '${HOME}/.config/[systemName]'.
|
||||
std::string s = ".config";
|
||||
path p = tc.file(s);
|
||||
create_directory(p);
|
||||
s += "/" + systemName();
|
||||
p = tc.file(s);
|
||||
create_directory(p);
|
||||
p = tc.file(s + "/" + configFile);
|
||||
std::ofstream o(p.string());
|
||||
o << detail::configContents("", "");
|
||||
o.close();
|
||||
|
||||
// Load the config file from the config directory and verify it.
|
||||
Config c;
|
||||
c.setup("", true, false, true);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_DEBUG_LOGFILE).values().size() == 1);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_DEBUG_LOGFILE).values()[0] ==
|
||||
"/Users/dummy/xrpld/config/log/debug.log");
|
||||
|
||||
// Restore the environment variables.
|
||||
h ? setenv("HOME", h, 1) : unsetenv("HOME");
|
||||
if (x)
|
||||
setenv("XDG_CONFIG_HOME", x, 1);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// Restore the current working directory.
|
||||
current_path(cwd);
|
||||
}
|
||||
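The testConfigFile cases above check three locations for the configuration file, under both the xrpld.cfg and legacy rippled.cfg names. A rough sketch of that lookup order follows; the helper name is hypothetical, and the precedence between locations is inferred from what the test sets up, not taken from the loader itself.

```
#include <boost/filesystem.hpp>

#include <cstdlib>
#include <string>

// Rough sketch of the lookup order the testConfigFile cases above exercise.
// The systemName() subdirectory and the two environment variables come from
// the test; this helper itself is hypothetical and not part of the code base.
inline boost::filesystem::path
sketchConfigSearch(std::string const& configFile, std::string const& system)
{
    namespace fs = boost::filesystem;

    // 1. Current working directory.
    if (fs::exists(fs::current_path() / configFile))
        return fs::current_path() / configFile;

    // 2. ${XDG_CONFIG_HOME}/<systemName>/<configFile>, when the variable is set.
    if (char const* xdg = std::getenv("XDG_CONFIG_HOME"))
    {
        fs::path p = fs::path(xdg) / system / configFile;
        if (fs::exists(p))
            return p;
    }
    // 3. Otherwise ${HOME}/.config/<systemName>/<configFile>.
    else if (char const* home = std::getenv("HOME"))
    {
        fs::path p = fs::path(home) / ".config" / system / configFile;
        if (fs::exists(p))
            return p;
    }
    return {};  // fall back to defaults elsewhere
}
```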
void
|
||||
testDbPath()
|
||||
{
|
||||
testcase("database_path");
|
||||
@@ -326,11 +448,16 @@ port_wss_admin
|
||||
{
|
||||
// read from file absolute path
|
||||
auto const cwd = current_path();
|
||||
xrpl::detail::DirGuard const g0(*this, "test_db");
|
||||
detail::DirGuard const g0(*this, "test_db");
|
||||
path const dataDirRel("test_data_dir");
|
||||
path const dataDirAbs(cwd / g0.subdir() / dataDirRel);
|
||||
detail::RippledCfgGuard const g(
|
||||
*this, g0.subdir(), dataDirAbs, "", false);
|
||||
detail::FileCfgGuard const g(
|
||||
*this,
|
||||
g0.subdir(),
|
||||
dataDirAbs,
|
||||
Config::configFileName,
|
||||
"",
|
||||
false);
|
||||
auto const& c(g.config());
|
||||
BEAST_EXPECT(g.dataDirExists());
|
||||
BEAST_EXPECT(g.configFileExists());
|
||||
@@ -339,7 +466,8 @@ port_wss_admin
|
||||
{
|
||||
// read from file relative path
|
||||
std::string const dbPath("my_db");
|
||||
detail::RippledCfgGuard const g(*this, "test_db", dbPath, "");
|
||||
detail::FileCfgGuard const g(
|
||||
*this, "test_db", dbPath, Config::configFileName, "");
|
||||
auto const& c(g.config());
|
||||
std::string const nativeDbPath = absolute(path(dbPath)).string();
|
||||
BEAST_EXPECT(g.dataDirExists());
|
||||
@@ -348,7 +476,8 @@ port_wss_admin
|
||||
}
|
||||
{
|
||||
// read from file no path
|
||||
detail::RippledCfgGuard const g(*this, "test_db", "", "");
|
||||
detail::FileCfgGuard const g(
|
||||
*this, "test_db", "", Config::configFileName, "");
|
||||
auto const& c(g.config());
|
||||
std::string const nativeDbPath =
|
||||
absolute(g.subdir() / path(Config::databaseDirName)).string();
|
||||
@@ -378,13 +507,13 @@ port_wss_admin
|
||||
|
||||
{
|
||||
Config c;
|
||||
static boost::format configTemplate(R"rippleConfig(
|
||||
static boost::format configTemplate(R"xrpldConfig(
|
||||
[validation_seed]
|
||||
%1%
|
||||
|
||||
[validator_token]
|
||||
%2%
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
std::string error;
|
||||
auto const expectedError =
|
||||
"Cannot have both [validation_seed] "
|
||||
@@ -410,10 +539,10 @@ port_wss_admin
|
||||
Config c;
|
||||
try
|
||||
{
|
||||
c.loadFromString(R"rippleConfig(
|
||||
c.loadFromString(R"xrpldConfig(
|
||||
[network_id]
|
||||
main
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
}
|
||||
catch (std::runtime_error& e)
|
||||
{
|
||||
@@ -425,8 +554,8 @@ main
|
||||
|
||||
try
|
||||
{
|
||||
c.loadFromString(R"rippleConfig(
|
||||
)rippleConfig");
|
||||
c.loadFromString(R"xrpldConfig(
|
||||
)xrpldConfig");
|
||||
}
|
||||
catch (std::runtime_error& e)
|
||||
{
|
||||
@@ -438,10 +567,10 @@ main
|
||||
|
||||
try
|
||||
{
|
||||
c.loadFromString(R"rippleConfig(
|
||||
c.loadFromString(R"xrpldConfig(
|
||||
[network_id]
|
||||
255
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
}
|
||||
catch (std::runtime_error& e)
|
||||
{
|
||||
@@ -453,10 +582,10 @@ main
|
||||
|
||||
try
|
||||
{
|
||||
c.loadFromString(R"rippleConfig(
|
||||
c.loadFromString(R"xrpldConfig(
|
||||
[network_id]
|
||||
10000
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
}
|
||||
catch (std::runtime_error& e)
|
||||
{
|
||||
@@ -516,7 +645,7 @@ main
|
||||
{
|
||||
// load validators from config into single section
|
||||
Config c;
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[validators]
|
||||
n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7
|
||||
n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj
|
||||
@@ -525,7 +654,7 @@ n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C
|
||||
[validator_keys]
|
||||
nHUhG1PgAG8H8myUENypM35JgfqXAKNQvRVVAFDRzJrny5eZN8d5
|
||||
nHBu9PTL9dn2GuZtdW4U2WzBwffyX9qsQCd9CNU4Z5YG3PQfViM8
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
c.loadFromString(toLoad);
|
||||
BEAST_EXPECT(c.legacy("validators_file").empty());
|
||||
BEAST_EXPECT(c.section(SECTION_VALIDATORS).values().size() == 5);
|
||||
@@ -534,9 +663,9 @@ nHBu9PTL9dn2GuZtdW4U2WzBwffyX9qsQCd9CNU4Z5YG3PQfViM8
|
||||
{
|
||||
// load validator list sites and keys from config
|
||||
Config c;
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[validator_list_sites]
|
||||
ripplevalidators.com
|
||||
xrplvalidators.com
|
||||
trustthesevalidators.gov
|
||||
|
||||
[validator_list_keys]
|
||||
@@ -544,13 +673,13 @@ trustthesevalidators.gov
|
||||
|
||||
[validator_list_threshold]
|
||||
1
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
c.loadFromString(toLoad);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_VALIDATOR_LIST_SITES).values().size() == 2);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_VALIDATOR_LIST_SITES).values()[0] ==
|
||||
"ripplevalidators.com");
|
||||
"xrplvalidators.com");
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_VALIDATOR_LIST_SITES).values()[1] ==
|
||||
"trustthesevalidators.gov");
|
||||
@@ -570,9 +699,9 @@ trustthesevalidators.gov
|
||||
{
|
||||
// load validator list sites and keys from config
|
||||
Config c;
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[validator_list_sites]
|
||||
ripplevalidators.com
|
||||
xrplvalidators.com
|
||||
trustthesevalidators.gov
|
||||
|
||||
[validator_list_keys]
|
||||
@@ -580,13 +709,13 @@ trustthesevalidators.gov
|
||||
|
||||
[validator_list_threshold]
|
||||
0
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
c.loadFromString(toLoad);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_VALIDATOR_LIST_SITES).values().size() == 2);
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_VALIDATOR_LIST_SITES).values()[0] ==
|
||||
"ripplevalidators.com");
|
||||
"xrplvalidators.com");
|
||||
BEAST_EXPECT(
|
||||
c.section(SECTION_VALIDATOR_LIST_SITES).values()[1] ==
|
||||
"trustthesevalidators.gov");
|
||||
@@ -607,9 +736,9 @@ trustthesevalidators.gov
|
||||
// load should throw if [validator_list_threshold] is greater than
|
||||
// the number of [validator_list_keys]
|
||||
Config c;
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[validator_list_sites]
|
||||
ripplevalidators.com
|
||||
xrplvalidators.com
|
||||
trustthesevalidators.gov
|
||||
|
||||
[validator_list_keys]
|
||||
@@ -617,7 +746,7 @@ trustthesevalidators.gov
|
||||
|
||||
[validator_list_threshold]
|
||||
2
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
std::string error;
|
||||
auto const expectedError =
|
||||
"Value in config section [validator_list_threshold] exceeds "
|
||||
@@ -636,9 +765,9 @@ trustthesevalidators.gov
|
||||
{
|
||||
// load should throw if [validator_list_threshold] is malformed
|
||||
Config c;
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[validator_list_sites]
|
||||
ripplevalidators.com
|
||||
xrplvalidators.com
|
||||
trustthesevalidators.gov
|
||||
|
||||
[validator_list_keys]
|
||||
@@ -646,7 +775,7 @@ trustthesevalidators.gov
|
||||
|
||||
[validator_list_threshold]
|
||||
value = 2
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
std::string error;
|
||||
auto const expectedError =
|
||||
"Config section [validator_list_threshold] should contain "
|
||||
@@ -665,9 +794,9 @@ value = 2
|
||||
{
|
||||
// load should throw if [validator_list_threshold] is negative
|
||||
Config c;
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[validator_list_sites]
|
||||
ripplevalidators.com
|
||||
xrplvalidators.com
|
||||
trustthesevalidators.gov
|
||||
|
||||
[validator_list_keys]
|
||||
@@ -675,7 +804,7 @@ trustthesevalidators.gov
|
||||
|
||||
[validator_list_threshold]
|
||||
-1
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
bool error = false;
|
||||
try
|
||||
{
|
||||
@@ -692,11 +821,11 @@ trustthesevalidators.gov
|
||||
// load should throw if [validator_list_sites] is configured but
|
||||
// [validator_list_keys] is not
|
||||
Config c;
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[validator_list_sites]
|
||||
ripplevalidators.com
|
||||
xrplvalidators.com
|
||||
trustthesevalidators.gov
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
std::string error;
|
||||
auto const expectedError =
|
||||
"[validator_list_keys] config section is missing";
|
||||
@@ -736,8 +865,13 @@ trustthesevalidators.gov
|
||||
std::string const valFileName = "validators.txt";
|
||||
detail::ValidatorsTxtGuard const vtg(
|
||||
*this, "test_cfg", valFileName);
|
||||
detail::RippledCfgGuard const rcg(
|
||||
*this, vtg.subdir(), "", valFileName, false);
|
||||
detail::FileCfgGuard const rcg(
|
||||
*this,
|
||||
vtg.subdir(),
|
||||
"",
|
||||
Config::configFileName,
|
||||
valFileName,
|
||||
false);
|
||||
BEAST_EXPECT(vtg.validatorsFileExists());
|
||||
BEAST_EXPECT(rcg.configFileExists());
|
||||
auto const& c(rcg.config());
|
||||
@@ -758,8 +892,13 @@ trustthesevalidators.gov
|
||||
detail::ValidatorsTxtGuard const vtg(
|
||||
*this, "test_cfg", "validators.txt");
|
||||
auto const valFilePath = ".." / vtg.subdir() / "validators.txt";
|
||||
detail::RippledCfgGuard const rcg(
|
||||
*this, vtg.subdir(), "", valFilePath, false);
|
||||
detail::FileCfgGuard const rcg(
|
||||
*this,
|
||||
vtg.subdir(),
|
||||
"",
|
||||
Config::configFileName,
|
||||
valFilePath,
|
||||
false);
|
||||
BEAST_EXPECT(vtg.validatorsFileExists());
|
||||
BEAST_EXPECT(rcg.configFileExists());
|
||||
auto const& c(rcg.config());
|
||||
@@ -778,8 +917,8 @@ trustthesevalidators.gov
|
||||
// load from validators file in default location
|
||||
detail::ValidatorsTxtGuard const vtg(
|
||||
*this, "test_cfg", "validators.txt");
|
||||
detail::RippledCfgGuard const rcg(
|
||||
*this, vtg.subdir(), "", "", false);
|
||||
detail::FileCfgGuard const rcg(
|
||||
*this, vtg.subdir(), "", Config::configFileName, "", false);
|
||||
BEAST_EXPECT(vtg.validatorsFileExists());
|
||||
BEAST_EXPECT(rcg.configFileExists());
|
||||
auto const& c(rcg.config());
|
||||
@@ -803,8 +942,13 @@ trustthesevalidators.gov
|
||||
detail::ValidatorsTxtGuard const vtgDefault(
|
||||
*this, vtg.subdir(), "validators.txt", false);
|
||||
BEAST_EXPECT(vtgDefault.validatorsFileExists());
|
||||
detail::RippledCfgGuard const rcg(
|
||||
*this, vtg.subdir(), "", vtg.validatorsFile(), false);
|
||||
detail::FileCfgGuard const rcg(
|
||||
*this,
|
||||
vtg.subdir(),
|
||||
"",
|
||||
Config::configFileName,
|
||||
vtg.validatorsFile(),
|
||||
false);
|
||||
BEAST_EXPECT(rcg.configFileExists());
|
||||
auto const& c(rcg.config());
|
||||
BEAST_EXPECT(c.legacy("validators_file") == vtg.validatorsFile());
|
||||
@@ -821,7 +965,7 @@ trustthesevalidators.gov
|
||||
|
||||
{
|
||||
// load validators from both config and validators file
|
||||
boost::format cc(R"rippleConfig(
|
||||
boost::format cc(R"xrpldConfig(
|
||||
[validators_file]
|
||||
%1%
|
||||
|
||||
@@ -837,12 +981,12 @@ nHB1X37qrniVugfQcuBTAjswphC1drx7QjFFojJPZwKHHnt8kU7v
|
||||
nHUkAWDR4cB8AgPg7VXMX6et8xRTQb2KJfgv1aBEXozwrawRKgMB
|
||||
|
||||
[validator_list_sites]
|
||||
ripplevalidators.com
|
||||
xrplvalidators.com
|
||||
trustthesevalidators.gov
|
||||
|
||||
[validator_list_keys]
|
||||
021A99A537FDEBC34E4FCA03B39BEADD04299BB19E85097EC92B15A3518801E566
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
detail::ValidatorsTxtGuard const vtg(
|
||||
*this, "test_cfg", "validators.cfg");
|
||||
BEAST_EXPECT(vtg.validatorsFileExists());
|
||||
@@ -861,14 +1005,14 @@ trustthesevalidators.gov
|
||||
}
|
||||
{
|
||||
// load should throw if [validator_list_threshold] is present both
|
||||
// in rippled cfg and validators file
|
||||
boost::format cc(R"rippleConfig(
|
||||
// in xrpld.cfg and validators file
|
||||
boost::format cc(R"xrpldConfig(
|
||||
[validators_file]
|
||||
%1%
|
||||
|
||||
[validator_list_threshold]
|
||||
1
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
std::string error;
|
||||
detail::ValidatorsTxtGuard const vtg(
|
||||
*this, "test_cfg", "validators.cfg");
|
||||
@@ -890,7 +1034,7 @@ trustthesevalidators.gov
|
||||
}
|
||||
{
|
||||
// load should throw if [validators], [validator_keys] and
|
||||
// [validator_list_keys] are missing from rippled cfg and
|
||||
// [validator_list_keys] are missing from xrpld.cfg and
|
||||
// validators file
|
||||
Config c;
|
||||
boost::format cc("[validators_file]\n%1%\n");
|
||||
@@ -920,9 +1064,13 @@ trustthesevalidators.gov
|
||||
void
|
||||
testSetup(bool explicitPath)
|
||||
{
|
||||
detail::RippledCfgGuard const cfg(
|
||||
*this, "testSetup", explicitPath ? "test_db" : "", "");
|
||||
/* RippledCfgGuard has a Config object that gets loaded on
|
||||
detail::FileCfgGuard const cfg(
|
||||
*this,
|
||||
"testSetup",
|
||||
explicitPath ? "test_db" : "",
|
||||
Config::configFileName,
|
||||
"");
|
||||
/* FileCfgGuard has a Config object that gets loaded on
|
||||
construction, but Config::setup is not reentrant, so we
|
||||
need a fresh config for every test case, so ignore it.
|
||||
*/
|
||||
@@ -1039,7 +1187,8 @@ trustthesevalidators.gov
|
||||
void
|
||||
testPort()
|
||||
{
|
||||
detail::RippledCfgGuard const cfg(*this, "testPort", "", "");
|
||||
detail::FileCfgGuard const cfg(
|
||||
*this, "testPort", "", Config::configFileName, "");
|
||||
auto const& conf = cfg.config();
|
||||
if (!BEAST_EXPECT(conf.exists("port_rpc")))
|
||||
return;
|
||||
@@ -1065,8 +1214,14 @@ trustthesevalidators.gov
|
||||
|
||||
try
|
||||
{
|
||||
detail::RippledCfgGuard const cfg(
|
||||
*this, "testPort", "", "", true, contents);
|
||||
detail::FileCfgGuard const cfg(
|
||||
*this,
|
||||
"testPort",
|
||||
"",
|
||||
Config::configFileName,
|
||||
"",
|
||||
true,
|
||||
contents);
|
||||
BEAST_EXPECT(false);
|
||||
}
|
||||
catch (std::exception const& ex)
|
||||
@@ -1377,9 +1532,9 @@ r.ripple.com:51235
|
||||
for (auto& [unit, sec, val, shouldPass] : units)
|
||||
{
|
||||
Config c;
|
||||
std::string toLoad(R"rippleConfig(
|
||||
std::string toLoad(R"xrpldConfig(
|
||||
[amendment_majority_time]
|
||||
)rippleConfig");
|
||||
)xrpldConfig");
|
||||
toLoad += std::to_string(val) + space + unit;
|
||||
space = space == "" ? " " : "";
|
||||
|
||||
@@ -1480,6 +1635,7 @@ r.ripple.com:51235
|
||||
run() override
|
||||
{
|
||||
testLegacy();
|
||||
testConfigFile();
|
||||
testDbPath();
|
||||
testValidatorKeys();
|
||||
testValidatorsFile();
|
||||
|
||||
@@ -18,14 +18,14 @@ namespace csf {
|
||||
- Comparison : T a, b; bool res = a < b
|
||||
- Addition: T a, b; T c = a + b;
|
||||
- Multiplication : T a, std::size_t b; T c = a * b;
|
||||
- Divison: T a; std::size_t b; T c = a/b;
|
||||
- Division: T a; std::size_t b; T c = a/b;
|
||||
|
||||
|
||||
*/
|
||||
template <class T, class Compare = std::less<T>>
|
||||
class Histogram
|
||||
{
|
||||
// TODO: Consider logarithimic bins around expected median if this becomes
|
||||
// TODO: Consider logarithmic bins around expected median if this becomes
|
||||
// unscaleable
|
||||
std::map<T, std::size_t, Compare> counts_;
|
||||
std::size_t samples = 0;
|
||||
|
||||
@@ -31,7 +31,7 @@ struct Rate
|
||||
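The Histogram comment above spells out the operations its value type T must support. As a quick illustration, std::chrono::milliseconds meets all four requirements; whether the csf code instantiates Histogram with a duration type is an assumption made purely for this example.

```
#include <chrono>
#include <cstddef>

// std::chrono::milliseconds supports every operation the Histogram comment
// above requires of T. Instantiating Histogram with a duration type here is
// an assumption for illustration only.
inline void
histogramTypeRequirements()
{
    using T = std::chrono::milliseconds;
    T a{5}, b{7};
    std::size_t n = 3;

    bool res = a < b;  // Comparison
    T sum = a + b;     // Addition
    T mul = a * n;     // Multiplication by std::size_t
    T div = a / n;     // Division by std::size_t
    (void)res; (void)sum; (void)mul; (void)div;
}
```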
/** Submits transactions to a specified peer
|
||||
|
||||
Submits successive transactions beginning at start, then spaced according
|
||||
to succesive calls of distribution(), until stop.
|
||||
to successive calls of distribution(), until stop.
|
||||
|
||||
@tparam Distribution is a `UniformRandomBitGenerator` from the STL that
|
||||
is used by random distributions to generate random samples
|
||||
|
||||
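The comment above describes submissions "spaced according to successive calls of distribution()". The sketch below illustrates that spacing idea with a standard UniformRandomBitGenerator and an exponential gap distribution; it is not the csf::Submitter API itself, and the names and the 50ms mean gap are arbitrary choices.

```
#include <chrono>
#include <random>
#include <vector>

// Illustration of the spacing described above: each gap between submissions
// comes from a distribution driven by a UniformRandomBitGenerator
// (std::mt19937 here). Not the csf::Submitter API; names and constants are
// arbitrary.
std::vector<std::chrono::milliseconds>
exampleSubmitTimes(std::chrono::milliseconds start, std::chrono::milliseconds stop)
{
    std::mt19937 gen{42};
    std::exponential_distribution<double> gap{1.0 / 50.0};  // mean gap of 50ms

    std::vector<std::chrono::milliseconds> times;
    for (auto t = start; t < stop;
         t += std::chrono::milliseconds{1 + static_cast<long>(gap(gen))})
        times.push_back(t);
    return times;
}
```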
@@ -1,239 +0,0 @@
|
||||
#include <test/json/TestOutputSuite.h>
|
||||
|
||||
#include <xrpl/beast/unit_test.h>
|
||||
#include <xrpl/json/Object.h>
|
||||
|
||||
namespace Json {
|
||||
|
||||
class JsonObject_test : public xrpl::test::TestOutputSuite
|
||||
{
|
||||
void
|
||||
setup(std::string const& testName)
|
||||
{
|
||||
testcase(testName);
|
||||
output_.clear();
|
||||
}
|
||||
|
||||
std::unique_ptr<WriterObject> writerObject_;
|
||||
|
||||
Object&
|
||||
makeRoot()
|
||||
{
|
||||
writerObject_ =
|
||||
std::make_unique<WriterObject>(stringWriterObject(output_));
|
||||
return **writerObject_;
|
||||
}
|
||||
|
||||
void
|
||||
expectResult(std::string const& expected)
|
||||
{
|
||||
writerObject_.reset();
|
||||
TestOutputSuite::expectResult(expected);
|
||||
}
|
||||
|
||||
public:
|
||||
void
|
||||
testTrivial()
|
||||
{
|
||||
setup("trivial");
|
||||
|
||||
{
|
||||
auto& root = makeRoot();
|
||||
(void)root;
|
||||
}
|
||||
expectResult("{}");
|
||||
}
|
||||
|
||||
void
|
||||
testSimple()
|
||||
{
|
||||
setup("simple");
|
||||
{
|
||||
auto& root = makeRoot();
|
||||
root["hello"] = "world";
|
||||
root["skidoo"] = 23;
|
||||
root["awake"] = false;
|
||||
root["temperature"] = 98.6;
|
||||
}
|
||||
|
||||
expectResult(
|
||||
"{\"hello\":\"world\","
|
||||
"\"skidoo\":23,"
|
||||
"\"awake\":false,"
|
||||
"\"temperature\":98.6}");
|
||||
}
|
||||
|
||||
void
|
||||
testOneSub()
|
||||
{
|
||||
setup("oneSub");
|
||||
{
|
||||
auto& root = makeRoot();
|
||||
root.setArray("ar");
|
||||
}
|
||||
expectResult("{\"ar\":[]}");
|
||||
}
|
||||
|
||||
void
|
||||
testSubs()
|
||||
{
|
||||
setup("subs");
|
||||
{
|
||||
auto& root = makeRoot();
|
||||
|
||||
{
|
||||
// Add an array with three entries.
|
||||
auto array = root.setArray("ar");
|
||||
array.append(23);
|
||||
array.append(false);
|
||||
array.append(23.5);
|
||||
}
|
||||
|
||||
{
|
||||
// Add an object with one entry.
|
||||
auto obj = root.setObject("obj");
|
||||
obj["hello"] = "world";
|
||||
}
|
||||
|
||||
{
|
||||
// Add another object with two entries.
|
||||
Json::Value value;
|
||||
value["h"] = "w";
|
||||
value["f"] = false;
|
||||
root["obj2"] = value;
|
||||
}
|
||||
}
|
||||
|
||||
// Json::Value has an unstable order...
|
||||
auto case1 =
|
||||
"{\"ar\":[23,false,23.5],"
|
||||
"\"obj\":{\"hello\":\"world\"},"
|
||||
"\"obj2\":{\"h\":\"w\",\"f\":false}}";
|
||||
auto case2 =
|
||||
"{\"ar\":[23,false,23.5],"
|
||||
"\"obj\":{\"hello\":\"world\"},"
|
||||
"\"obj2\":{\"f\":false,\"h\":\"w\"}}";
|
||||
writerObject_.reset();
|
||||
BEAST_EXPECT(output_ == case1 || output_ == case2);
|
||||
}
|
||||
|
||||
void
|
||||
testSubsShort()
|
||||
{
|
||||
setup("subsShort");
|
||||
|
||||
{
|
||||
auto& root = makeRoot();
|
||||
|
||||
{
|
||||
// Add an array with three entries.
|
||||
auto array = root.setArray("ar");
|
||||
array.append(23);
|
||||
array.append(false);
|
||||
array.append(23.5);
|
||||
}
|
||||
|
||||
// Add an object with one entry.
|
||||
root.setObject("obj")["hello"] = "world";
|
||||
|
||||
{
|
||||
// Add another object with two entries.
|
||||
auto object = root.setObject("obj2");
|
||||
object.set("h", "w");
|
||||
object.set("f", false);
|
||||
}
|
||||
}
|
||||
expectResult(
|
||||
"{\"ar\":[23,false,23.5],"
|
||||
"\"obj\":{\"hello\":\"world\"},"
|
||||
"\"obj2\":{\"h\":\"w\",\"f\":false}}");
|
||||
}
|
||||
|
||||
void
|
||||
testFailureObject()
|
||||
{
|
||||
{
|
||||
setup("object failure assign");
|
||||
auto& root = makeRoot();
|
||||
auto obj = root.setObject("o1");
|
||||
expectException([&]() { root["fail"] = "complete"; });
|
||||
}
|
||||
{
|
||||
setup("object failure object");
|
||||
auto& root = makeRoot();
|
||||
auto obj = root.setObject("o1");
|
||||
expectException([&]() { root.setObject("o2"); });
|
||||
}
|
||||
{
|
||||
setup("object failure Array");
|
||||
auto& root = makeRoot();
|
||||
auto obj = root.setArray("o1");
|
||||
expectException([&]() { root.setArray("o2"); });
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
testFailureArray()
|
||||
{
|
||||
{
|
||||
setup("array failure append");
|
||||
auto& root = makeRoot();
|
||||
auto array = root.setArray("array");
|
||||
auto subarray = array.appendArray();
|
||||
auto fail = [&]() { array.append("fail"); };
|
||||
expectException(fail);
|
||||
}
|
||||
{
|
||||
setup("array failure appendArray");
|
||||
auto& root = makeRoot();
|
||||
auto array = root.setArray("array");
|
||||
auto subarray = array.appendArray();
|
||||
auto fail = [&]() { array.appendArray(); };
|
||||
expectException(fail);
|
||||
}
|
||||
{
|
||||
setup("array failure appendObject");
|
||||
auto& root = makeRoot();
|
||||
auto array = root.setArray("array");
|
||||
auto subarray = array.appendArray();
|
||||
auto fail = [&]() { array.appendObject(); };
|
||||
expectException(fail);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
testKeyFailure()
|
||||
{
|
||||
setup("repeating keys");
|
||||
auto& root = makeRoot();
|
||||
root.set("foo", "bar");
|
||||
root.set("baz", 0);
|
||||
// setting key again throws in !NDEBUG builds
|
||||
auto set_again = [&]() { root.set("foo", "bar"); };
|
||||
#ifdef NDEBUG
|
||||
set_again();
|
||||
pass();
|
||||
#else
|
||||
expectException(set_again);
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
run() override
|
||||
{
|
||||
testTrivial();
|
||||
testSimple();
|
||||
|
||||
testOneSub();
|
||||
testSubs();
|
||||
testSubsShort();
|
||||
|
||||
testFailureObject();
|
||||
testFailureArray();
|
||||
testKeyFailure();
|
||||
}
|
||||
};
|
||||
|
||||
BEAST_DEFINE_TESTSUITE(JsonObject, json, xrpl);
|
||||
|
||||
} // namespace Json
|
||||
@@ -3,7 +3,6 @@
|
||||
#include <xrpld/rpc/RPCCall.h>
|
||||
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/json/Object.h>
|
||||
#include <xrpl/protocol/ErrorCodes.h>
|
||||
#include <xrpl/protocol/HashPrefix.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
@@ -83,7 +82,7 @@ cmdToJSONRPC(
|
||||
// If paramsObj is not empty, put it in a [params] array.
|
||||
if (paramsObj.begin() != paramsObj.end())
|
||||
{
|
||||
auto& paramsArray = Json::setArray(jv, jss::params);
|
||||
auto& paramsArray = jv[jss::params] = Json::arrayValue;
|
||||
paramsArray.append(paramsObj);
|
||||
}
|
||||
if (paramsObj.isMember(jss::jsonrpc))
|
||||
|
||||
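The replacement line above leans on Json::Value's operator[] plus assignment returning a reference to the freshly created array, so the subsequent append lands inside "params". A hedged sketch of the resulting request shape; the method name and parameter are examples only.

```
#include <xrpl/json/json_value.h>

// Sketch of the idiom adopted above: assigning Json::arrayValue through
// operator[] returns a reference to the new array, so the append goes into
// jv["params"]. Plain string keys stand in for the jss:: constants used in
// the real code; the method name and parameter are examples only.
Json::Value
exampleRequest()
{
    Json::Value jv(Json::objectValue);
    jv["method"] = "server_info";

    Json::Value paramsObj(Json::objectValue);
    paramsObj["counters"] = true;

    auto& paramsArray = jv["params"] = Json::arrayValue;
    paramsArray.append(paramsObj);

    // jv now represents (member order may vary):
    //   {"method":"server_info","params":[{"counters":true}]}
    return jv;
}
```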
@@ -30,6 +30,7 @@ enum class FieldType {
|
||||
CurrencyField,
|
||||
HashField,
|
||||
HashOrObjectField,
|
||||
IssueField,
|
||||
ObjectField,
|
||||
StringField,
|
||||
TwoAccountArrayField,
|
||||
@@ -40,6 +41,8 @@ enum class FieldType {
|
||||
std::vector<std::pair<Json::StaticString, FieldType>> mappings{
|
||||
{jss::account, FieldType::AccountField},
|
||||
{jss::accounts, FieldType::TwoAccountArrayField},
|
||||
{jss::asset, FieldType::IssueField},
|
||||
{jss::asset2, FieldType::IssueField},
|
||||
{jss::authorize, FieldType::AccountField},
|
||||
{jss::authorized, FieldType::AccountField},
|
||||
{jss::credential_type, FieldType::BlobField},
|
||||
@@ -74,24 +77,26 @@ getTypeName(FieldType typeID)
|
||||
{
|
||||
switch (typeID)
|
||||
{
|
||||
case FieldType::UInt32Field:
|
||||
return "number";
|
||||
case FieldType::UInt64Field:
|
||||
return "number";
|
||||
case FieldType::HashField:
|
||||
return "hex string";
|
||||
case FieldType::AccountField:
|
||||
return "AccountID";
|
||||
case FieldType::ArrayField:
|
||||
return "array";
|
||||
case FieldType::BlobField:
|
||||
return "hex string";
|
||||
case FieldType::CurrencyField:
|
||||
return "Currency";
|
||||
case FieldType::ArrayField:
|
||||
return "array";
|
||||
case FieldType::HashField:
|
||||
return "hex string";
|
||||
case FieldType::HashOrObjectField:
|
||||
return "hex string or object";
|
||||
case FieldType::IssueField:
|
||||
return "Issue";
|
||||
case FieldType::TwoAccountArrayField:
|
||||
return "length-2 array of Accounts";
|
||||
case FieldType::UInt32Field:
|
||||
return "number";
|
||||
case FieldType::UInt64Field:
|
||||
return "number";
|
||||
default:
|
||||
Throw<std::runtime_error>(
|
||||
"unknown type " + std::to_string(static_cast<uint8_t>(typeID)));
|
||||
@@ -192,34 +197,37 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
return values;
|
||||
};
|
||||
|
||||
static auto const& badUInt32Values = remove({2, 3});
|
||||
static auto const& badUInt64Values = remove({2, 3});
|
||||
static auto const& badHashValues = remove({2, 3, 7, 8, 16});
|
||||
static auto const& badAccountValues = remove({12});
|
||||
static auto const& badArrayValues = remove({17, 20});
|
||||
static auto const& badBlobValues = remove({3, 7, 8, 16});
|
||||
static auto const& badCurrencyValues = remove({14});
|
||||
static auto const& badArrayValues = remove({17, 20});
|
||||
static auto const& badHashValues = remove({2, 3, 7, 8, 16});
|
||||
static auto const& badIndexValues = remove({12, 16, 18, 19});
|
||||
static auto const& badUInt32Values = remove({2, 3});
|
||||
static auto const& badUInt64Values = remove({2, 3});
|
||||
static auto const& badIssueValues = remove({});
|
||||
|
||||
switch (fieldType)
|
||||
{
|
||||
case FieldType::UInt32Field:
|
||||
return badUInt32Values;
|
||||
case FieldType::UInt64Field:
|
||||
return badUInt64Values;
|
||||
case FieldType::HashField:
|
||||
return badHashValues;
|
||||
case FieldType::AccountField:
|
||||
return badAccountValues;
|
||||
case FieldType::ArrayField:
|
||||
case FieldType::TwoAccountArrayField:
|
||||
return badArrayValues;
|
||||
case FieldType::BlobField:
|
||||
return badBlobValues;
|
||||
case FieldType::CurrencyField:
|
||||
return badCurrencyValues;
|
||||
case FieldType::ArrayField:
|
||||
case FieldType::TwoAccountArrayField:
|
||||
return badArrayValues;
|
||||
case FieldType::HashField:
|
||||
return badHashValues;
|
||||
case FieldType::HashOrObjectField:
|
||||
return badIndexValues;
|
||||
case FieldType::IssueField:
|
||||
return badIssueValues;
|
||||
case FieldType::UInt32Field:
|
||||
return badUInt32Values;
|
||||
case FieldType::UInt64Field:
|
||||
return badUInt64Values;
|
||||
default:
|
||||
Throw<std::runtime_error>(
|
||||
"unknown type " +
|
||||
@@ -236,30 +244,37 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
arr[1u] = "r4MrUGTdB57duTnRs6KbsRGQXgkseGb1b5";
|
||||
return arr;
|
||||
}();
|
||||
static Json::Value const issueObject = []() {
|
||||
Json::Value arr(Json::objectValue);
|
||||
arr[jss::currency] = "XRP";
|
||||
return arr;
|
||||
}();
|
||||
|
||||
auto const typeID = getFieldType(fieldName);
|
||||
switch (typeID)
|
||||
{
|
||||
case FieldType::UInt32Field:
|
||||
return 1;
|
||||
case FieldType::UInt64Field:
|
||||
return 1;
|
||||
case FieldType::HashField:
|
||||
return "5233D68B4D44388F98559DE42903767803EFA7C1F8D01413FC16EE6"
|
||||
"B01403D6D";
|
||||
case FieldType::AccountField:
|
||||
return "r4MrUGTdB57duTnRs6KbsRGQXgkseGb1b5";
|
||||
case FieldType::ArrayField:
|
||||
return Json::arrayValue;
|
||||
case FieldType::BlobField:
|
||||
return "ABCDEF";
|
||||
case FieldType::CurrencyField:
|
||||
return "USD";
|
||||
case FieldType::ArrayField:
|
||||
return Json::arrayValue;
|
||||
case FieldType::HashField:
|
||||
return "5233D68B4D44388F98559DE42903767803EFA7C1F8D01413FC16EE6"
|
||||
"B01403D6D";
|
||||
case FieldType::IssueField:
|
||||
return issueObject;
|
||||
case FieldType::HashOrObjectField:
|
||||
return "5233D68B4D44388F98559DE42903767803EFA7C1F8D01413FC16EE6"
|
||||
"B01403D6D";
|
||||
case FieldType::TwoAccountArrayField:
|
||||
return twoAccountArray;
|
||||
case FieldType::UInt32Field:
|
||||
return 1;
|
||||
case FieldType::UInt64Field:
|
||||
return 1;
|
||||
default:
|
||||
Throw<std::runtime_error>(
|
||||
"unknown type " +
|
||||
@@ -444,7 +459,7 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryInvalid()
|
||||
testInvalid()
|
||||
{
|
||||
testcase("Invalid requests");
|
||||
using namespace test::jtx;
|
||||
@@ -526,7 +541,7 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryAccountRoot()
|
||||
testAccountRoot()
|
||||
{
|
||||
testcase("AccountRoot");
|
||||
using namespace test::jtx;
|
||||
@@ -632,7 +647,147 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryCheck()
|
||||
testAmendments()
|
||||
{
|
||||
testcase("Amendments");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// positive test
|
||||
{
|
||||
Keylet const keylet = keylet::amendments();
|
||||
|
||||
// easier to hack an object into the ledger than generate it
|
||||
// legitimately
|
||||
{
|
||||
auto const amendments = [&](OpenView& view,
|
||||
beast::Journal) -> bool {
|
||||
auto const sle = std::make_shared<SLE>(keylet);
|
||||
|
||||
// Create Amendments vector (enabled amendments)
|
||||
std::vector<uint256> enabledAmendments;
|
||||
enabledAmendments.push_back(
|
||||
uint256::fromVoid("42426C4D4F1009EE67080A9B7965B44656D7"
|
||||
"714D104A72F9B4369F97ABF044EE"));
|
||||
enabledAmendments.push_back(
|
||||
uint256::fromVoid("4C97EBA926031A7CF7D7B36FDE3ED66DDA54"
|
||||
"21192D63DE53FFB46E43B9DC8373"));
|
||||
enabledAmendments.push_back(
|
||||
uint256::fromVoid("03BDC0099C4E14163ADA272C1B6F6FABB448"
|
||||
"CC3E51F522F978041E4B57D9158C"));
|
||||
enabledAmendments.push_back(
|
||||
uint256::fromVoid("35291ADD2D79EB6991343BDA0912269C817D"
|
||||
"0F094B02226C1C14AD2858962ED4"));
|
||||
sle->setFieldV256(
|
||||
sfAmendments, STVector256(enabledAmendments));
|
||||
|
||||
// Create Majorities array
|
||||
STArray majorities;
|
||||
|
||||
auto majority1 = STObject::makeInnerObject(sfMajority);
|
||||
majority1.setFieldH256(
|
||||
sfAmendment,
|
||||
uint256::fromVoid("7BB62DC13EC72B775091E9C71BF8CF97E122"
|
||||
"647693B50C5E87A80DFD6FCFAC50"));
|
||||
majority1.setFieldU32(sfCloseTime, 779561310);
|
||||
majorities.push_back(std::move(majority1));
|
||||
|
||||
auto majority2 = STObject::makeInnerObject(sfMajority);
|
||||
majority2.setFieldH256(
|
||||
sfAmendment,
|
||||
uint256::fromVoid("755C971C29971C9F20C6F080F2ED96F87884"
|
||||
"E40AD19554A5EBECDCEC8A1F77FE"));
|
||||
majority2.setFieldU32(sfCloseTime, 779561310);
|
||||
majorities.push_back(std::move(majority2));
|
||||
|
||||
sle->setFieldArray(sfMajorities, majorities);
|
||||
|
||||
view.rawInsert(sle);
|
||||
return true;
|
||||
};
|
||||
env.app().openLedger().modify(amendments);
|
||||
}
|
||||
|
||||
Json::Value jvParams;
|
||||
jvParams[jss::amendments] = to_string(keylet.key);
|
||||
Json::Value const jrr = env.rpc(
|
||||
"json", "ledger_entry", to_string(jvParams))[jss::result];
|
||||
BEAST_EXPECT(
|
||||
jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Amendments);
|
||||
}
|
||||
|
||||
// negative tests
|
||||
runLedgerEntryTest(env, jss::amendments);
|
||||
}
|
||||
|
||||
void
|
||||
testAMM()
|
||||
{
|
||||
testcase("AMM");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// positive test
|
||||
Account const alice{"alice"};
|
||||
env.fund(XRP(10000), alice);
|
||||
env.close();
|
||||
AMM amm(env, alice, XRP(10), alice["USD"](1000));
|
||||
env.close();
|
||||
|
||||
{
|
||||
Json::Value jvParams;
|
||||
jvParams[jss::amm] = to_string(amm.ammID());
|
||||
auto const result =
|
||||
env.rpc("json", "ledger_entry", to_string(jvParams));
|
||||
BEAST_EXPECT(
|
||||
result.isObject() && result.isMember(jss::result) &&
|
||||
!result[jss::result].isMember(jss::error) &&
|
||||
result[jss::result].isMember(jss::node) &&
|
||||
result[jss::result][jss::node].isMember(
|
||||
sfLedgerEntryType.jsonName) &&
|
||||
result[jss::result][jss::node][sfLedgerEntryType.jsonName] ==
|
||||
jss::AMM);
|
||||
}
|
||||
|
||||
{
|
||||
Json::Value jvParams;
|
||||
Json::Value ammParams(Json::objectValue);
|
||||
{
|
||||
Json::Value obj(Json::objectValue);
|
||||
obj[jss::currency] = "XRP";
|
||||
ammParams[jss::asset] = obj;
|
||||
}
|
||||
{
|
||||
Json::Value obj(Json::objectValue);
|
||||
obj[jss::currency] = "USD";
|
||||
obj[jss::issuer] = alice.human();
|
||||
ammParams[jss::asset2] = obj;
|
||||
}
|
||||
jvParams[jss::amm] = ammParams;
|
||||
auto const result =
|
||||
env.rpc("json", "ledger_entry", to_string(jvParams));
|
||||
BEAST_EXPECT(
|
||||
result.isObject() && result.isMember(jss::result) &&
|
||||
!result[jss::result].isMember(jss::error) &&
|
||||
result[jss::result].isMember(jss::node) &&
|
||||
result[jss::result][jss::node].isMember(
|
||||
sfLedgerEntryType.jsonName) &&
|
||||
result[jss::result][jss::node][sfLedgerEntryType.jsonName] ==
|
||||
jss::AMM);
|
||||
}
|
||||
|
||||
// negative tests
|
||||
runLedgerEntryTest(
|
||||
env,
|
||||
jss::amm,
|
||||
{
|
||||
{jss::asset, "malformedRequest"},
|
||||
{jss::asset2, "malformedRequest"},
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
testCheck()
|
||||
{
|
||||
testcase("Check");
|
||||
using namespace test::jtx;
|
||||
@@ -684,7 +839,7 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryCredentials()
|
||||
testCredentials()
|
||||
{
|
||||
testcase("Credentials");
|
||||
|
||||
@@ -752,7 +907,7 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryDelegate()
|
||||
testDelegate()
|
||||
{
|
||||
testcase("Delegate");
|
||||
|
||||
@@ -807,7 +962,7 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryDepositPreauth()
|
||||
testDepositPreauth()
|
||||
{
|
||||
testcase("Deposit Preauth");
|
||||
|
||||
@@ -868,7 +1023,7 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryDepositPreauthCred()
|
||||
testDepositPreauthCred()
|
||||
{
|
||||
testcase("Deposit Preauth with credentials");
|
||||
|
||||
@@ -1149,7 +1304,7 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryDirectory()
|
||||
testDirectory()
|
||||
{
|
||||
testcase("Directory");
|
||||
using namespace test::jtx;
|
||||
@@ -1303,7 +1458,7 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryEscrow()
|
||||
testEscrow()
|
||||
{
|
||||
testcase("Escrow");
|
||||
using namespace test::jtx;
|
||||
@@ -1365,7 +1520,177 @@ class LedgerEntry_test : public beast::unit_test::suite
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerEntryOffer()
|
||||
testFeeSettings()
|
||||
{
|
||||
testcase("Fee Settings");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// positive test
|
||||
{
|
||||
Keylet const keylet = keylet::fees();
|
||||
Json::Value jvParams;
|
||||
jvParams[jss::fee] = to_string(keylet.key);
|
||||
Json::Value const jrr = env.rpc(
|
||||
"json", "ledger_entry", to_string(jvParams))[jss::result];
|
||||
BEAST_EXPECT(
|
||||
jrr[jss::node][sfLedgerEntryType.jsonName] == jss::FeeSettings);
|
||||
}
|
||||
|
||||
// negative tests
|
||||
runLedgerEntryTest(env, jss::fee);
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerHashes()
|
||||
{
|
||||
testcase("Ledger Hashes");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// positive test
|
||||
{
|
||||
Keylet const keylet = keylet::skip();
|
||||
Json::Value jvParams;
|
||||
jvParams[jss::hashes] = to_string(keylet.key);
|
||||
Json::Value const jrr = env.rpc(
|
||||
"json", "ledger_entry", to_string(jvParams))[jss::result];
|
||||
BEAST_EXPECT(
|
||||
jrr[jss::node][sfLedgerEntryType.jsonName] ==
|
||||
jss::LedgerHashes);
|
||||
}
|
||||
|
||||
// negative tests
|
||||
runLedgerEntryTest(env, jss::hashes);
|
||||
}
|
||||
|
||||
void
|
||||
testNFTokenOffer()
|
||||
{
|
||||
testcase("NFT Offer");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// positive test
|
||||
Account const issuer{"issuer"};
|
||||
Account const buyer{"buyer"};
|
||||
env.fund(XRP(1000), issuer, buyer);
|
||||
|
||||
uint256 const nftokenID0 =
|
||||
token::getNextID(env, issuer, 0, tfTransferable);
|
||||
env(token::mint(issuer, 0), txflags(tfTransferable));
|
||||
env.close();
|
||||
uint256 const offerID = keylet::nftoffer(issuer, env.seq(issuer)).key;
|
||||
env(token::createOffer(issuer, nftokenID0, drops(1)),
|
||||
token::destination(buyer),
|
||||
txflags(tfSellNFToken));
|
||||
|
||||
{
|
||||
Json::Value jvParams;
|
||||
jvParams[jss::nft_offer] = to_string(offerID);
|
||||
Json::Value const jrr = env.rpc(
|
||||
"json", "ledger_entry", to_string(jvParams))[jss::result];
|
||||
BEAST_EXPECT(
|
||||
jrr[jss::node][sfLedgerEntryType.jsonName] ==
|
||||
jss::NFTokenOffer);
|
||||
BEAST_EXPECT(jrr[jss::node][sfOwner.jsonName] == issuer.human());
|
||||
BEAST_EXPECT(
|
||||
jrr[jss::node][sfNFTokenID.jsonName] == to_string(nftokenID0));
|
||||
BEAST_EXPECT(jrr[jss::node][sfAmount.jsonName] == "1");
|
||||
}
|
||||
|
||||
// negative tests
|
||||
runLedgerEntryTest(env, jss::nft_offer);
|
||||
}
|
||||
|
||||
void
|
||||
testNFTokenPage()
|
||||
{
|
||||
testcase("NFT Page");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// positive test
|
||||
Account const issuer{"issuer"};
|
||||
env.fund(XRP(1000), issuer);
|
||||
|
||||
env(token::mint(issuer, 0), txflags(tfTransferable));
|
||||
env.close();
|
||||
|
||||
auto const nftpage = keylet::nftpage_max(issuer);
|
||||
BEAST_EXPECT(env.le(nftpage) != nullptr);
|
||||
|
||||
{
|
||||
Json::Value jvParams;
|
||||
jvParams[jss::nft_page] = to_string(nftpage.key);
|
||||
Json::Value const jrr = env.rpc(
|
||||
"json", "ledger_entry", to_string(jvParams))[jss::result];
|
||||
BEAST_EXPECT(
|
||||
jrr[jss::node][sfLedgerEntryType.jsonName] == jss::NFTokenPage);
|
||||
}
|
||||
|
||||
// negative tests
|
||||
runLedgerEntryTest(env, jss::nft_page);
|
||||
}
|
||||
|
||||
void
|
||||
testNegativeUNL()
|
||||
{
|
||||
testcase("Negative UNL");
|
||||
using namespace test::jtx;
|
||||
Env env{*this};
|
||||
|
||||
// positive test
|
||||
{
|
||||
Keylet const keylet = keylet::negativeUNL();
|
||||
|
||||
// easier to hack an object into the ledger than generate it
|
||||
// legitimately
|
||||
{
|
||||
auto const nUNL = [&](OpenView& view, beast::Journal) -> bool {
|
||||
auto const sle = std::make_shared<SLE>(keylet);
|
||||
|
||||
// Create DisabledValidators array
|
||||
STArray disabledValidators;
|
||||
auto disabledValidator =
|
||||
STObject::makeInnerObject(sfDisabledValidator);
|
||||
auto pubKeyBlob = strUnHex(
|
||||
"ED58F6770DB5DD77E59D28CB650EC3816E2FC95021BB56E720C9A1"
|
||||
"2DA79C58A3AB");
|
||||
disabledValidator.setFieldVL(sfPublicKey, *pubKeyBlob);
|
||||
disabledValidator.setFieldU32(
|
||||
sfFirstLedgerSequence, 91371264);
|
||||
disabledValidators.push_back(std::move(disabledValidator));
|
||||
|
||||
sle->setFieldArray(
|
||||
sfDisabledValidators, disabledValidators);
|
||||
sle->setFieldH256(
|
||||
sfPreviousTxnID,
|
||||
uint256::fromVoid("8D47FFE664BE6C335108DF689537625855A6"
|
||||
"A95160CC6D351341B9"
|
||||
"2624D9C5E3"));
|
||||
sle->setFieldU32(sfPreviousTxnLgrSeq, 91442944);
|
||||
|
||||
view.rawInsert(sle);
|
||||
return true;
|
||||
};
|
||||
env.app().openLedger().modify(nUNL);
|
||||
}
|
||||
|
||||
Json::Value jvParams;
|
||||
jvParams[jss::nunl] = to_string(keylet.key);
|
||||
Json::Value const jrr = env.rpc(
|
||||
"json", "ledger_entry", to_string(jvParams))[jss::result];
|
||||
BEAST_EXPECT(
|
||||
jrr[jss::node][sfLedgerEntryType.jsonName] == jss::NegativeUNL);
|
||||
}
|
||||
|
||||
// negative tests
|
||||
runLedgerEntryTest(env, jss::nunl);
|
||||
}
|
||||
|
||||
void
|
||||
testOffer()
|
||||
{
|
||||
testcase("Offer");
|
||||
using namespace test::jtx;
|
||||
@@ -1413,7 +1738,7 @@ class LedgerEntry_test : public beast::unit_test::suite
}

void
testLedgerEntryPayChan()
testPayChan()
{
testcase("Pay Chan");
using namespace test::jtx;
@@ -1475,7 +1800,7 @@ class LedgerEntry_test : public beast::unit_test::suite
}

void
testLedgerEntryRippleState()
testRippleState()
{
testcase("RippleState");
using namespace test::jtx;
@@ -1626,7 +1951,16 @@ class LedgerEntry_test : public beast::unit_test::suite
}

void
testLedgerEntryTicket()
testSignerList()
{
testcase("Signer List");
using namespace test::jtx;
Env env{*this};
runLedgerEntryTest(env, jss::signer_list);
}

void
testTicket()
{
testcase("Ticket");
using namespace test::jtx;
@@ -1711,7 +2045,7 @@ class LedgerEntry_test : public beast::unit_test::suite
}

void
testLedgerEntryDID()
testDID()
{
testcase("DID");
using namespace test::jtx;
@@ -1848,7 +2182,7 @@ class LedgerEntry_test : public beast::unit_test::suite
}

void
testLedgerEntryMPT()
testMPT()
{
testcase("MPT");
using namespace test::jtx;
@@ -1931,7 +2265,7 @@ class LedgerEntry_test : public beast::unit_test::suite
}

void
testLedgerEntryPermissionedDomain()
testPermissionedDomain()
{
testcase("PermissionedDomain");

@@ -2010,7 +2344,7 @@ class LedgerEntry_test : public beast::unit_test::suite
}

void
testLedgerEntryCLI()
testCLI()
{
testcase("command-line");
using namespace test::jtx;
@@ -2040,25 +2374,33 @@ public:
void
run() override
{
testLedgerEntryInvalid();
testLedgerEntryAccountRoot();
testLedgerEntryCheck();
testLedgerEntryCredentials();
testLedgerEntryDelegate();
testLedgerEntryDepositPreauth();
testLedgerEntryDepositPreauthCred();
testLedgerEntryDirectory();
testLedgerEntryEscrow();
testLedgerEntryOffer();
testLedgerEntryPayChan();
testLedgerEntryRippleState();
testLedgerEntryTicket();
testLedgerEntryDID();
testInvalid();
testAccountRoot();
testAmendments();
testAMM();
testCheck();
testCredentials();
testDelegate();
testDepositPreauth();
testDepositPreauthCred();
testDirectory();
testEscrow();
testFeeSettings();
testLedgerHashes();
testNFTokenOffer();
testNFTokenPage();
testNegativeUNL();
testOffer();
testPayChan();
testRippleState();
testSignerList();
testTicket();
testDID();
testInvalidOracleLedgerEntry();
testOracleLedgerEntry();
testLedgerEntryMPT();
testLedgerEntryPermissionedDomain();
testLedgerEntryCLI();
testMPT();
testPermissionedDomain();
testCLI();
}
};

@@ -2086,7 +2428,7 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite,
}

void
testLedgerEntryBridge()
testBridge()
{
testcase("ledger_entry: bridge");
using namespace test::jtx;
@@ -2177,7 +2519,7 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite,
}

void
testLedgerEntryClaimID()
testClaimID()
{
testcase("ledger_entry: xchain_claim_id");
using namespace test::jtx;
@@ -2235,7 +2577,7 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite,
}

void
testLedgerEntryCreateAccountClaimID()
testCreateAccountClaimID()
{
testcase("ledger_entry: xchain_create_account_claim_id");
using namespace test::jtx;
@@ -2362,9 +2704,9 @@ public:
void
run() override
{
testLedgerEntryBridge();
testLedgerEntryClaimID();
testLedgerEntryCreateAccountClaimID();
testBridge();
testClaimID();
testCreateAccountClaimID();
}
};


@@ -7,7 +7,6 @@
#include <xrpld/rpc/Context.h>

#include <xrpl/basics/chrono.h>
#include <xrpl/json/Object.h>
#include <xrpl/protocol/serialize.h>

namespace xrpl {
@@ -42,10 +41,9 @@ struct LedgerFill
std::optional<NetClock::time_point> closeTime;
};

/** Given a Ledger and options, fill a Json::Object or Json::Value with a
/** Given a Ledger and options, fill a Json::Value with a
description of the ledger.
*/

void
addJson(Json::Value&, LedgerFill const&);

@@ -53,6 +51,10 @@ addJson(Json::Value&, LedgerFill const&);
Json::Value
getJson(LedgerFill const&);

/** Copy all the keys and values from one object into another. */
void
copyFrom(Json::Value& to, Json::Value const& from);

} // namespace xrpl

#endif

@@ -32,10 +32,9 @@ isBinary(LedgerFill const& fill)
return fill.options & LedgerFill::binary;
}

template <class Object>
void
fillJson(
Object& json,
Json::Value& json,
bool closed,
LedgerHeader const& info,
bool bFull,
@@ -78,9 +77,8 @@ fillJson(
}
}

template <class Object>
void
fillJsonBinary(Object& json, bool closed, LedgerHeader const& info)
fillJsonBinary(Json::Value& json, bool closed, LedgerHeader const& info)
{
if (!closed)
json[jss::closed] = false;
@@ -207,11 +205,10 @@ fillJsonTx(
return txJson;
}

template <class Object>
void
fillJsonTx(Object& json, LedgerFill const& fill)
fillJsonTx(Json::Value& json, LedgerFill const& fill)
{
auto&& txns = setArray(json, jss::transactions);
auto& txns = json[jss::transactions] = Json::arrayValue;
auto bBinary = isBinary(fill);
auto bExpanded = isExpanded(fill);

@@ -238,12 +235,11 @@ fillJsonTx(Object& json, LedgerFill const& fill)
}
}

template <class Object>
void
fillJsonState(Object& json, LedgerFill const& fill)
fillJsonState(Json::Value& json, LedgerFill const& fill)
{
auto& ledger = fill.ledger;
auto&& array = Json::setArray(json, jss::accountState);
auto& array = json[jss::accountState] = Json::arrayValue;
auto expanded = isExpanded(fill);
auto binary = isBinary(fill);

@@ -251,7 +247,7 @@ fillJsonState(Object& json, LedgerFill const& fill)
{
if (binary)
{
auto&& obj = appendObject(array);
auto& obj = array.append(Json::objectValue);
obj[jss::hash] = to_string(sle->key());
obj[jss::tx_blob] = serializeHex(*sle);
}
@@ -262,17 +258,16 @@ fillJsonState(Object& json, LedgerFill const& fill)
}
}

template <class Object>
void
fillJsonQueue(Object& json, LedgerFill const& fill)
fillJsonQueue(Json::Value& json, LedgerFill const& fill)
{
auto&& queueData = Json::setArray(json, jss::queue_data);
auto& queueData = json[jss::queue_data] = Json::arrayValue;
auto bBinary = isBinary(fill);
auto bExpanded = isExpanded(fill);

for (auto const& tx : fill.txQueue)
{
auto&& txJson = appendObject(queueData);
auto& txJson = queueData.append(Json::objectValue);
txJson[jss::fee_level] = to_string(tx.feeLevel);
if (tx.lastValid)
txJson[jss::LastLedgerSequence] = *tx.lastValid;
@@ -297,9 +292,8 @@ fillJsonQueue(Object& json, LedgerFill const& fill)
}
}

template <class Object>
void
fillJson(Object& json, LedgerFill const& fill)
fillJson(Json::Value& json, LedgerFill const& fill)
{
// TODO: what happens if bBinary and bExtracted are both set?
// Is there a way to report this back?
@@ -327,7 +321,7 @@ fillJson(Object& json, LedgerFill const& fill)
void
addJson(Json::Value& json, LedgerFill const& fill)
{
auto&& object = Json::addObject(json, jss::ledger);
auto& object = json[jss::ledger] = Json::objectValue;
fillJson(object, fill);

if ((fill.options & LedgerFill::dumpQueue) && !fill.txQueue.empty())
@@ -342,4 +336,20 @@ getJson(LedgerFill const& fill)
return json;
}

void
copyFrom(Json::Value& to, Json::Value const& from)
{
if (!to) // Short circuit this very common case.
to = from;
else
{
// TODO: figure out if there is a way to remove this clause
// or check that it does/needs to do a deep copy
XRPL_ASSERT(from.isObjectOrNull(), "copyFrom : invalid input type");
auto const members = from.getMemberNames();
for (auto const& m : members)
to[m] = from[m];
}
}

} // namespace xrpl

@@ -87,7 +87,7 @@ private:
/// If the node is out of sync during an online_delete healthWait()
/// call, sleep the thread for this time, and continue checking until
/// recovery.
/// See also: "recovery_wait_seconds" in rippled-example.cfg
/// See also: "recovery_wait_seconds" in xrpld-example.cfg
std::chrono::seconds recoveryWaitTime_{5};

// these do not exist upon SHAMapStore creation, but do exist

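As an illustration only (not part of this diff): `recovery_wait_seconds` is the tuning knob the comment above refers to. Assuming it sits alongside the other online_delete settings in the `[node_db]` stanza of the example config, setting it might look roughly like this:

```
[node_db]
# Hypothetical excerpt; the other [node_db] keys are omitted here.
online_delete=512
recovery_wait_seconds=5
```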
@@ -68,6 +68,7 @@ class Config : public BasicConfig
public:
// Settings related to the configuration file location and directories
static char const* const configFileName;
static char const* const configLegacyName;
static char const* const databaseDirName;
static char const* const validatorsFileName;


@@ -221,11 +221,12 @@ getSingleSection(

//------------------------------------------------------------------------------
//
// Config (DEPRECATED)
// Config
//
//------------------------------------------------------------------------------

char const* const Config::configFileName = "rippled.cfg";
char const* const Config::configFileName = "xrpld.cfg";
char const* const Config::configLegacyName = "rippled.cfg";
char const* const Config::databaseDirName = "db";
char const* const Config::validatorsFileName = "validators.txt";

@@ -295,76 +296,78 @@ Config::setup(
bool bSilent,
bool bStandalone)
{
boost::filesystem::path dataDir;
std::string strDbPath, strConfFile;
setupControl(bQuiet, bSilent, bStandalone);

// Determine the config and data directories.
// If the config file is found in the current working
// directory, use the current working directory as the
// config directory and that with "db" as the data
// directory.

setupControl(bQuiet, bSilent, bStandalone);

strDbPath = databaseDirName;

if (!strConf.empty())
strConfFile = strConf;
else
strConfFile = configFileName;
boost::filesystem::path dataDir;

if (!strConf.empty())
{
// --conf=<path> : everything is relative that file.
CONFIG_FILE = strConfFile;
CONFIG_FILE = strConf;
CONFIG_DIR = boost::filesystem::absolute(CONFIG_FILE);
CONFIG_DIR.remove_filename();
dataDir = CONFIG_DIR / strDbPath;
dataDir = CONFIG_DIR / databaseDirName;
}
else
{
CONFIG_DIR = boost::filesystem::current_path();
CONFIG_FILE = CONFIG_DIR / strConfFile;
dataDir = CONFIG_DIR / strDbPath;

// Construct XDG config and data home.
// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
auto const strHome = getEnvVar("HOME");
auto strXdgConfigHome = getEnvVar("XDG_CONFIG_HOME");
auto strXdgDataHome = getEnvVar("XDG_DATA_HOME");

if (boost::filesystem::exists(CONFIG_FILE)
// Can we figure out XDG dirs?
|| (strHome.empty() &&
(strXdgConfigHome.empty() || strXdgDataHome.empty())))
do
{
// Current working directory is fine, put dbs in a subdir.
}
else
{
if (strXdgConfigHome.empty())
// Check if either of the config files exist in the current working
// directory, in which case the databases will be stored in a
// subdirectory.
CONFIG_DIR = boost::filesystem::current_path();
dataDir = CONFIG_DIR / databaseDirName;
CONFIG_FILE = CONFIG_DIR / configFileName;
if (boost::filesystem::exists(CONFIG_FILE))
break;
CONFIG_FILE = CONFIG_DIR / configLegacyName;
if (boost::filesystem::exists(CONFIG_FILE))
break;

// Check if the home directory is set, and optionally the XDG config
// and/or data directories, as the config may be there. See
// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html.
auto const strHome = getEnvVar("HOME");
if (!strHome.empty())
{
// $XDG_CONFIG_HOME was not set, use default based on $HOME.
strXdgConfigHome = strHome + "/.config";
auto strXdgConfigHome = getEnvVar("XDG_CONFIG_HOME");
auto strXdgDataHome = getEnvVar("XDG_DATA_HOME");
if (strXdgConfigHome.empty())
{
// $XDG_CONFIG_HOME was not set, use default based on $HOME.
strXdgConfigHome = strHome + "/.config";
}
if (strXdgDataHome.empty())
{
// $XDG_DATA_HOME was not set, use default based on $HOME.
strXdgDataHome = strHome + "/.local/share";
}

// Check if either of the config files exist in the XDG config
// dir.
dataDir = strXdgDataHome + "/" + systemName();
CONFIG_DIR = strXdgConfigHome + "/" + systemName();
CONFIG_FILE = CONFIG_DIR / configFileName;
if (boost::filesystem::exists(CONFIG_FILE))
break;
CONFIG_FILE = CONFIG_DIR / configLegacyName;
if (boost::filesystem::exists(CONFIG_FILE))
break;
}

if (strXdgDataHome.empty())
{
// $XDG_DATA_HOME was not set, use default based on $HOME.
strXdgDataHome = strHome + "/.local/share";
}

CONFIG_DIR = strXdgConfigHome + "/" + systemName();
CONFIG_FILE = CONFIG_DIR / strConfFile;
dataDir = strXdgDataHome + "/" + systemName();

if (!boost::filesystem::exists(CONFIG_FILE))
{
CONFIG_DIR = "/etc/opt/" + systemName();
CONFIG_FILE = CONFIG_DIR / strConfFile;
dataDir = "/var/opt/" + systemName();
}
}
// As a last resort, check the system config directory.
dataDir = "/var/opt/" + systemName();
CONFIG_DIR = "/etc/opt/" + systemName();
CONFIG_FILE = CONFIG_DIR / configFileName;
if (boost::filesystem::exists(CONFIG_FILE))
break;
CONFIG_FILE = CONFIG_DIR / configLegacyName;
} while (false);
}

// Update default values
@@ -374,11 +377,9 @@ Config::setup(
std::string const dbPath(legacy("database_path"));
if (!dbPath.empty())
dataDir = boost::filesystem::path(dbPath);
else if (RUN_STANDALONE)
dataDir.clear();
}

if (!dataDir.empty())
if (!RUN_STANDALONE)
{
boost::system::error_code ec;
boost::filesystem::create_directories(dataDir, ec);

@@ -373,7 +373,7 @@ command. The key is in the `pubkey_node` value, and is a text string
beginning with the letter `n`. The key is maintained across runs in a
database.

Cluster members are configured in the `rippled.cfg` file under
Cluster members are configured in the `xrpld.cfg` file under
`[cluster_nodes]`. Each member should be configured on a line beginning
with the node public key, followed optionally by a space and a friendly
name.

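For illustration only (not part of this diff): a minimal `[cluster_nodes]` stanza in the format described above — one node public key per line, optionally followed by a space and a friendly name. The keys shown are placeholders, not real validator keys.

```
[cluster_nodes]
# Placeholder keys, shown for format only; use each peer's actual pubkey_node value.
n9KPlaceholderPublicKeyOfPeerOne1111111111111111 core-east
n9LPlaceholderPublicKeyOfPeerTwo2222222222222222
```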
@@ -514,7 +514,7 @@ OverlayImpl::start()
m_peerFinder->addFallbackStrings(base + name, ips);
});

// Add the ips_fixed from the rippled.cfg file
// Add the ips_fixed from the xrpld.cfg file
if (!app_.config().standalone() && !app_.config().IPS_FIXED.empty())
{
m_resolver.resolve(

@@ -94,9 +94,8 @@ public:

/** Apply the Status to a JsonObject
*/
template <class Object>
void
inject(Object& object) const
inject(Json::Value& object) const
{
if (auto ec = toErrorCode())
{

@@ -277,7 +277,7 @@ checkPayment(
app);
if (pf.findPaths(app.config().PATH_SEARCH_OLD))
{
// 4 is the maxium paths
// 4 is the maximum paths
pf.computePathRanks(4);
STPath fullLiquidityPath;
STPathSet paths;

@@ -3,8 +3,6 @@

#include <xrpld/app/main/Application.h>

#include <xrpl/json/Object.h>

namespace xrpl {

Json::Value

@@ -71,16 +71,17 @@ parseAMM(Json::Value const& params, Json::StaticString const fieldName)
return Unexpected(value.error());
}

try
{
auto const issue = issueFromJson(params[jss::asset]);
auto const issue2 = issueFromJson(params[jss::asset2]);
return keylet::amm(issue, issue2).key;
}
catch (std::runtime_error const&)
{
return LedgerEntryHelpers::malformedError("malformedRequest", "");
}
auto const asset = LedgerEntryHelpers::requiredIssue(
params, jss::asset, "malformedRequest");
if (!asset)
return Unexpected(asset.error());

auto const asset2 = LedgerEntryHelpers::requiredIssue(
params, jss::asset2, "malformedRequest");
if (!asset2)
return Unexpected(asset2.error());

return keylet::amm(*asset, *asset2).key;
}

static Expected<uint256, Json::Value>
@@ -424,7 +425,7 @@ parseLoan(Json::Value const& params, Json::StaticString const fieldName)
}

auto const id = LedgerEntryHelpers::requiredUInt256(
params, jss::loan_broker_id, "malformedOwner");
params, jss::loan_broker_id, "malformedLoanBrokerID");
if (!id)
return Unexpected(id.error());
auto const seq = LedgerEntryHelpers::requiredUInt32(

@@ -218,6 +218,29 @@ requiredUInt192(
return required<uint192>(params, fieldName, err, "Hash192");
}

template <>
std::optional<Issue>
parse(Json::Value const& param)
{
try
{
return issueFromJson(param);
}
catch (std::runtime_error const&)
{
return std::nullopt;
}
}

Expected<Issue, Json::Value>
requiredIssue(
Json::Value const& params,
Json::StaticString const fieldName,
std::string const& err)
{
return required<Issue>(params, fieldName, err, "Issue");
}

Expected<STXChainBridge, Json::Value>
parseBridgeFields(Json::Value const& params)
{

@@ -75,6 +75,43 @@ LedgerHandler::check()
return Status::OK;
}

void
LedgerHandler::writeResult(Json::Value& value)
{
if (ledger_)
{
copyFrom(value, result_);
addJson(value, {*ledger_, &context_, options_, queueTxs_});
}
else
{
auto& master = context_.app.getLedgerMaster();
{
auto& closed = value[jss::closed] = Json::objectValue;
addJson(closed, {*master.getClosedLedger(), &context_, 0});
}
{
auto& open = value[jss::open] = Json::objectValue;
addJson(open, {*master.getCurrentLedger(), &context_, 0});
}
}

Json::Value warnings{Json::arrayValue};
if (context_.params.isMember(jss::type))
{
Json::Value& w = warnings.append(Json::objectValue);
w[jss::id] = warnRPC_FIELDS_DEPRECATED;
w[jss::message] =
"Some fields from your request are deprecated. Please check the "
"documentation at "
"https://xrpl.org/docs/references/http-websocket-apis/ "
"and update your request. Field `type` is deprecated.";
}

if (warnings.size())
value[jss::warnings] = std::move(warnings);
}

} // namespace RPC

std::pair<org::xrpl::rpc::v1::GetLedgerResponse, grpc::Status>

@@ -9,7 +9,6 @@
#include <xrpld/rpc/Status.h>
#include <xrpld/rpc/detail/Handler.h>

#include <xrpl/json/Object.h>
#include <xrpl/ledger/ReadView.h>
#include <xrpl/protocol/ApiVersion.h>
#include <xrpl/protocol/jss.h>
@@ -37,9 +36,8 @@ public:
Status
check();

template <class Object>
void
writeResult(Object&);
writeResult(Json::Value&);

static constexpr char name[] = "ledger";

@@ -59,49 +57,6 @@ private:
int options_ = 0;
};

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Implementation.

template <class Object>
void
LedgerHandler::writeResult(Object& value)
{
if (ledger_)
{
Json::copyFrom(value, result_);
addJson(value, {*ledger_, &context_, options_, queueTxs_});
}
else
{
auto& master = context_.app.getLedgerMaster();
{
auto&& closed = Json::addObject(value, jss::closed);
addJson(closed, {*master.getClosedLedger(), &context_, 0});
}
{
auto&& open = Json::addObject(value, jss::open);
addJson(open, {*master.getCurrentLedger(), &context_, 0});
}
}

Json::Value warnings{Json::arrayValue};
if (context_.params.isMember(jss::type))
{
Json::Value& w = warnings.append(Json::objectValue);
w[jss::id] = warnRPC_FIELDS_DEPRECATED;
w[jss::message] =
"Some fields from your request are deprecated. Please check the "
"documentation at "
"https://xrpl.org/docs/references/http-websocket-apis/ "
"and update your request. Field `type` is deprecated.";
}

if (warnings.size())
value[jss::warnings] = std::move(warnings);
}

} // namespace RPC
} // namespace xrpl


|
return Status::OK;
}

template <class Object>
void
writeResult(Object& obj)
writeResult(Json::Value& obj)
{
setVersion(obj, apiVersion_, betaEnabled_);
}