mirror of
https://github.com/XRPLF/clio.git
synced 2025-11-04 20:05:51 +00:00
Compare commits
127 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0c5a69e000 | ||
|
|
ec5d1eb65c | ||
|
|
5b417bdc45 | ||
|
|
ce631a1f5a | ||
|
|
d2c870db92 | ||
|
|
8e17039586 | ||
|
|
25067c97ed | ||
|
|
1310e5dde9 | ||
|
|
0a5bf911c1 | ||
|
|
6015faa0d3 | ||
|
|
e68fd3251a | ||
|
|
c13ac79552 | ||
|
|
b1299792a6 | ||
|
|
2cbf09d6ae | ||
|
|
42cf55fd0e | ||
|
|
e825be24cc | ||
|
|
997742b555 | ||
|
|
031ad411a6 | ||
|
|
486f1f2fd2 | ||
|
|
739dd81981 | ||
|
|
1f900fcf7f | ||
|
|
fc68664b02 | ||
|
|
ffa5c58b32 | ||
|
|
4bf3a228dc | ||
|
|
9091bb06f4 | ||
|
|
41e3176c56 | ||
|
|
bedca85c78 | ||
|
|
39157f8be4 | ||
|
|
3affda8b13 | ||
|
|
8cc2de5643 | ||
|
|
9b74b3f898 | ||
|
|
ea2837749a | ||
|
|
8bd8ab9b8a | ||
|
|
dc89d23e5a | ||
|
|
734c7a5c36 | ||
|
|
b17ef28f55 | ||
|
|
e56bd7b29e | ||
|
|
5bf334e5f7 | ||
|
|
97ef66d130 | ||
|
|
4c9c606202 | ||
|
|
a885551006 | ||
|
|
fae1ec0c8d | ||
|
|
de23f015d6 | ||
|
|
37f9493d15 | ||
|
|
49387059ef | ||
|
|
744af4b639 | ||
|
|
db2b9dac3b | ||
|
|
ccf73dc68c | ||
|
|
3de421c390 | ||
|
|
d4a9560c3f | ||
|
|
983aa29271 | ||
|
|
0ebe92de68 | ||
|
|
eb1ea28e27 | ||
|
|
1764f3524e | ||
|
|
777ae24f62 | ||
|
|
1ada879072 | ||
|
|
e2792f5a0c | ||
|
|
97c431680a | ||
|
|
0b454a2316 | ||
|
|
b7cae53fcd | ||
|
|
ac45cce5bd | ||
|
|
ef39c04e1e | ||
|
|
83a099a547 | ||
|
|
73337d0819 | ||
|
|
816625c44e | ||
|
|
48e87d7c07 | ||
|
|
dfe18ed682 | ||
|
|
92a072d7a8 | ||
|
|
24fca61b56 | ||
|
|
ae8303fdc8 | ||
|
|
709a8463b8 | ||
|
|
84d31986d1 | ||
|
|
d50f229631 | ||
|
|
379c89fb02 | ||
|
|
81f7171368 | ||
|
|
629b35d1dd | ||
|
|
6fc4cee195 | ||
|
|
b01813ac3d | ||
|
|
6bf8c5bc4e | ||
|
|
2ffd98f895 | ||
|
|
3edead32ba | ||
|
|
28980734ae | ||
|
|
ce60c8f64d | ||
|
|
39ef2ae33c | ||
|
|
d83975e750 | ||
|
|
4468302852 | ||
|
|
a704cf7cfe | ||
|
|
05d09cc352 | ||
|
|
ae96ac7baf | ||
|
|
4579fa2f26 | ||
|
|
1e7645419f | ||
|
|
35db5d3da9 | ||
|
|
4e581e659f | ||
|
|
55f0536dca | ||
|
|
a3a15754b4 | ||
|
|
59d7d1bc49 | ||
|
|
5f5648470a | ||
|
|
13afe9373d | ||
|
|
9a79bdc50b | ||
|
|
7d5415e8b0 | ||
|
|
54669420bf | ||
|
|
a62849b89a | ||
|
|
20c2654abc | ||
|
|
37c810f6fa | ||
|
|
d64753c0dd | ||
|
|
92d6687151 | ||
|
|
fa8405df83 | ||
|
|
3d3b8e91b6 | ||
|
|
14a972c8e2 | ||
|
|
166ff63dbc | ||
|
|
b7ae6a0495 | ||
|
|
d0ea9d20ab | ||
|
|
b45b34edb1 | ||
|
|
7ecb894632 | ||
|
|
8de39739fa | ||
|
|
f16a05ae7a | ||
|
|
458fac776c | ||
|
|
af575b1bcf | ||
|
|
ee615a290b | ||
|
|
31cc06d4f4 | ||
|
|
f90dac2f85 | ||
|
|
8a5be14ba8 | ||
|
|
ba6b764e38 | ||
|
|
9939f6e6f4 | ||
|
|
a72aa73afe | ||
|
|
3d02803135 | ||
|
|
3f47b85e3b |
1
.dockerignore
Normal file
1
.dockerignore
Normal file
@@ -0,0 +1 @@
|
||||
build/
|
||||
@@ -6,3 +6,4 @@
|
||||
|
||||
# clang-format
|
||||
e41150248a97e4bdc1cf21b54650c4bb7c63928e
|
||||
2e542e7b0d94451a933c88778461cc8d3d7e6417
|
||||
|
||||
25
.githooks/pre-commit
Executable file
25
.githooks/pre-commit
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
|
||||
|
||||
exec 1>&2
|
||||
|
||||
# paths to check and re-format
|
||||
sources="src unittests"
|
||||
formatter="clang-format -i"
|
||||
|
||||
first=$(git diff $sources)
|
||||
find $sources -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
|
||||
second=$(git diff $sources)
|
||||
changes=$(diff <(echo "$first") <(echo "$second") | wc -l | sed -e 's/^[[:space:]]*//')
|
||||
|
||||
if [ "$changes" != "0" ]; then
|
||||
cat <<\EOF
|
||||
|
||||
WARNING
|
||||
-----------------------------------------------------------------------------
|
||||
Automatically re-formatted code with `clang-format` - commit was aborted.
|
||||
Please manually add any updated files and commit again.
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
13
.github/actions/lint/action.yml
vendored
Normal file
13
.github/actions/lint/action.yml
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
# Github's ubuntu-20.04 image already has clang-format-11 installed
|
||||
- run: |
|
||||
find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i
|
||||
shell: bash
|
||||
|
||||
- name: Check for differences
|
||||
id: assert
|
||||
shell: bash
|
||||
run: |
|
||||
git diff --color --exit-code | tee "clang-format.patch"
|
||||
21
.github/actions/sign/action.yml
vendored
Normal file
21
.github/actions/sign/action.yml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
name: 'Sign packages'
|
||||
runs:
|
||||
using: "composite"
|
||||
|
||||
steps:
|
||||
- name: Sign
|
||||
shell: bash
|
||||
run: |
|
||||
set -ex -o pipefail
|
||||
echo "$GPG_KEY_B64"| base64 -d | gpg --batch --no-tty --allow-secret-key-import --import -
|
||||
unset GPG_KEY_B64
|
||||
export GPG_PASSPHRASE=$(echo $GPG_KEY_PASS_B64 | base64 -di)
|
||||
unset GPG_KEY_PASS_B64
|
||||
export GPG_KEYID=$(gpg --with-colon --list-secret-keys | head -n1 | cut -d : -f 5)
|
||||
for PKG in $(ls *.deb); do
|
||||
dpkg-sig \
|
||||
-g "--no-tty --digest-algo 'sha512' --passphrase '${GPG_PASSPHRASE}' --pinentry-mode=loopback" \
|
||||
-k "${GPG_KEYID}" \
|
||||
--sign builder \
|
||||
$PKG
|
||||
done
|
||||
6
.github/actions/test/Dockerfile
vendored
Normal file
6
.github/actions/test/Dockerfile
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
FROM cassandra:4.0.4
|
||||
|
||||
RUN apt-get update && apt-get install -y postgresql
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
8
.github/actions/test/entrypoint.sh
vendored
Executable file
8
.github/actions/test/entrypoint.sh
vendored
Executable file
@@ -0,0 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
pg_ctlcluster 12 main start
|
||||
su postgres -c"psql -c\"alter user postgres with password 'postgres'\""
|
||||
su cassandra -c "/opt/cassandra/bin/cassandra -R"
|
||||
sleep 90
|
||||
chmod +x ./clio_tests
|
||||
./clio_tests
|
||||
149
.github/workflows/build.yml
vendored
149
.github/workflows/build.yml
vendored
@@ -1,9 +1,9 @@
|
||||
name: Build Clio
|
||||
on:
|
||||
push:
|
||||
branches: [master, develop, develop-next]
|
||||
branches: [master, release/*, develop, develop-next]
|
||||
pull_request:
|
||||
branches: [master, develop, develop-next]
|
||||
branches: [master, release/*, develop, develop-next]
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
@@ -11,40 +11,137 @@ jobs:
|
||||
name: Lint
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: Get source
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- name: Run clang-format
|
||||
uses: XRPLF/clio-gha/lint@main
|
||||
uses: ./.github/actions/lint
|
||||
|
||||
build_clio:
|
||||
name: Build
|
||||
name: Build Clio
|
||||
runs-on: [self-hosted, Linux]
|
||||
needs: lint
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
type:
|
||||
- suffix: deb
|
||||
image: rippleci/clio-dpkg-builder:2022-09-17
|
||||
script: dpkg
|
||||
- suffix: rpm
|
||||
image: rippleci/clio-rpm-builder:2022-09-17
|
||||
script: rpm
|
||||
|
||||
container:
|
||||
image: ${{ matrix.type.image }}
|
||||
|
||||
steps:
|
||||
- name: Get Clio repo
|
||||
uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: clio_src
|
||||
ref: 'develop-next'
|
||||
path: clio
|
||||
|
||||
- name: Get Clio CI repo
|
||||
- name: Clone Clio packaging repo
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: clio_ci
|
||||
repository: 'XRPLF/clio-ci'
|
||||
|
||||
- name: Get GitHub actions repo
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: XRPLF/clio-gha
|
||||
path: gha # must be the same as defined in XRPLF/clio-gha
|
||||
path: clio-packages
|
||||
repository: XRPLF/clio-packages
|
||||
|
||||
- name: Build
|
||||
uses: XRPLF/clio-gha/build@main
|
||||
shell: bash
|
||||
run: |
|
||||
export CLIO_ROOT=$(realpath clio)
|
||||
if [ ${{ matrix.type.suffix }} == "rpm" ]; then
|
||||
source /opt/rh/devtoolset-11/enable
|
||||
fi
|
||||
cmake -S clio-packages -B clio-packages/build -DCLIO_ROOT=$CLIO_ROOT
|
||||
cmake --build clio-packages/build --parallel $(nproc)
|
||||
cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests .
|
||||
mv ./clio-packages/build/*.${{ matrix.type.suffix }} .
|
||||
|
||||
# - name: Artifact clio_tests
|
||||
# uses: actions/upload-artifact@v2
|
||||
# with:
|
||||
# name: clio_output
|
||||
# path: clio_src/build/clio_tests
|
||||
- name: Artifact packages
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: clio_${{ matrix.type.suffix }}_packages
|
||||
path: ${{ github.workspace }}/*.${{ matrix.type.suffix }}
|
||||
|
||||
- name: Artifact clio_tests
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: clio_tests-${{ matrix.type.suffix }}
|
||||
path: ${{ github.workspace }}/clio_tests
|
||||
|
||||
build_dev:
|
||||
name: ${{ matrix.os.name }} test
|
||||
needs: lint
|
||||
continue-on-error: ${{ matrix.os.experimental }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os:
|
||||
- name: ubuntu-22.04
|
||||
experimental: true
|
||||
- name: macos-11
|
||||
experimental: true
|
||||
- name: macos-12
|
||||
experimental: false
|
||||
runs-on: ${{ matrix.os.name }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: clio
|
||||
|
||||
- name: Check Boost cache
|
||||
id: boost
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: boost
|
||||
key: ${{ runner.os }}-boost
|
||||
|
||||
- name: Build boost
|
||||
if: steps.boost.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
|
||||
tar zxf boost_1_77_0.tar.gz
|
||||
mv boost_1_77_0 boost
|
||||
cd boost
|
||||
./bootstrap.sh
|
||||
if [[ ${{ matrix.os.name }} =~ mac ]];then
|
||||
mac_flags='cxxflags="-std=c++14"'
|
||||
fi
|
||||
./b2 ${mac_flags}
|
||||
|
||||
- name: install deps
|
||||
run: |
|
||||
if [[ ${{ matrix.os.name }} =~ mac ]];then
|
||||
brew install pkg-config protobuf openssl ninja cassandra-cpp-driver bison
|
||||
elif [[ ${{matrix.os.name }} =~ ubuntu ]];then
|
||||
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential doxygen bison flex autoconf clang-format
|
||||
fi
|
||||
|
||||
- name: Build clio
|
||||
run: |
|
||||
export BOOST_ROOT=$(pwd)/boost
|
||||
cd clio
|
||||
cmake -B build
|
||||
if ! cmake --build build -j$(nproc); then
|
||||
echo '# 🔥${{ matrix.os.name }}🔥 failed!💥' >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
|
||||
test_clio:
|
||||
name: Test Clio
|
||||
runs-on: [self-hosted, Linux]
|
||||
needs: build_clio
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
suffix: [rpm, deb]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get clio_tests artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: clio_tests-${{ matrix.suffix }}
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 10
|
||||
uses: ./.github/actions/test
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,2 +1,5 @@
|
||||
*clio*.log
|
||||
build/
|
||||
.vscode
|
||||
.python-version
|
||||
config.json
|
||||
|
||||
15
CMake/ClioVersion.cmake
Normal file
15
CMake/ClioVersion.cmake
Normal file
@@ -0,0 +1,15 @@
|
||||
#[===================================================================[
|
||||
read version from source
|
||||
#]===================================================================]
|
||||
|
||||
file (STRINGS src/main/impl/Build.cpp BUILD_INFO)
|
||||
foreach (line_ ${BUILD_INFO})
|
||||
if (line_ MATCHES "versionString[ ]*=[ ]*\"(.+)\"")
|
||||
set (clio_version ${CMAKE_MATCH_1})
|
||||
endif ()
|
||||
endforeach ()
|
||||
if (clio_version)
|
||||
message (STATUS "clio version: ${clio_version}")
|
||||
else ()
|
||||
message (FATAL_ERROR "unable to determine clio version")
|
||||
endif ()
|
||||
@@ -1,31 +0,0 @@
|
||||
set(POSTGRES_INSTALL_DIR ${CMAKE_BINARY_DIR}/postgres)
|
||||
set(POSTGRES_LIBS pq pgcommon pgport)
|
||||
ExternalProject_Add(postgres
|
||||
GIT_REPOSITORY https://github.com/postgres/postgres.git
|
||||
GIT_TAG REL_14_1
|
||||
GIT_SHALLOW 1
|
||||
LOG_CONFIGURE 1
|
||||
LOG_BUILD 1
|
||||
CONFIGURE_COMMAND ./configure --prefix ${POSTGRES_INSTALL_DIR} --without-readline --verbose
|
||||
BUILD_COMMAND ${CMAKE_COMMAND} -E env --unset=MAKELEVEL make VERBOSE=${CMAKE_VERBOSE_MAKEFILE} -j32
|
||||
BUILD_IN_SOURCE 1
|
||||
INSTALL_COMMAND ${CMAKE_COMMAND} -E env make -s --no-print-directory install
|
||||
UPDATE_COMMAND ""
|
||||
BUILD_BYPRODUCTS
|
||||
${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pq${CMAKE_STATIC_LIBRARY_SUFFIX}}
|
||||
${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pgcommon${CMAKE_STATIC_LIBRARY_SUFFIX}}
|
||||
${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pgport${CMAKE_STATIC_LIBRARY_SUFFIX}}
|
||||
)
|
||||
ExternalProject_Get_Property (postgres BINARY_DIR)
|
||||
|
||||
foreach(_lib ${POSTGRES_LIBS})
|
||||
add_library(${_lib} STATIC IMPORTED GLOBAL)
|
||||
add_dependencies(${_lib} postgres)
|
||||
set_target_properties(${_lib} PROPERTIES
|
||||
IMPORTED_LOCATION ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${_lib}.a)
|
||||
set_target_properties(${_lib} PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${POSTGRES_INSTALL_DIR}/include)
|
||||
target_link_libraries(clio PUBLIC ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${_lib}${CMAKE_STATIC_LIBRARY_SUFFIX})
|
||||
endforeach()
|
||||
add_dependencies(clio postgres)
|
||||
target_include_directories(clio PUBLIC ${POSTGRES_INSTALL_DIR}/include)
|
||||
24
CMake/deps/Remove-bitset-operator.patch
Normal file
24
CMake/deps/Remove-bitset-operator.patch
Normal file
@@ -0,0 +1,24 @@
|
||||
From 5cd9d09d960fa489a0c4379880cd7615b1c16e55 Mon Sep 17 00:00:00 2001
|
||||
From: CJ Cobb <ccobb@ripple.com>
|
||||
Date: Wed, 10 Aug 2022 12:30:01 -0400
|
||||
Subject: [PATCH] Remove bitset operator !=
|
||||
|
||||
---
|
||||
src/ripple/protocol/Feature.h | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h
|
||||
index b3ecb099b..6424be411 100644
|
||||
--- a/src/ripple/protocol/Feature.h
|
||||
+++ b/src/ripple/protocol/Feature.h
|
||||
@@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset<detail::numFeatures>
|
||||
public:
|
||||
using base::bitset;
|
||||
using base::operator==;
|
||||
- using base::operator!=;
|
||||
|
||||
using base::all;
|
||||
using base::any;
|
||||
--
|
||||
2.32.0
|
||||
|
||||
11
CMake/deps/SourceLocation.cmake
Normal file
11
CMake/deps/SourceLocation.cmake
Normal file
@@ -0,0 +1,11 @@
|
||||
include(CheckIncludeFileCXX)
|
||||
|
||||
check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE)
|
||||
if(SOURCE_LOCATION_AVAILABLE)
|
||||
target_compile_definitions(clio PUBLIC "HAS_SOURCE_LOCATION")
|
||||
endif()
|
||||
|
||||
check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
|
||||
if(EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
|
||||
target_compile_definitions(clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
|
||||
endif()
|
||||
@@ -10,7 +10,7 @@ if(NOT cassandra)
|
||||
ExternalProject_Add(zlib_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/madler/zlib.git
|
||||
GIT_TAG master
|
||||
GIT_TAG v1.2.12
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
|
||||
)
|
||||
@@ -33,7 +33,7 @@ if(NOT cassandra)
|
||||
ExternalProject_Add(krb5_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/krb5/krb5.git
|
||||
GIT_TAG master
|
||||
GIT_TAG krb5-1.20
|
||||
UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared
|
||||
BUILD_IN_SOURCE 1
|
||||
@@ -66,7 +66,7 @@ if(NOT cassandra)
|
||||
ExternalProject_Add(libuv_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/libuv/libuv.git
|
||||
GIT_TAG v1.x
|
||||
GIT_TAG v1.44.1
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
|
||||
)
|
||||
@@ -89,7 +89,7 @@ if(NOT cassandra)
|
||||
ExternalProject_Add(cassandra_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
|
||||
GIT_TAG master
|
||||
GIT_TAG 2.16.2
|
||||
CMAKE_ARGS
|
||||
-DLIBUV_ROOT_DIR=${BINARY_DIR}
|
||||
-DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
|
||||
|
||||
@@ -10,7 +10,8 @@ if(NOT googletest_POPULATED)
|
||||
add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
|
||||
endif()
|
||||
|
||||
target_link_libraries(clio_tests PUBLIC clio gtest_main)
|
||||
target_link_libraries(clio_tests PUBLIC clio gmock_main)
|
||||
target_include_directories(clio_tests PRIVATE unittests)
|
||||
|
||||
enable_testing()
|
||||
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
set(RIPPLED_REPO "https://github.com/ripple/rippled.git")
|
||||
set(RIPPLED_BRANCH "1.9.0")
|
||||
set(RIPPLED_BRANCH "1.9.2")
|
||||
set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
|
||||
set(patch_command ! grep operator!= src/ripple/protocol/Feature.h || git apply < ${CMAKE_CURRENT_SOURCE_DIR}/CMake/deps/Remove-bitset-operator.patch)
|
||||
message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}")
|
||||
FetchContent_Declare(rippled
|
||||
GIT_REPOSITORY "${RIPPLED_REPO}"
|
||||
GIT_TAG "${RIPPLED_BRANCH}"
|
||||
GIT_SHALLOW ON
|
||||
PATCH_COMMAND "${patch_command}"
|
||||
)
|
||||
|
||||
FetchContent_GetProperties(rippled)
|
||||
|
||||
@@ -11,6 +11,7 @@ ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json
|
||||
Restart=on-failure
|
||||
User=clio
|
||||
Group=clio
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@@ -3,8 +3,14 @@ set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
|
||||
|
||||
install(TARGETS clio_server DESTINATION bin)
|
||||
# install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests?
|
||||
install(FILES example-config.json DESTINATION etc RENAME config.json)
|
||||
|
||||
#install(FILES example-config.json DESTINATION etc RENAME config.json)
|
||||
file(READ example-config.json config)
|
||||
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
|
||||
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
|
||||
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
|
||||
|
||||
configure_file("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
|
||||
|
||||
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
|
||||
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
|
||||
|
||||
|
||||
@@ -1 +1,6 @@
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -Wall -Werror -Wno-dangling-else")
|
||||
target_compile_options(clio
|
||||
PUBLIC -Wall
|
||||
-Werror
|
||||
-Wno-narrowing
|
||||
-Wno-deprecated-declarations
|
||||
-Wno-dangling-else)
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
#define VERSION "@PROJECT_VERSION@"
|
||||
@@ -1,6 +1,10 @@
|
||||
cmake_minimum_required(VERSION 3.16.3)
|
||||
|
||||
project(clio VERSION 0.2.0)
|
||||
project(clio)
|
||||
|
||||
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
|
||||
message(FATAL_ERROR "GCC 11+ required for building clio")
|
||||
endif()
|
||||
|
||||
option(BUILD_TESTS "Build tests" TRUE)
|
||||
|
||||
@@ -10,6 +14,25 @@ if(VERBOSE)
|
||||
set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
|
||||
endif()
|
||||
|
||||
if(NOT GIT_COMMIT_HASH)
|
||||
if(VERBOSE)
|
||||
message("GIT_COMMIT_HASH not provided...looking for git")
|
||||
endif()
|
||||
find_package(Git)
|
||||
if(Git_FOUND)
|
||||
execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE git-ref)
|
||||
if(git-ref)
|
||||
set(BUILD "${git-ref}")
|
||||
message(STATUS "Build version: ${BUILD}")
|
||||
add_definitions(-DCLIO_BUILD="${BUILD}")
|
||||
endif()
|
||||
endif()
|
||||
endif() #git
|
||||
if(PACKAGING)
|
||||
add_definitions(-DPKG=1)
|
||||
endif()
|
||||
|
||||
add_library(clio)
|
||||
target_compile_features(clio PUBLIC cxx_std_20)
|
||||
target_include_directories(clio PUBLIC src)
|
||||
@@ -17,30 +40,32 @@ target_include_directories(clio PUBLIC src)
|
||||
include(FetchContent)
|
||||
include(ExternalProject)
|
||||
include(CMake/settings.cmake)
|
||||
include(CMake/ClioVersion.cmake)
|
||||
include(CMake/deps/rippled.cmake)
|
||||
include(CMake/deps/Boost.cmake)
|
||||
include(CMake/deps/cassandra.cmake)
|
||||
include(CMake/deps/Postgres.cmake)
|
||||
|
||||
# configure_file(CMake/version-config.h include/version.h) # NOTE: Not used, but an idea how to handle versioning.
|
||||
include(CMake/deps/SourceLocation.cmake)
|
||||
|
||||
target_sources(clio PRIVATE
|
||||
## Main
|
||||
src/main/impl/Build.cpp
|
||||
## Backend
|
||||
src/backend/BackendInterface.cpp
|
||||
src/backend/CassandraBackend.cpp
|
||||
src/backend/LayeredCache.cpp
|
||||
src/backend/Pg.cpp
|
||||
src/backend/PostgresBackend.cpp
|
||||
src/backend/SimpleCache.cpp
|
||||
## ETL
|
||||
src/etl/ETLSource.cpp
|
||||
src/etl/ProbingETLSource.cpp
|
||||
src/etl/NFTHelpers.cpp
|
||||
src/etl/ReportingETL.cpp
|
||||
## Subscriptions
|
||||
src/subscriptions/SubscriptionManager.cpp
|
||||
## RPC
|
||||
src/rpc/Errors.cpp
|
||||
src/rpc/RPC.cpp
|
||||
src/rpc/RPCHelpers.cpp
|
||||
src/rpc/Counters.cpp
|
||||
src/rpc/WorkQueue.cpp
|
||||
## RPC Methods
|
||||
# Account
|
||||
src/rpc/handlers/AccountChannels.cpp
|
||||
@@ -51,6 +76,10 @@ target_sources(clio PRIVATE
|
||||
src/rpc/handlers/AccountObjects.cpp
|
||||
src/rpc/handlers/GatewayBalances.cpp
|
||||
src/rpc/handlers/NoRippleCheck.cpp
|
||||
# NFT
|
||||
src/rpc/handlers/NFTHistory.cpp
|
||||
src/rpc/handlers/NFTInfo.cpp
|
||||
src/rpc/handlers/NFTOffers.cpp
|
||||
# Ledger
|
||||
src/rpc/handlers/Ledger.cpp
|
||||
src/rpc/handlers/LedgerData.cpp
|
||||
@@ -61,6 +90,7 @@ target_sources(clio PRIVATE
|
||||
src/rpc/handlers/TransactionEntry.cpp
|
||||
src/rpc/handlers/AccountTx.cpp
|
||||
# Dex
|
||||
src/rpc/handlers/BookChanges.cpp
|
||||
src/rpc/handlers/BookOffers.cpp
|
||||
# Payment Channel
|
||||
src/rpc/handlers/ChannelAuthorize.cpp
|
||||
@@ -69,14 +99,23 @@ target_sources(clio PRIVATE
|
||||
src/rpc/handlers/Subscribe.cpp
|
||||
# Server
|
||||
src/rpc/handlers/ServerInfo.cpp
|
||||
# Utility
|
||||
src/rpc/handlers/Random.cpp)
|
||||
# Utilities
|
||||
src/rpc/handlers/Random.cpp
|
||||
src/config/Config.cpp
|
||||
src/log/Logger.cpp
|
||||
src/util/Taggable.cpp)
|
||||
|
||||
add_executable(clio_server src/main.cpp)
|
||||
add_executable(clio_server src/main/main.cpp)
|
||||
target_link_libraries(clio_server PUBLIC clio)
|
||||
|
||||
if(BUILD_TESTS)
|
||||
add_executable(clio_tests unittests/main.cpp)
|
||||
add_executable(clio_tests
|
||||
unittests/RPCErrors.cpp
|
||||
unittests/Backend.cpp
|
||||
unittests/Logger.cpp
|
||||
unittests/Config.cpp
|
||||
unittests/ProfilerTest.cpp
|
||||
unittests/DOSGuard.cpp)
|
||||
include(CMake/deps/gtest.cmake)
|
||||
endif()
|
||||
|
||||
|
||||
134
CONTRIBUTING.md
Normal file
134
CONTRIBUTING.md
Normal file
@@ -0,0 +1,134 @@
|
||||
# Contributing
|
||||
Thank you for your interest in contributing to the `clio` project 🙏
|
||||
|
||||
To contribute, please:
|
||||
1. Fork the repository under your own user.
|
||||
2. Create a new branch on which to write your changes.
|
||||
3. Write and test your code.
|
||||
4. Ensure that your code compiles with the provided build engine and update the provided build engine as part of your PR where needed and where appropriate.
|
||||
5. Where applicable, write test cases for your code and include those in `unittests`.
|
||||
6. Ensure your code passes automated checks (e.g. clang-format)
|
||||
7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change.). See below for more details.
|
||||
8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template.
|
||||
|
||||
> **Note:** Please make sure you read the [Style guide](#style-guide).
|
||||
|
||||
## Install git hooks
|
||||
Please make sure to run the following command in order to use git hooks that are helpful for `clio` development.
|
||||
|
||||
``` bash
|
||||
git config --local core.hooksPath .githooks
|
||||
```
|
||||
|
||||
## Git commands
|
||||
This sections offers a detailed look at the git commands you will need to use to get your PR submitted.
|
||||
Please note that there are more than one way to do this and these commands are only provided for your convenience.
|
||||
At this point it's assumed that you have already finished working on your feature/bug.
|
||||
|
||||
> **Important:** Before you issue any of the commands below, please hit the `Sync fork` button and make sure your fork's `develop` branch is up to date with the main `clio` repository.
|
||||
|
||||
``` bash
|
||||
# Create a backup of your branch
|
||||
git branch <your feature branch>_bk
|
||||
|
||||
# Rebase and squash commits into one
|
||||
git checkout develop
|
||||
git pull origin develop
|
||||
git checkout <your feature branch>
|
||||
git rebase -i develop
|
||||
```
|
||||
For each commit in the list other than the first one please select `s` to squash.
|
||||
After this is done you will have the opportunity to write a message for the squashed commit.
|
||||
|
||||
> **Hint:** Please use **imperative mood** commit message capitalizing the first word of the subject.
|
||||
|
||||
``` bash
|
||||
# You should now have a single commit on top of a commit in `develop`
|
||||
git log
|
||||
```
|
||||
> **Todo:** In case there are merge conflicts, please resolve them now
|
||||
|
||||
``` bash
|
||||
# Use the same commit message as you did above
|
||||
git commit -m 'Your message'
|
||||
git rebase --continue
|
||||
```
|
||||
|
||||
> **Important:** If you have no GPG keys setup please follow [this tutorial](https://docs.github.com/en/authentication/managing-commit-signature-verification/adding-a-gpg-key-to-your-github-account)
|
||||
|
||||
``` bash
|
||||
# Sign the commit with your GPG key and finally push your changes to the repo
|
||||
git commit --amend -S
|
||||
git push --force
|
||||
```
|
||||
|
||||
## Fixing issues found during code review
|
||||
While your code is in review it's possible that some changes will be requested by the reviewer.
|
||||
This section describes the process of adding your fixes.
|
||||
|
||||
We assume that you already made the required changes on your feature branch.
|
||||
|
||||
``` bash
|
||||
# Add the changed code
|
||||
git add <paths to add>
|
||||
|
||||
# Add a folded commit message (so you can squash them later)
|
||||
# while also signing it with your GPG key
|
||||
git commit -S -m "[FOLD] Your commit message"
|
||||
|
||||
# And finally push your changes
|
||||
git push
|
||||
```
|
||||
## After code review
|
||||
Last but not least, when your PR is approved you still have to `Squash and merge` your code.
|
||||
Luckily there is a button for that towards the bottom of the PR's page on github.
|
||||
|
||||
> **Important:** Please leave the automatically generated link to PR in the subject line **and** in the description field please add `"Fixes #ISSUE_ID"` (replacing `ISSUE_ID` with yours).
|
||||
> **Note:** See [issues](https://github.com/XRPLF/clio/issues) to find the `ISSUE_ID` for the feature/bug you were working on.
|
||||
|
||||
# Style guide
|
||||
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments.
|
||||
|
||||
## Formatting
|
||||
All code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
|
||||
To change your code to conform use `clang-format -i <your changed files>`.
|
||||
|
||||
## Avoid
|
||||
* Proliferation of nearly identical code.
|
||||
* Proliferation of new files and classes unless it improves readability or/and compilation time.
|
||||
* Unmanaged memory allocation and raw pointers.
|
||||
* Macros (unless they add significant value.)
|
||||
* Lambda patterns (unless these add significant value.)
|
||||
* CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments.
|
||||
* Importing new libraries unless there is a very good reason to do so.
|
||||
|
||||
## Seek to
|
||||
* Extend functionality of existing code rather than creating new code.
|
||||
* Prefer readability over terseness where important logic is concerned.
|
||||
* Inline functions that are not used or are not likely to be used elsewhere in the codebase.
|
||||
* Use clear and self-explanatory names for functions, variables, structs and classes.
|
||||
* Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders.
|
||||
* Provide as many comments as you feel that a competent programmer would need to understand what your code does.
|
||||
|
||||
# Maintainers
|
||||
Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc.
|
||||
|
||||
## Code Review
|
||||
PRs must be reviewed by at least one of the maintainers.
|
||||
|
||||
## Adding and Removing
|
||||
New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and a 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected.
|
||||
|
||||
Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote.
|
||||
|
||||
## Existing Maintainers
|
||||
|
||||
* [cjcobb23](https://github.com/cjcobb23) (Ripple)
|
||||
* [legleux](https://github.com/legleux) (Ripple)
|
||||
* [undertome](https://github.com/undertome) (Ripple)
|
||||
* [godexsoft](https://github.com/godexsoft) (Ripple)
|
||||
* [officialfrancismendoza](https://github.com/officialfrancismendoza) (Ripple)
|
||||
|
||||
## Honorable ex-Maintainers
|
||||
|
||||
* [natenichols](https://github.com/natenichols) (ex-Ripple)
|
||||
138
README.md
138
README.md
@@ -1,9 +1,6 @@
|
||||
**Status:** This software is in beta mode. We encourage anyone to try it out and
|
||||
report any issues they discover. Version 1.0 coming soon.
|
||||
|
||||
# Clio
|
||||
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over websocket or JSON-RPC. Validated
|
||||
historical ledger and transaction data is stored in a more space efficient format,
|
||||
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC. Validated
|
||||
historical ledger and transaction data are stored in a more space-efficient format,
|
||||
using up to 4 times less space than rippled. Clio can be configured to store data in Apache Cassandra or ScyllaDB,
|
||||
allowing for scalable read throughput. Multiple Clio nodes can share
|
||||
access to the same dataset, allowing for a highly available cluster of Clio nodes,
|
||||
@@ -12,9 +9,9 @@ without the need for redundant data storage or computation.
|
||||
Clio offers the full rippled API, with the caveat that Clio by default only returns validated data.
|
||||
This means that `ledger_index` defaults to `validated` instead of `current` for all requests.
|
||||
Other non-validated data is also not returned, such as information about queued transactions.
|
||||
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node, and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
|
||||
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
|
||||
|
||||
Clio does not connect to the peer to peer network. Instead, Clio extracts data from a specified rippled node. Running Clio requires access to a rippled node
|
||||
Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
|
||||
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.
|
||||
|
||||
|
||||
@@ -25,13 +22,13 @@ from which data can be extracted. The rippled node does not need to be running o
|
||||
|
||||
## Building
|
||||
|
||||
Clio is built with cmake. Clio requires c++20, and boost 1.75.0 or later.
|
||||
Clio is built with CMake. Clio requires at least GCC-11/clang-14.0.0 (C++20), and Boost 1.75.0.
|
||||
|
||||
Use these instructions to build a Clio executable from source. These instructions were tested on Ubuntu 20.04 LTS.
|
||||
Use these instructions to build a Clio executable from the source. These instructions were tested on Ubuntu 20.04 LTS.
|
||||
|
||||
```
|
||||
```sh
|
||||
# Install dependencies
|
||||
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake
|
||||
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake clang-format
|
||||
|
||||
# Compile Boost
|
||||
wget -O $HOME/boost_1_75_0.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz
|
||||
@@ -49,30 +46,62 @@ Use these instructions to build a Clio executable from source. These instruction
|
||||
```
|
||||
|
||||
## Running
|
||||
`./clio_server config.json`
|
||||
```sh
|
||||
./clio_server config.json
|
||||
```
|
||||
|
||||
Clio needs access to a rippled server. The config files of rippled and Clio need
|
||||
to match in a certain sense.
|
||||
Clio needs to know:
|
||||
- the ip of rippled
|
||||
- the port on which rippled is accepting unencrypted websocket connections
|
||||
- the IP of rippled
|
||||
- the port on which rippled is accepting unencrypted WebSocket connections
|
||||
- the port on which rippled is handling gRPC requests
|
||||
|
||||
rippled needs to open:
|
||||
- a port to accept unencrypted websocket connections
|
||||
- a port to handle gRPC requests, with the ip(s) of Clio specified in the `secure_gateway` entry
|
||||
- a port to handle gRPC requests, with the IP(s) of Clio specified in the `secure_gateway` entry
|
||||
|
||||
The example configs of rippled and Clio are setup such that minimal changes are
|
||||
The example configs of rippled and Clio are set up such that minimal changes are
|
||||
required. When running locally, the only change needed is to uncomment the `port_grpc`
|
||||
section of the rippled config. When running Clio and rippled on separate machines,
|
||||
in addition to uncommenting the `port_grpc` section, a few other steps must be taken:
|
||||
1. change the `ip` of the first entry of `etl_sources` to the ip where your rippled
|
||||
1. change the `ip` of the first entry of `etl_sources` to the IP where your rippled
|
||||
server is running
|
||||
2. open a public, unencrypted websocket port on your rippled server
|
||||
3. change the ip specified in `secure_gateway` of `port_grpc` section of the rippled config
|
||||
to the ip of your Clio server. This entry can take the form of a comma separated list if
|
||||
2. open a public, unencrypted WebSocket port on your rippled server
|
||||
3. change the IP specified in `secure_gateway` of `port_grpc` section of the rippled config
|
||||
to the IP of your Clio server. This entry can take the form of a comma-separated list if
|
||||
you are running multiple Clio nodes.
|
||||
|
||||
|
||||
In addition, the parameter `start_sequence` can be included and configured within the top level of the config file. This parameter specifies the sequence of first ledger to extract if the database is empty. Note that ETL extracts ledgers in order and that no backfilling functionality currently exists, meaning Clio will not retroactively learn ledgers older than the one you specify. Choosing to specify this or not will yield the following behavior:
|
||||
- If this setting is absent and the database is empty, ETL will start with the next ledger validated by the network.
|
||||
- If this setting is present and the database is not empty, an exception is thrown.
|
||||
|
||||
In addition, the optional parameter `finish_sequence` can be added to the json file as well, specifying where the ledger can stop.
|
||||
|
||||
To add `start_sequence` and/or `finish_sequence` to the config.json file appropriately, they will be on the same top level of precedence as other parameters (such as `database`, `etl_sources`, `read_only`, etc.) and be specified with an integer. Here is an example snippet from the config file:
|
||||
|
||||
```json
|
||||
"start_sequence": 12345,
|
||||
"finish_sequence": 54321
|
||||
```
|
||||
|
||||
The parameters `ssl_cert_file` and `ssl_key_file` can also be added to the top level of precedence of our Clio config. `ssl_cert_file` specifies the filepath for your SSL cert while `ssl_key_file` specifies the filepath for your SSL key. It is up to you how to change ownership of these folders for your designated Clio user. Your options include:
|
||||
- Copying the two files as root somewhere that's accessible by the Clio user, then running `sudo chown` to your user
|
||||
- Changing the permissions directly so it's readable by your Clio user
|
||||
- Running Clio as root (strongly discouraged)
|
||||
|
||||
An example of how to specify `ssl_cert_file` and `ssl_key_file` in the config:
|
||||
|
||||
```json
|
||||
"server":{
|
||||
"ip": "0.0.0.0",
|
||||
"port": 51233
|
||||
},
|
||||
"ssl_cert_file" : "/full/path/to/cert.file",
|
||||
"ssl_key_file" : "/full/path/to/key.file"
|
||||
```
|
||||
|
||||
Once your config files are ready, start rippled and Clio. It doesn't matter which you
|
||||
start first, and it's fine to stop one or the other and restart at any given time.
|
||||
|
||||
@@ -84,7 +113,7 @@ the most recent ledger on the network, and then backfill. If Clio is extracting
|
||||
from rippled, and then rippled is stopped for a significant amount of time and then restarted, rippled
|
||||
will take time to backfill to the next ledger that Clio wants. The time it takes is proportional
|
||||
to the amount of time rippled was offline for. Also be aware that the amount rippled backfills
|
||||
is dependent on the online_delete and ledger_history config values; if these values
|
||||
are dependent on the online_delete and ledger_history config values; if these values
|
||||
are small, and rippled is stopped for a significant amount of time, rippled may never backfill
|
||||
to the ledger that Clio wants. To avoid this situation, it is advised to keep history
|
||||
proportional to the amount of time that you expect rippled to be offline. For example, if you
|
||||
@@ -106,7 +135,7 @@ This can take some time, and depends on database throughput. With a moderately f
|
||||
database, this should take less than 10 minutes. If you did not properly set `secure_gateway`
|
||||
in the `port_grpc` section of rippled, this step will fail. Once the first ledger
|
||||
is fully downloaded, Clio only needs to extract the changed data for each ledger,
|
||||
so extraction is much faster and Clio can keep up with rippled in real time. Even under
|
||||
so extraction is much faster and Clio can keep up with rippled in real-time. Even under
|
||||
intense load, Clio should not lag behind the network, as Clio is not processing the data,
|
||||
and is simply writing to a database. The throughput of Clio is dependent on the throughput
|
||||
of your database, but a standard Cassandra or Scylla deployment can handle
|
||||
@@ -140,3 +169,68 @@ are doing this, be aware that database traffic will be flowing across regions,
|
||||
which can cause high latencies. A possible alternative to this is to just deploy
|
||||
a database in each region, and the Clio nodes in each region use their region's database.
|
||||
This is effectively two systems.
|
||||
|
||||
## Developing against `rippled` in standalone mode
|
||||
|
||||
If you wish to develop against a `rippled` instance running in standalone
|
||||
mode there are a few quirks of both clio and rippled you need to keep in mind.
|
||||
You must:
|
||||
|
||||
1. Advance the `rippled` ledger to at least ledger 256
|
||||
2. Wait 10 minutes before first starting clio against this standalone node.
|
||||
|
||||
## Logging
|
||||
Clio provides several logging options, all of which are configurable via the config file and are detailed below.
|
||||
|
||||
`log_level`: The minimum level of severity at which the log message will be outputted by default.
|
||||
Severity options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`. Defaults to `info`.
|
||||
|
||||
`log_format`: The format of log lines produced by clio. Defaults to `"%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%"`.
|
||||
Each of the variables expands like so
|
||||
- `TimeStamp`: The full date and time of the log entry
|
||||
- `SourceLocation`: A partial path to the c++ file and the line number in said file (`source/file/path:linenumber`)
|
||||
- `ThreadID`: The ID of the thread the log entry is written from
|
||||
- `Channel`: The channel that this log entry was sent to
|
||||
- `Severity`: The severity (aka log level) the entry was sent at
|
||||
- `Message`: The actual log message
|
||||
|
||||
`log_channels`: An array of json objects, each overriding properties for a logging `channel`.
|
||||
At the moment of writing, only `log_level` can be overridden using this mechanism.
|
||||
|
||||
Each object is of this format:
|
||||
```json
|
||||
{
|
||||
"channel": "Backend",
|
||||
"log_level": "fatal"
|
||||
}
|
||||
```
|
||||
If no override is present for a given channel, that channel will log at the severity specified by the global `log_level`.
|
||||
Overridable log channels: `Backend`, `WebServer`, `Subscriptions`, `RPC`, `ETL` and `Performance`.
|
||||
|
||||
> **Note:** See `example-config.json` for more details.
|
||||
|
||||
`log_to_console`: Enable/disable log output to console. Options are `true`/`false`. Defaults to true.
|
||||
|
||||
`log_directory`: Path to the directory where log files are stored. If such directory doesn't exist, Clio will create it. If not specified, logs are not written to a file.
|
||||
|
||||
`log_rotation_size`: The max size of the log file in **megabytes** before it will rotate into a smaller file. Defaults to 2GB.
|
||||
|
||||
`log_directory_max_size`: The max size of the log directory in **megabytes** before old log files will be
|
||||
deleted to free up space. Defaults to 50GB.
|
||||
|
||||
`log_rotation_hour_interval`: The time interval in **hours** after the last log rotation to automatically
|
||||
rotate the current log file. Defaults to 12 hours.
|
||||
|
||||
Note that time-based log rotation depends on size-based log rotation: if a
|
||||
size-based log rotation occurs, the timer for the time-based rotation will reset.
|
||||
|
||||
`log_tag_style`: Tag implementation to use. Must be one of:
|
||||
- `uint`: Lock free and threadsafe but outputs just a simple unsigned integer
|
||||
- `uuid`: Threadsafe and outputs a UUID tag
|
||||
- `none`: Don't use tagging at all
|
||||
|
||||
## Cassandra / Scylla Administration
|
||||
|
||||
Since Clio relies on either Cassandra or Scylla for its database backend, here are some important considerations:
|
||||
|
||||
- Scylla, by default, will reserve all free RAM on a machine for itself. If you are running `rippled` or other services on the same machine, restrict its memory usage using the `--memory` argument: https://docs.scylladb.com/getting-started/scylla-in-a-shared-environment/
|
||||
|
||||
121
REVIEW.md
121
REVIEW.md
@@ -1,121 +0,0 @@
|
||||
# How to review clio
|
||||
Clio is a massive project, and thus I don't expect the code to be reviewed the
|
||||
way a normal PR would. So I put this guide together to help reviewers look at
|
||||
the relevant pieces of code without getting lost in the weeds.
|
||||
|
||||
One thing reviewers should keep in mind is that most of clio is designed to be
|
||||
lightweight and simple. We try not to introduce any unnecessary complexity and
|
||||
keep the code as simple and straightforward as possible. Sometimes complexity is
|
||||
unavoidable, but simplicity is the goal.
|
||||
|
||||
## Order of review
|
||||
The code is organized into 4 main components, each with their own folder. The
|
||||
code in each folder is as self contained as possible. A good way to approach
|
||||
the review would be to review one folder at a time.
|
||||
|
||||
### backend
|
||||
The code in the backend folder is the heart of the project, and reviewers should
|
||||
start here. This is the most complex part of the code, as well as the most
|
||||
performance sensitive. clio does not keep any data in memory, so performance
|
||||
generally depends on the data model and the way we talk to the database.
|
||||
|
||||
Reviewers should start with the README in this folder to get a high level idea
|
||||
of the data model and to review the data model itself. Then, reviewers should
|
||||
dive into the implementation. The table schemas and queries for Cassandra are
|
||||
defined in `CassandraBackend::open()`. The table schemas for Postgres are defined
|
||||
in Pg.cpp. The queries for Postgres are defined in each of the functions of `PostgresBackend`.
|
||||
A good way to approach the implementation would be to look at the table schemas,
|
||||
and then go through the functions declared in `BackendInterface`. Reviewers could
|
||||
also branch out to the rest of the code by looking at where these functions are
|
||||
called from.
|
||||
|
||||
### webserver
|
||||
The code in the webserver folder implements the web server for handling RPC requests.
|
||||
This code was mostly copied and pasted from boost beast example code, so I would
|
||||
really appreciate review here.
|
||||
|
||||
### rpc
|
||||
The rpc folder contains all of the handlers and any helper functions they need.
|
||||
This code is not too complicated, so reviewers don't need to dwell long here.
|
||||
|
||||
### etl
|
||||
The etl folder contains all of the code for extracting data from rippled. This
|
||||
code is complex and important, but most of this code was just copied from rippled
|
||||
reporting mode, and thus has already been reviewed and is being used in prod.
|
||||
|
||||
## Design decisions that should be reviewed
|
||||
|
||||
### Data model
|
||||
Reviewers should review the general data model. The data model itself is described
|
||||
at a high level in the README in the backend folder. The table schemas and queries
|
||||
for Cassandra are defined in the `open()` function of `CassandraBackend`. The table
|
||||
schemas for Postgres are defined in Pg.cpp.
|
||||
|
||||
Particular attention should be paid to the keys table, and the problem that solves
|
||||
(successor/upper bound). I originally was going to have a special table for book_offers,
|
||||
but then I decided that we could use the keys table itself for that and save space.
|
||||
This makes book_offers somewhat slow compared to rippled, though still very usable.
|
||||
|
||||
### Large rows
|
||||
I did some tricks with Cassandra to deal with very large rows in the keys and account_tx
|
||||
tables. For each of these, the partition key (the first component of the primary
|
||||
key) is a compound key. This is meant to break large rows into smaller rows. This
|
||||
is done to avoid hotspots. Data is sharded in Cassandra, and if some rows get very
|
||||
large, some nodes can have a lot more data than others.
|
||||
|
||||
For account_tx, this has performance implications when iterating very far back
|
||||
in time. Refer to the `fetchAccountTransactions()` function in `CassandraBackend`.
|
||||
|
||||
It is unclear if this needs to be done for other tables.
|
||||
|
||||
### Postgres table partitioning
|
||||
Originally, Postgres exhibited performance problems when the dataset approached 1
|
||||
TB. This was solved by table partitioning.
|
||||
|
||||
### Threading
|
||||
I used asio for multithreading. There are a lot of different io_contexts lying
|
||||
around the code. This needs to be cleaned up a bit. Most of these are really
|
||||
just ways to submit an async job to a single thread. I don't think it makes
|
||||
sense to have one io_context for the whole application, but some of the threading
|
||||
is a bit opaque and could be cleaned up.
|
||||
|
||||
### Boost Json
|
||||
I used boost json for serializing data to json.
|
||||
|
||||
### No cache
|
||||
As of now, there is no cache. I am not sure if a cache is even worth it. A
|
||||
transaction cache would not be hard, but a cache for ledger data will be hard.
|
||||
While a cache would improve performance, it would increase memory usage. clio
|
||||
is designed to be lightweight. Also, I've reached thousands of requests per
|
||||
second with a single clio node, so I'm not sure performance is even an issue.
|
||||
|
||||
## Things I'm less than happy about
|
||||
|
||||
#### BackendIndexer
|
||||
This is a particularly hairy piece of code that handles writing to the keys table.
|
||||
I am not too happy with this code. Parts of it need to execute in real time as
|
||||
part of ETL, and other parts are allowed to run in the background. There is also
|
||||
code that detects if a previous background job failed to complete before the
|
||||
server shutdown, and thus tries to rerun that job. The code feels tacked on, and
|
||||
I would like it to be more cleanly integrated with the rest of the code.
|
||||
|
||||
#### Shifting
|
||||
There is some bit shifting going on with the keys table and the account_tx table.
|
||||
The keys table is written to every 2^20 ledgers. Maybe it would be better to just
|
||||
write every 1 million ledgers.
|
||||
|
||||
#### performance of book_offers
|
||||
book_offers is a bit slow. It could be sped up in a variety of ways. One is to
|
||||
keep a separate book_offers table. However, this is not straightforward and will
|
||||
use more space. Another is to keep a cache of book_offers for the most recent ledger
|
||||
(or few ledgers). I am not sure if this is worth it
|
||||
|
||||
#### account_tx in Cassandra
|
||||
After the fix to deal with large rows, account_tx can be slow at times when using
|
||||
Cassandra. Specifically, if there are large gaps in time where the account was
|
||||
not affected by any transactions, the code will be reading empty records. I would
|
||||
like to sidestep this issue if possible.
|
||||
|
||||
#### Implementation of fetchLedgerPage
|
||||
`fetchLedgerPage()` is rather complex. Part of this seems unavoidable, since this
|
||||
code is dealing with the keys table.
|
||||
49
docker/centos/Dockerfile
Normal file
49
docker/centos/Dockerfile
Normal file
@@ -0,0 +1,49 @@
|
||||
# FROM centos:7 as deps
|
||||
FROM centos:7 as build
|
||||
|
||||
ENV CLIO_DIR=/opt/clio/
|
||||
# ENV OPENSSL_DIR=/opt/openssl
|
||||
|
||||
RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
|
||||
RUN yum install -y devtoolset-11
|
||||
ENV version=3.16
|
||||
ENV build=3
|
||||
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
|
||||
COPY docker/shared/install_cmake.sh /install_cmake.sh
|
||||
RUN /install_cmake.sh 3.16.3 /usr/local
|
||||
RUN source /opt/rh/devtoolset-11/enable
|
||||
WORKDIR /tmp
|
||||
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
|
||||
COPY docker/centos/build_git_centos7.sh build_git_centos7.sh
|
||||
|
||||
RUN ./build_git_centos7.sh
|
||||
RUN git clone https://github.com/openssl/openssl
|
||||
WORKDIR /tmp/openssl
|
||||
RUN git checkout OpenSSL_1_1_1q
|
||||
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
|
||||
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
|
||||
make -j $(nproc) && \
|
||||
make install_sw
|
||||
WORKDIR /tmp
|
||||
# FROM centos:7 as build
|
||||
|
||||
RUN git clone https://github.com/xrplf/clio.git
|
||||
COPY docker/shared/build_boost.sh build_boost.sh
|
||||
ENV OPENSSL_ROOT=/opt/local/openssl
|
||||
ENV BOOST_ROOT=/boost
|
||||
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
|
||||
RUN yum install -y bison flex
|
||||
RUN yum install -y rpmdevtools rpmlint
|
||||
RUN source /opt/rh/devtoolset-11/enable && cd /tmp/clio && \
|
||||
cmake -B build -DBUILD_TESTS=1 && \
|
||||
cmake --build build --parallel $(nproc)
|
||||
RUN mkdir output
|
||||
RUN strip clio/build/clio_server && strip clio/build/clio_tests
|
||||
RUN cp clio/build/clio_tests output/ && cp clio/build/clio_server output/
|
||||
RUN cp clio/example-config.json output/example-config.json
|
||||
|
||||
FROM centos:7
|
||||
COPY --from=build /tmp/output /clio
|
||||
RUN mkdir -p /opt/clio/etc && mv /clio/example-config.json /opt/clio/etc/config.json
|
||||
|
||||
CMD ["/clio/clio_server", "/opt/clio/etc/config.json"]
|
||||
18
docker/centos/build_git_centos7.sh
Executable file
18
docker/centos/build_git_centos7.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -ex
|
||||
GIT_VERSION="2.37.1"
|
||||
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
|
||||
tar zxvf git-${GIT_VERSION}.tar.gz
|
||||
cd git-${GIT_VERSION}
|
||||
|
||||
yum install -y centos-release-scl epel-release
|
||||
yum update -y
|
||||
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel
|
||||
|
||||
source /opt/rh/devtoolset-11/enable
|
||||
make configure
|
||||
./configure
|
||||
make git -j$(nproc)
|
||||
make install git
|
||||
git --version | cut -d ' ' -f3
|
||||
11
docker/centos/install_cmake.sh
Executable file
11
docker/centos/install_cmake.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
CMAKE_VERSION=${1:-"3.16.3"}
|
||||
cd /tmp
|
||||
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
|
||||
curl -OJLs $URL
|
||||
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
|
||||
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
|
||||
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
|
||||
13
docker/clio_docker/centos/build_boost.sh
Executable file
13
docker/clio_docker/centos/build_boost.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
set -exu
|
||||
|
||||
#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
|
||||
# it's either those or link=static that halves the failures. probably link=static
|
||||
BOOST_VERSION=$1
|
||||
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
|
||||
echo "BOOST_VERSION: ${BOOST_VERSION}"
|
||||
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
|
||||
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
|
||||
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
|
||||
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
|
||||
mkdir -p /boost && mv boost /boost && mv stage /boost
|
||||
18
docker/clio_docker/centos/build_git_centos7.sh
Executable file
18
docker/clio_docker/centos/build_git_centos7.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -ex
|
||||
GIT_VERSION="2.37.1"
|
||||
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
|
||||
tar zxvf git-${GIT_VERSION}.tar.gz
|
||||
cd git-${GIT_VERSION}
|
||||
|
||||
yum install -y centos-release-scl epel-release
|
||||
yum update -y
|
||||
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel
|
||||
|
||||
source /opt/rh/devtoolset-11/enable
|
||||
make configure
|
||||
./configure
|
||||
make git -j$(nproc)
|
||||
make install git
|
||||
git --version | cut -d ' ' -f3
|
||||
34
docker/clio_docker/centos/dockerfile
Normal file
34
docker/clio_docker/centos/dockerfile
Normal file
@@ -0,0 +1,34 @@
|
||||
FROM centos:7
|
||||
|
||||
ENV CLIO_DIR=/opt/clio/
|
||||
# ENV OPENSSL_DIR=/opt/openssl
|
||||
|
||||
RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
|
||||
RUN yum install -y devtoolset-11
|
||||
ENV version=3.16
|
||||
ENV build=3
|
||||
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
|
||||
COPY install_cmake.sh /install_cmake.sh
|
||||
RUN /install_cmake.sh 3.16.3 /usr/local
|
||||
RUN source /opt/rh/devtoolset-11/enable
|
||||
WORKDIR /tmp
|
||||
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
|
||||
COPY build_git_centos7.sh build_git_centos7.sh
|
||||
|
||||
RUN ./build_git_centos7.sh
|
||||
RUN git clone https://github.com/openssl/openssl
|
||||
WORKDIR /tmp/openssl
|
||||
RUN git checkout OpenSSL_1_1_1q
|
||||
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
|
||||
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
|
||||
make -j $(nproc) && \
|
||||
make install_sw
|
||||
WORKDIR /tmp
|
||||
RUN git clone https://github.com/xrplf/clio.git
|
||||
COPY build_boost.sh build_boost.sh
|
||||
ENV OPENSSL_ROOT=/opt/local/openssl
|
||||
ENV BOOST_ROOT=/boost
|
||||
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
|
||||
RUN yum install -y bison flex
|
||||
RUN source /opt/rh/devtoolset-11/enable && \
|
||||
cd /tmp/clio && cmake -B build -Dtests=0 -Dlocal_libarchive=1 -Dunity=0 -DBUILD_TESTS=0 && cmake --build build --parallel $(nproc)
|
||||
11
docker/clio_docker/centos/install_cmake.sh
Executable file
11
docker/clio_docker/centos/install_cmake.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
CMAKE_VERSION=${1:-"3.16.3"}
|
||||
cd /tmp
|
||||
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
|
||||
curl -OJLs $URL
|
||||
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
|
||||
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
|
||||
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
|
||||
13
docker/shared/build_boost.sh
Executable file
13
docker/shared/build_boost.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
set -exu
|
||||
|
||||
#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
|
||||
# it's either those or link=static that halves the failures. probably link=static
|
||||
BOOST_VERSION=$1
|
||||
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
|
||||
echo "BOOST_VERSION: ${BOOST_VERSION}"
|
||||
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
|
||||
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
|
||||
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
|
||||
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
|
||||
mkdir -p /boost && mv boost /boost && mv stage /boost
|
||||
11
docker/shared/install_cmake.sh
Executable file
11
docker/shared/install_cmake.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
CMAKE_VERSION=${1:-"3.16.3"}
|
||||
cd /tmp
|
||||
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
|
||||
curl -OJLs $URL
|
||||
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
|
||||
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
|
||||
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
|
||||
3
docker/shared/install_openssl.sh
Executable file
3
docker/shared/install_openssl.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
24
docker/ubuntu/Dockerfile
Normal file
24
docker/ubuntu/Dockerfile
Normal file
@@ -0,0 +1,24 @@
|
||||
FROM ubuntu:20.04 AS boost
|
||||
|
||||
RUN apt-get update && apt-get install -y build-essential
|
||||
ARG BOOST_VERSION_=1_75_0
|
||||
ARG BOOST_VERSION=1.75.0
|
||||
COPY docker/shared/build_boost.sh .
|
||||
RUN apt install -y curl
|
||||
RUN ./build_boost.sh ${BOOST_VERSION}
|
||||
ENV BOOST_ROOT=/boost
|
||||
|
||||
FROM ubuntu:20.04 AS build
|
||||
ENV BOOST_ROOT=/boost
|
||||
COPY --from=boost /boost /boost
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y build-essential software-properties-common pkg-config libssl-dev wget curl gpg git zlib1g-dev bison flex autoconf lsb-release
|
||||
RUN apt install -y gpg-agent
|
||||
RUN wget https://apt.llvm.org/llvm.sh
|
||||
RUN chmod +x llvm.sh && ./llvm.sh 14 && ./llvm.sh 15
|
||||
# COPY . /clio
|
||||
## Install cmake
|
||||
ARG CMAKE_VERSION=3.16.3
|
||||
COPY docker/shared/install_cmake.sh .
|
||||
RUN ./install_cmake.sh ${CMAKE_VERSION}
|
||||
ENV PATH="/opt/local/cmake/bin:$PATH"
|
||||
@@ -1,37 +1,87 @@
|
||||
{
|
||||
"database":
|
||||
{
|
||||
"type":"cassandra",
|
||||
"cassandra":
|
||||
{
|
||||
"contact_points":"127.0.0.1",
|
||||
"port":9042,
|
||||
"keyspace":"clio",
|
||||
"replication_factor":1,
|
||||
"table_prefix":"",
|
||||
"max_requests_outstanding":25000,
|
||||
"threads":8
|
||||
"database": {
|
||||
"type": "cassandra",
|
||||
"cassandra": {
|
||||
"contact_points": "127.0.0.1",
|
||||
"port": 9042,
|
||||
"keyspace": "clio",
|
||||
"replication_factor": 1,
|
||||
"table_prefix": "",
|
||||
"max_write_requests_outstanding": 25000,
|
||||
"max_read_requests_outstanding": 30000,
|
||||
"threads": 8
|
||||
}
|
||||
},
|
||||
"etl_sources":
|
||||
[
|
||||
"etl_sources": [
|
||||
{
|
||||
"ip":"127.0.0.1",
|
||||
"ws_port":"6006",
|
||||
"grpc_port":"50051"
|
||||
"ip": "127.0.0.1",
|
||||
"ws_port": "6006",
|
||||
"grpc_port": "50051"
|
||||
}
|
||||
],
|
||||
"dos_guard":
|
||||
{
|
||||
"whitelist":["127.0.0.1"]
|
||||
"whitelist":["127.0.0.1"], // comma-separated list of ips to exclude from rate limiting
|
||||
/* The below values are the default values and are only specified here
|
||||
* for documentation purposes. The rate limiter currently limits
|
||||
* connections and bandwidth per ip. The rate limiter looks at the raw
|
||||
* ip of a client connection, and so requests routed through a load
|
||||
* balancer will all have the same ip and be treated as a single client
|
||||
*/
|
||||
"max_fetches":100000000, // max bytes per ip per sweep interval
|
||||
"max_connections":1, // max connections per ip
|
||||
"sweep_interval": 10 // time in seconds before resetting bytes per ip count
|
||||
},
|
||||
"cache":
|
||||
{
|
||||
"peers": [{"ip":"127.0.0.1","port":51234}]
|
||||
},
|
||||
"server":{
|
||||
"ip":"0.0.0.0",
|
||||
"port":51233
|
||||
"ip": "0.0.0.0",
|
||||
"port": 51233,
|
||||
/* Max number of requests to queue up before rejecting further requests.
|
||||
* Defaults to 0, which disables the limit
|
||||
*/
|
||||
"max_queue_size":500
|
||||
},
|
||||
"log_level":"debug",
|
||||
"log_file":"./clio.log",
|
||||
"online_delete":0,
|
||||
"extractor_threads":8,
|
||||
"read_only":false
|
||||
"log_channels": [
|
||||
{
|
||||
"channel": "Backend",
|
||||
"log_level": "fatal"
|
||||
},
|
||||
{
|
||||
"channel": "WebServer",
|
||||
"log_level": "info"
|
||||
},
|
||||
{
|
||||
"channel": "Subscriptions",
|
||||
"log_level": "info"
|
||||
},
|
||||
{
|
||||
"channel": "RPC",
|
||||
"log_level": "error"
|
||||
},
|
||||
{
|
||||
"channel": "ETL",
|
||||
"log_level": "debug"
|
||||
},
|
||||
{
|
||||
"channel": "Performance",
|
||||
"log_level": "trace"
|
||||
}
|
||||
],
|
||||
"log_level": "info",
|
||||
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format
|
||||
"log_to_console": true,
|
||||
"log_directory": "./clio_log",
|
||||
"log_rotation_size": 2048,
|
||||
"log_directory_max_size": 51200,
|
||||
"log_rotation_hour_interval": 12,
|
||||
"log_tag_style": "uint",
|
||||
"extractor_threads": 8,
|
||||
"read_only": false,
|
||||
//"start_sequence": [integer] the ledger index to start from,
|
||||
//"finish_sequence": [integer] the ledger index to finish at,
|
||||
//"ssl_cert_file" : "/full/path/to/cert.file",
|
||||
//"ssl_key_file" : "/full/path/to/key.file"
|
||||
}
|
||||
|
||||
@@ -1,47 +1,47 @@
|
||||
#ifndef RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
|
||||
#define RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <backend/CassandraBackend.h>
|
||||
#include <backend/PostgresBackend.h>
|
||||
#include <config/Config.h>
|
||||
#include <log/Logger.h>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
|
||||
namespace Backend {
|
||||
std::shared_ptr<BackendInterface>
|
||||
make_Backend(boost::asio::io_context& ioc, boost::json::object const& config)
|
||||
make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface";
|
||||
|
||||
boost::json::object dbConfig = config.at("database").as_object();
|
||||
|
||||
bool readOnly = false;
|
||||
if (config.contains("read_only"))
|
||||
readOnly = config.at("read_only").as_bool();
|
||||
|
||||
auto type = dbConfig.at("type").as_string();
|
||||
static clio::Logger log{"Backend"};
|
||||
log.info() << "Constructing BackendInterface";
|
||||
|
||||
auto readOnly = config.valueOr("read_only", false);
|
||||
auto type = config.value<std::string>("database.type");
|
||||
std::shared_ptr<BackendInterface> backend = nullptr;
|
||||
|
||||
if (boost::iequals(type, "cassandra"))
|
||||
{
|
||||
if (config.contains("online_delete"))
|
||||
dbConfig.at(type).as_object()["ttl"] =
|
||||
config.at("online_delete").as_int64() * 4;
|
||||
backend = std::make_shared<CassandraBackend>(
|
||||
ioc, dbConfig.at(type).as_object());
|
||||
}
|
||||
else if (boost::iequals(type, "postgres"))
|
||||
{
|
||||
if (dbConfig.contains("experimental") &&
|
||||
dbConfig.at("experimental").is_bool() &&
|
||||
dbConfig.at("experimental").as_bool())
|
||||
backend = std::make_shared<PostgresBackend>(
|
||||
ioc, dbConfig.at(type).as_object());
|
||||
else
|
||||
BOOST_LOG_TRIVIAL(fatal)
|
||||
<< "Postgres support is experimental at this time. "
|
||||
<< "If you would really like to use Postgres, add "
|
||||
"\"experimental\":true to your database config";
|
||||
auto cfg = config.section("database." + type);
|
||||
auto ttl = config.valueOr<uint32_t>("online_delete", 0) * 4;
|
||||
backend = std::make_shared<CassandraBackend>(ioc, cfg, ttl);
|
||||
}
|
||||
|
||||
if (!backend)
|
||||
@@ -55,11 +55,8 @@ make_Backend(boost::asio::io_context& ioc, boost::json::object const& config)
|
||||
backend->updateRange(rng->maxSequence);
|
||||
}
|
||||
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__ << ": Constructed BackendInterface Successfully";
|
||||
log.info() << "Constructed BackendInterface Successfully";
|
||||
|
||||
return backend;
|
||||
}
|
||||
} // namespace Backend
|
||||
|
||||
#endif // RIPPLE_REPORTING_BACKEND_FACTORY
|
||||
|
||||
@@ -1,6 +1,34 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/protocol/Indexes.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <log/Logger.h>
|
||||
|
||||
using namespace clio;
|
||||
|
||||
// local to compilation unit loggers
|
||||
namespace {
|
||||
clio::Logger gLog{"Backend"};
|
||||
} // namespace
|
||||
|
||||
namespace Backend {
|
||||
bool
|
||||
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
|
||||
@@ -19,7 +47,6 @@ BackendInterface::writeLedgerObject(
|
||||
std::string&& blob)
|
||||
{
|
||||
assert(key.size() == sizeof(ripple::uint256));
|
||||
ripple::uint256 key256 = ripple::uint256::fromVoid(key.data());
|
||||
doWriteLedgerObject(std::move(key), seq, std::move(blob));
|
||||
}
|
||||
|
||||
@@ -27,7 +54,7 @@ std::optional<LedgerRange>
|
||||
BackendInterface::hardFetchLedgerRangeNoThrow(
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug) << __func__;
|
||||
gLog.trace() << "called";
|
||||
while (true)
|
||||
{
|
||||
try
|
||||
@@ -44,7 +71,7 @@ BackendInterface::hardFetchLedgerRangeNoThrow(
|
||||
std::optional<LedgerRange>
|
||||
BackendInterface::hardFetchLedgerRangeNoThrow() const
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug) << __func__;
|
||||
gLog.trace() << "called";
|
||||
return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
|
||||
}
|
||||
|
||||
@@ -58,21 +85,17 @@ BackendInterface::fetchLedgerObject(
|
||||
auto obj = cache_.get(key, sequence);
|
||||
if (obj)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " - cache hit - " << ripple::strHex(key);
|
||||
gLog.trace() << "Cache hit - " << ripple::strHex(key);
|
||||
return *obj;
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " - cache miss - " << ripple::strHex(key);
|
||||
gLog.trace() << "Cache miss - " << ripple::strHex(key);
|
||||
auto dbObj = doFetchLedgerObject(key, sequence, yield);
|
||||
if (!dbObj)
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " - missed cache and missed in db";
|
||||
gLog.trace() << "Missed cache and missed in db";
|
||||
else
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " - missed cache but found in db";
|
||||
gLog.trace() << "Missed cache but found in db";
|
||||
return dbObj;
|
||||
}
|
||||
}
|
||||
@@ -94,9 +117,8 @@ BackendInterface::fetchLedgerObjects(
|
||||
else
|
||||
misses.push_back(keys[i]);
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " - cache hits = " << keys.size() - misses.size()
|
||||
<< " - cache misses = " << misses.size();
|
||||
gLog.trace() << "Cache hits = " << keys.size() - misses.size()
|
||||
<< " - cache misses = " << misses.size();
|
||||
|
||||
if (misses.size())
|
||||
{
|
||||
@@ -122,11 +144,9 @@ BackendInterface::fetchSuccessorKey(
|
||||
{
|
||||
auto succ = cache_.getSuccessor(key, ledgerSequence);
|
||||
if (succ)
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " - cache hit - " << ripple::strHex(key);
|
||||
gLog.trace() << "Cache hit - " << ripple::strHex(key);
|
||||
else
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " - cache miss - " << ripple::strHex(key);
|
||||
gLog.trace() << "Cache miss - " << ripple::strHex(key);
|
||||
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
|
||||
}
|
||||
|
||||
@@ -180,8 +200,8 @@ BackendInterface::fetchBookOffers(
|
||||
succMillis += getMillis(mid2 - mid1);
|
||||
if (!offerDir || offerDir->key >= bookEnd)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__ << " - offerDir.has_value() "
|
||||
<< offerDir.has_value() << " breaking";
|
||||
gLog.trace() << "offerDir.has_value() " << offerDir.has_value()
|
||||
<< " breaking";
|
||||
break;
|
||||
}
|
||||
uTipIndex = offerDir->key;
|
||||
@@ -197,8 +217,7 @@ BackendInterface::fetchBookOffers(
|
||||
auto next = sle.getFieldU64(ripple::sfIndexNext);
|
||||
if (!next)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " next is empty. breaking";
|
||||
gLog.trace() << "Next is empty. breaking";
|
||||
break;
|
||||
}
|
||||
auto nextKey = ripple::keylet::page(uTipIndex, next);
|
||||
@@ -215,29 +234,27 @@ BackendInterface::fetchBookOffers(
|
||||
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
|
||||
for (size_t i = 0; i < keys.size() && i < limit; ++i)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " key = " << ripple::strHex(keys[i])
|
||||
<< " blob = " << ripple::strHex(objs[i])
|
||||
<< " ledgerSequence = " << ledgerSequence;
|
||||
gLog.trace() << "Key = " << ripple::strHex(keys[i])
|
||||
<< " blob = " << ripple::strHex(objs[i])
|
||||
<< " ledgerSequence = " << ledgerSequence;
|
||||
assert(objs[i].size());
|
||||
page.offers.push_back({keys[i], objs[i]});
|
||||
}
|
||||
auto end = std::chrono::system_clock::now();
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " "
|
||||
<< "Fetching " << std::to_string(keys.size()) << " offers took "
|
||||
<< std::to_string(getMillis(mid - begin))
|
||||
<< " milliseconds. Fetching next dir took "
|
||||
<< std::to_string(succMillis) << " milliseonds. Fetched next dir "
|
||||
<< std::to_string(numSucc) << " times"
|
||||
<< " Fetching next page of dir took " << std::to_string(pageMillis)
|
||||
<< " milliseconds"
|
||||
<< ". num pages = " << std::to_string(numPages)
|
||||
<< ". Fetching all objects took "
|
||||
<< std::to_string(getMillis(end - mid))
|
||||
<< " milliseconds. total time = "
|
||||
<< std::to_string(getMillis(end - begin)) << " milliseconds"
|
||||
<< " book = " << ripple::strHex(book);
|
||||
gLog.debug() << "Fetching " << std::to_string(keys.size())
|
||||
<< " offers took " << std::to_string(getMillis(mid - begin))
|
||||
<< " milliseconds. Fetching next dir took "
|
||||
<< std::to_string(succMillis)
|
||||
<< " milliseonds. Fetched next dir " << std::to_string(numSucc)
|
||||
<< " times"
|
||||
<< " Fetching next page of dir took "
|
||||
<< std::to_string(pageMillis) << " milliseconds"
|
||||
<< ". num pages = " << std::to_string(numPages)
|
||||
<< ". Fetching all objects took "
|
||||
<< std::to_string(getMillis(end - mid))
|
||||
<< " milliseconds. total time = "
|
||||
<< std::to_string(getMillis(end - begin)) << " milliseconds"
|
||||
<< " book = " << ripple::strHex(book);
|
||||
|
||||
return page;
|
||||
}
|
||||
@@ -259,7 +276,8 @@ BackendInterface::fetchLedgerPage(
|
||||
ripple::uint256 const& curCursor = keys.size() ? keys.back()
|
||||
: cursor ? *cursor
|
||||
: firstKey;
|
||||
uint32_t seq = outOfOrder ? range->maxSequence : ledgerSequence;
|
||||
std::uint32_t const seq =
|
||||
outOfOrder ? range->maxSequence : ledgerSequence;
|
||||
auto succ = fetchSuccessorKey(curCursor, seq, yield);
|
||||
if (!succ)
|
||||
reachedEnd = true;
|
||||
@@ -274,16 +292,15 @@ BackendInterface::fetchLedgerPage(
|
||||
page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
|
||||
else if (!outOfOrder)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< __func__ << " incorrect successor table. key = "
|
||||
gLog.error()
|
||||
<< "Deleted or non-existent object in successor table. key = "
|
||||
<< ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
|
||||
std::stringstream msg;
|
||||
for (size_t j = 0; j < objects.size(); ++j)
|
||||
{
|
||||
msg << " - " << ripple::strHex(keys[j]);
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << msg.str();
|
||||
assert(false);
|
||||
gLog.error() << msg.str();
|
||||
}
|
||||
}
|
||||
if (keys.size() && !reachedEnd)
|
||||
@@ -304,7 +321,7 @@ BackendInterface::fetchFees(
|
||||
|
||||
if (!bytes)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " - could not find fees";
|
||||
gLog.error() << "Could not find fees";
|
||||
return {};
|
||||
}
|
||||
|
||||
|
||||
@@ -1,16 +1,48 @@
|
||||
#ifndef RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
|
||||
#define RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/ledger/ReadView.h>
|
||||
#include <boost/asio.hpp>
|
||||
#include <backend/DBHelpers.h>
|
||||
#include <backend/SimpleCache.h>
|
||||
#include <backend/Types.h>
|
||||
#include <config/Config.h>
|
||||
#include <log/Logger.h>
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json.hpp>
|
||||
|
||||
#include <thread>
|
||||
#include <type_traits>
|
||||
|
||||
namespace Backend {
|
||||
|
||||
/**
|
||||
* @brief Throws an error when database read time limit is exceeded.
|
||||
*
|
||||
* This class is throws an error when read time limit is exceeded but
|
||||
* is also paired with a separate class to retry the connection.
|
||||
*/
|
||||
class DatabaseTimeout : public std::exception
|
||||
{
|
||||
public:
|
||||
const char*
|
||||
what() const throw() override
|
||||
{
|
||||
@@ -18,10 +50,20 @@ class DatabaseTimeout : public std::exception
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Separate class that reattempts connection after time limit.
|
||||
*
|
||||
* @tparam F Represents a class of handlers for Cassandra database.
|
||||
* @param func Instance of Cassandra database handler class.
|
||||
* @param waitMs Is the arbitrary time limit of 500ms.
|
||||
* @return auto
|
||||
*/
|
||||
template <class F>
|
||||
auto
|
||||
retryOnTimeout(F func, size_t waitMs = 500)
|
||||
{
|
||||
static clio::Logger log{"Backend"};
|
||||
|
||||
while (true)
|
||||
{
|
||||
try
|
||||
@@ -30,26 +72,55 @@ retryOnTimeout(F func, size_t waitMs = 500)
|
||||
}
|
||||
catch (DatabaseTimeout& t)
|
||||
{
|
||||
log.error()
|
||||
<< "Database request timed out. Sleeping and retrying ... ";
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< __func__ << " function timed out. Retrying ... ";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Passes in serialized handlers in an asynchronous fashion.
|
||||
*
|
||||
* Note that the synchronous auto passes handlers critical to supporting
|
||||
* the Clio backend. The coroutine types are checked if same/different.
|
||||
*
|
||||
* @tparam F Represents a class of handlers for Cassandra database.
|
||||
* @param f R-value instance of Cassandra handler class.
|
||||
* @return auto
|
||||
*/
|
||||
template <class F>
|
||||
auto
|
||||
synchronous(F&& f)
|
||||
{
|
||||
/** @brief Serialized handlers and their execution.
|
||||
*
|
||||
* The ctx class is converted into a serialized handler, also named
|
||||
* ctx, and is used to pass a stream of data into the method.
|
||||
*/
|
||||
boost::asio::io_context ctx;
|
||||
boost::asio::io_context::strand strand(ctx);
|
||||
std::optional<boost::asio::io_context::work> work;
|
||||
|
||||
/*! @brief Place the ctx within the vector of serialized handlers. */
|
||||
work.emplace(ctx);
|
||||
|
||||
/**
|
||||
* @brief If/else statements regarding coroutine type matching.
|
||||
*
|
||||
* R is the currently executing coroutine that is about to get passed.
|
||||
* If corountine types do not match, the current one's type is stored.
|
||||
*/
|
||||
using R = typename std::result_of<F(boost::asio::yield_context&)>::type;
|
||||
if constexpr (!std::is_same<R, void>::value)
|
||||
{
|
||||
/**
|
||||
* @brief When the coroutine type is the same
|
||||
*
|
||||
* The spawn function enables programs to implement asynchronous logic
|
||||
* in a synchronous manner. res stores the instance of the currently
|
||||
* executing coroutine, yield. The different type is returned.
|
||||
*/
|
||||
R res;
|
||||
boost::asio::spawn(
|
||||
strand, [&f, &work, &res](boost::asio::yield_context yield) {
|
||||
@@ -62,6 +133,7 @@ synchronous(F&& f)
|
||||
}
|
||||
else
|
||||
{
|
||||
/*! @brief When the corutine type is different, run as normal. */
|
||||
boost::asio::spawn(
|
||||
strand, [&f, &work](boost::asio::yield_context yield) {
|
||||
f(yield);
|
||||
@@ -72,6 +144,13 @@ synchronous(F&& f)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reestablishes synchronous connection on timeout.
|
||||
*
|
||||
* @tparam Represents a class of handlers for Cassandra database.
|
||||
* @param f R-value instance of Cassandra database handler class.
|
||||
* @return auto
|
||||
*/
|
||||
template <class F>
|
||||
auto
|
||||
synchronousAndRetryOnTimeout(F&& f)
|
||||
@@ -79,32 +158,47 @@ synchronousAndRetryOnTimeout(F&& f)
|
||||
return retryOnTimeout([&]() { return synchronous(f); });
|
||||
}
|
||||
|
||||
/*! @brief Handles ledger and transaction backend data. */
|
||||
class BackendInterface
|
||||
{
|
||||
/**
|
||||
* @brief Shared mutexes and a cache for the interface.
|
||||
*
|
||||
* rngMutex is a shared mutex. Shared mutexes prevent shared data
|
||||
* from being accessed by multiple threads and has two levels of
|
||||
* access: shared and exclusive.
|
||||
*/
|
||||
protected:
|
||||
mutable std::shared_mutex rngMtx_;
|
||||
std::optional<LedgerRange> range;
|
||||
SimpleCache cache_;
|
||||
|
||||
// mutex used for open() and close()
|
||||
mutable std::mutex mutex_;
|
||||
/**
|
||||
* @brief Public read methods
|
||||
*
|
||||
* All of these reads methods can throw DatabaseTimeout. When writing
|
||||
* code in an RPC handler, this exception does not need to be caught:
|
||||
* when an RPC results in a timeout, an error is returned to the client.
|
||||
*/
|
||||
|
||||
public:
|
||||
BackendInterface(boost::json::object const& config)
|
||||
BackendInterface(clio::Config const& config)
|
||||
{
|
||||
}
|
||||
virtual ~BackendInterface()
|
||||
{
|
||||
}
|
||||
|
||||
// *** public read methods ***
|
||||
// All of these reads methods can throw DatabaseTimeout. When writing code
|
||||
// in an RPC handler, this exception does not need to be caught: when an RPC
|
||||
// results in a timeout, an error is returned to the client
|
||||
/*! @brief LEDGER METHODS */
|
||||
public:
|
||||
// *** ledger methods
|
||||
//
|
||||
|
||||
/**
|
||||
* @brief Cache that holds states of the ledger
|
||||
*
|
||||
* const version holds the original cache state; the other tracks
|
||||
* historical changes.
|
||||
*
|
||||
* @return SimpleCache const&
|
||||
*/
|
||||
SimpleCache const&
|
||||
cache() const
|
||||
{
|
||||
@@ -117,19 +211,23 @@ public:
|
||||
return cache_;
|
||||
}
|
||||
|
||||
/*! @brief Fetches a specific ledger by sequence number. */
|
||||
virtual std::optional<ripple::LedgerInfo>
|
||||
fetchLedgerBySequence(
|
||||
std::uint32_t const sequence,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/*! @brief Fetches a specific ledger by hash. */
|
||||
virtual std::optional<ripple::LedgerInfo>
|
||||
fetchLedgerByHash(
|
||||
ripple::uint256 const& hash,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/*! @brief Fetches the latest ledger sequence. */
|
||||
virtual std::optional<std::uint32_t>
|
||||
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/*! @brief Fetches the current ledger range while locking that process */
|
||||
std::optional<LedgerRange>
|
||||
fetchLedgerRange() const
|
||||
{
|
||||
@@ -137,6 +235,14 @@ public:
|
||||
return range;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Updates the range of sequences to be tracked.
|
||||
*
|
||||
* Function that continues updating the range sliding window or creates
|
||||
* a new sliding window once the maxSequence limit has been reached.
|
||||
*
|
||||
* @param newMax Unsigned 32-bit integer representing new max of range.
|
||||
*/
|
||||
void
|
||||
updateRange(uint32_t newMax)
|
||||
{
|
||||
@@ -148,70 +254,187 @@ public:
|
||||
range->maxSequence = newMax;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Returns the fees for specific transactions.
|
||||
*
|
||||
* @param seq Unsigned 32-bit integer reprsenting sequence.
|
||||
* @param yield The currently executing coroutine.
|
||||
* @return std::optional<ripple::Fees>
|
||||
*/
|
||||
std::optional<ripple::Fees>
|
||||
fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const;
|
||||
|
||||
// *** transaction methods
|
||||
/*! @brief TRANSACTION METHODS */
|
||||
/**
|
||||
* @brief Fetches a specific transaction.
|
||||
*
|
||||
* @param hash Unsigned 256-bit integer representing hash.
|
||||
* @param yield The currently executing coroutine.
|
||||
* @return std::optional<TransactionAndMetadata>
|
||||
*/
|
||||
virtual std::optional<TransactionAndMetadata>
|
||||
fetchTransaction(
|
||||
ripple::uint256 const& hash,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetches multiple transactions.
|
||||
*
|
||||
* @param hashes Unsigned integer value representing a hash.
|
||||
* @param yield The currently executing coroutine.
|
||||
* @return std::vector<TransactionAndMetadata>
|
||||
*/
|
||||
virtual std::vector<TransactionAndMetadata>
|
||||
fetchTransactions(
|
||||
std::vector<ripple::uint256> const& hashes,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
virtual AccountTransactions
|
||||
/**
|
||||
* @brief Fetches all transactions for a specific account
|
||||
*
|
||||
* @param account A specific XRPL Account, speciifed by unique type
|
||||
* accountID.
|
||||
* @param limit Paging limit for how many transactions can be returned per
|
||||
* page.
|
||||
* @param forward Boolean whether paging happens forwards or backwards.
|
||||
* @param cursor Important metadata returned every time paging occurs.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return TransactionsAndCursor
|
||||
*/
|
||||
virtual TransactionsAndCursor
|
||||
fetchAccountTransactions(
|
||||
ripple::AccountID const& account,
|
||||
std::uint32_t const limit,
|
||||
bool forward,
|
||||
std::optional<AccountTransactionsCursor> const& cursor,
|
||||
std::optional<TransactionsCursor> const& cursor,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetches all transactions from a specific ledger.
|
||||
*
|
||||
* @param ledgerSequence Unsigned 32-bit integer for latest total
|
||||
* transactions.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return std::vector<TransactionAndMetadata>
|
||||
*/
|
||||
virtual std::vector<TransactionAndMetadata>
|
||||
fetchAllTransactionsInLedger(
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetches all transaction hashes from a specific ledger.
|
||||
*
|
||||
* @param ledgerSequence Standard unsigned integer.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return std::vector<ripple::uint256>
|
||||
*/
|
||||
virtual std::vector<ripple::uint256>
|
||||
fetchAllTransactionHashesInLedger(
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
// *** state data methods
|
||||
/*! @brief NFT methods */
|
||||
/**
|
||||
* @brief Fetches a specific NFT
|
||||
*
|
||||
* @param tokenID Unsigned 256-bit integer.
|
||||
* @param ledgerSequence Standard unsigned integer.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return std::optional<NFT>
|
||||
*/
|
||||
virtual std::optional<NFT>
|
||||
fetchNFT(
|
||||
ripple::uint256 const& tokenID,
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetches all transactions for a specific NFT.
|
||||
*
|
||||
* @param tokenID Unsigned 256-bit integer.
|
||||
* @param limit Paging limit as to how many transactions return per page.
|
||||
* @param forward Boolean whether paging happens forwards or backwards.
|
||||
* @param cursorIn Represents transaction number and ledger sequence.
|
||||
* @param yield Currently executing coroutine is passed in as input.
|
||||
* @return TransactionsAndCursor
|
||||
*/
|
||||
virtual TransactionsAndCursor
|
||||
fetchNFTTransactions(
|
||||
ripple::uint256 const& tokenID,
|
||||
std::uint32_t const limit,
|
||||
bool const forward,
|
||||
std::optional<TransactionsCursor> const& cursorIn,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/*! @brief STATE DATA METHODS */
|
||||
/**
|
||||
* @brief Fetches a specific ledger object: vector of unsigned chars
|
||||
*
|
||||
* @param key Unsigned 256-bit integer.
|
||||
* @param sequence Unsigned 32-bit integer.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return std::optional<Blob>
|
||||
*/
|
||||
std::optional<Blob>
|
||||
fetchLedgerObject(
|
||||
ripple::uint256 const& key,
|
||||
std::uint32_t const sequence,
|
||||
boost::asio::yield_context& yield) const;
|
||||
|
||||
/**
|
||||
* @brief Fetches all ledger objects: a vector of vectors of unsigned chars.
|
||||
*
|
||||
* @param keys Unsigned 256-bit integer.
|
||||
* @param sequence Unsigned 32-bit integer.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return std::vector<Blob>
|
||||
*/
|
||||
std::vector<Blob>
|
||||
fetchLedgerObjects(
|
||||
std::vector<ripple::uint256> const& keys,
|
||||
std::uint32_t const sequence,
|
||||
boost::asio::yield_context& yield) const;
|
||||
|
||||
/*! @brief Virtual function version of fetchLedgerObject */
|
||||
virtual std::optional<Blob>
|
||||
doFetchLedgerObject(
|
||||
ripple::uint256 const& key,
|
||||
std::uint32_t const sequence,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/*! @brief Virtual function version of fetchLedgerObjects */
|
||||
virtual std::vector<Blob>
|
||||
doFetchLedgerObjects(
|
||||
std::vector<ripple::uint256> const& keys,
|
||||
std::uint32_t const sequence,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Returns the difference between ledgers: vector of objects
|
||||
*
|
||||
* Objects are made of a key value, vector of unsigned chars (blob),
|
||||
* and a boolean detailing whether keys and blob match.
|
||||
*
|
||||
* @param ledgerSequence Standard unsigned integer.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return std::vector<LedgerObject>
|
||||
*/
|
||||
virtual std::vector<LedgerObject>
|
||||
fetchLedgerDiff(
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
// Fetches a page of ledger objects, ordered by key/index.
|
||||
// Used by ledger_data
|
||||
/**
|
||||
* @brief Fetches a page of ledger objects, ordered by key/index.
|
||||
*
|
||||
* @param cursor Important metadata returned every time paging occurs.
|
||||
* @param ledgerSequence Standard unsigned integer.
|
||||
* @param limit Paging limit as to how many transactions returned per page.
|
||||
* @param outOfOrder Boolean on whether ledger page is out of order.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return LedgerPage
|
||||
*/
|
||||
LedgerPage
|
||||
fetchLedgerPage(
|
||||
std::optional<ripple::uint256> const& cursor,
|
||||
@@ -220,26 +443,37 @@ public:
|
||||
bool outOfOrder,
|
||||
boost::asio::yield_context& yield) const;
|
||||
|
||||
// Fetches the successor to key/index
|
||||
/*! @brief Fetches successor object from key/index. */
|
||||
std::optional<LedgerObject>
|
||||
fetchSuccessorObject(
|
||||
ripple::uint256 key,
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const;
|
||||
|
||||
/*! @brief Fetches successor key from key/index. */
|
||||
std::optional<ripple::uint256>
|
||||
fetchSuccessorKey(
|
||||
ripple::uint256 key,
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const;
|
||||
// Fetches the successor to key/index
|
||||
|
||||
/*! @brief Virtual function version of fetchSuccessorKey. */
|
||||
virtual std::optional<ripple::uint256>
|
||||
doFetchSuccessorKey(
|
||||
ripple::uint256 key,
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetches book offers.
|
||||
*
|
||||
* @param book Unsigned 256-bit integer.
|
||||
* @param ledgerSequence Standard unsigned integer.
|
||||
* @param limit Pagaing limit as to how many transactions returned per page.
|
||||
* @param cursor Important metadata returned every time paging occurs.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return BookOffersPage
|
||||
*/
|
||||
BookOffersPage
|
||||
fetchBookOffers(
|
||||
ripple::uint256 const& book,
|
||||
@@ -248,6 +482,16 @@ public:
|
||||
std::optional<ripple::uint256> const& cursor,
|
||||
boost::asio::yield_context& yield) const;
|
||||
|
||||
/**
|
||||
* @brief Returns a ledger range
|
||||
*
|
||||
* Ledger range is a struct of min and max sequence numbers). Due to
|
||||
* the use of [&], which denotes a special case of a lambda expression
|
||||
* where values found outside the scope are passed by reference, wrt the
|
||||
* currently executing coroutine.
|
||||
*
|
||||
* @return std::optional<LedgerRange>
|
||||
*/
|
||||
std::optional<LedgerRange>
|
||||
hardFetchLedgerRange() const
|
||||
{
|
||||
@@ -256,27 +500,52 @@ public:
|
||||
});
|
||||
}
|
||||
|
||||
/*! @brief Virtual function equivalent of hardFetchLedgerRange. */
|
||||
virtual std::optional<LedgerRange>
|
||||
hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
// Doesn't throw DatabaseTimeout. Should be used with care.
|
||||
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
|
||||
std::optional<LedgerRange>
|
||||
hardFetchLedgerRangeNoThrow() const;
|
||||
// Doesn't throw DatabaseTimeout. Should be used with care.
|
||||
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
|
||||
std::optional<LedgerRange>
|
||||
hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const;
|
||||
|
||||
/**
|
||||
* @brief Writes to a specific ledger.
|
||||
*
|
||||
* @param ledgerInfo Const on ledger information.
|
||||
* @param ledgerHeader r-value string representing ledger header.
|
||||
*/
|
||||
virtual void
|
||||
writeLedger(
|
||||
ripple::LedgerInfo const& ledgerInfo,
|
||||
std::string&& ledgerHeader) = 0;
|
||||
|
||||
/**
|
||||
* @brief Writes a new ledger object.
|
||||
*
|
||||
* The key and blob are r-value references and do NOT have memory addresses.
|
||||
*
|
||||
* @param key String represented as an r-value.
|
||||
* @param seq Unsigned integer representing a sequence.
|
||||
* @param blob r-value vector of unsigned characters (blob).
|
||||
*/
|
||||
virtual void
|
||||
writeLedgerObject(
|
||||
std::string&& key,
|
||||
std::uint32_t const seq,
|
||||
std::string&& blob);
|
||||
|
||||
/**
|
||||
* @brief Writes a new transaction.
|
||||
*
|
||||
* @param hash r-value reference. No memory address.
|
||||
* @param seq Unsigned 32-bit integer.
|
||||
* @param date Unsigned 32-bit integer.
|
||||
* @param transaction r-value reference. No memory address.
|
||||
* @param metadata r-value refrence. No memory address.
|
||||
*/
|
||||
virtual void
|
||||
writeTransaction(
|
||||
std::string&& hash,
|
||||
@@ -285,44 +554,101 @@ public:
|
||||
std::string&& transaction,
|
||||
std::string&& metadata) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new NFT.
|
||||
*
|
||||
* @param data Passed in as an r-value reference.
|
||||
*/
|
||||
virtual void
|
||||
writeNFTs(std::vector<NFTsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new set of account transactions.
|
||||
*
|
||||
* @param data Passed in as an r-value reference.
|
||||
*/
|
||||
virtual void
|
||||
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new transaction for a specific NFT.
|
||||
*
|
||||
* @param data Passed in as an r-value reference.
|
||||
*/
|
||||
virtual void
|
||||
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new successor.
|
||||
*
|
||||
* @param key Passed in as an r-value reference.
|
||||
* @param seq Unsigned 32-bit integer.
|
||||
* @param successor Passed in as an r-value reference.
|
||||
*/
|
||||
virtual void
|
||||
writeSuccessor(
|
||||
std::string&& key,
|
||||
std::uint32_t const seq,
|
||||
std::string&& successor) = 0;
|
||||
|
||||
// Tell the database we are about to begin writing data for a particular
|
||||
// ledger.
|
||||
/*! @brief Tells database we will write data for a specific ledger. */
|
||||
virtual void
|
||||
startWrites() const = 0;
|
||||
|
||||
// Tell the database we have finished writing all data for a particular
|
||||
// ledger
|
||||
// TODO change the return value to represent different results. committed,
|
||||
// write conflict, errored, successful but not committed
|
||||
/**
|
||||
* @brief Tells database we finished writing all data for a specific ledger.
|
||||
*
|
||||
* TODO: change the return value to represent different results:
|
||||
* Committed, write conflict, errored, successful but not committed
|
||||
*
|
||||
* @param ledgerSequence Const unsigned 32-bit integer on ledger sequence.
|
||||
* @return true
|
||||
* @return false
|
||||
*/
|
||||
bool
|
||||
finishWrites(std::uint32_t const ledgerSequence);
|
||||
|
||||
/**
|
||||
* @brief Selectively delets parts of the database.
|
||||
*
|
||||
* @param numLedgersToKeep Unsigned 32-bit integer on number of ledgers to
|
||||
* keep.
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return true
|
||||
* @return false
|
||||
*/
|
||||
virtual bool
|
||||
doOnlineDelete(
|
||||
std::uint32_t numLedgersToKeep,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
// Open the database. Set up all of the necessary objects and
|
||||
// datastructures. After this call completes, the database is ready for
|
||||
// use.
|
||||
/**
|
||||
* @brief Opens the database
|
||||
*
|
||||
* Open the database. Set up all of the necessary objects and
|
||||
* datastructures. After this call completes, the database is
|
||||
* ready for use.
|
||||
*
|
||||
* @param readOnly Boolean whether ledger is read only.
|
||||
*/
|
||||
virtual void
|
||||
open(bool readOnly) = 0;
|
||||
|
||||
// Close the database, releasing any resources
|
||||
/*! @brief Closes the database, releasing any resources. */
|
||||
virtual void
|
||||
close(){};
|
||||
|
||||
// *** private helper methods
|
||||
virtual bool
|
||||
isTooBusy() const = 0;
|
||||
|
||||
private:
|
||||
/**
|
||||
* @brief Private helper method to write ledger object
|
||||
*
|
||||
* @param key r-value string representing key.
|
||||
* @param seq Unsigned 32-bit integer representing sequence.
|
||||
* @param blob r-value vector of unsigned chars.
|
||||
*/
|
||||
virtual void
|
||||
doWriteLedgerObject(
|
||||
std::string&& key,
|
||||
@@ -335,4 +661,3 @@ private:
|
||||
|
||||
} // namespace Backend
|
||||
using BackendInterface = Backend::BackendInterface;
|
||||
#endif
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,28 +1,52 @@
|
||||
#ifndef RIPPLE_APP_REPORTING_CASSANDRABACKEND_H_INCLUDED
|
||||
#define RIPPLE_APP_REPORTING_CASSANDRABACKEND_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <backend/DBHelpers.h>
|
||||
#include <log/Logger.h>
|
||||
|
||||
#include <cassandra.h>
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/asio/async_result.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/log/trivial.hpp>
|
||||
|
||||
#include <atomic>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <backend/DBHelpers.h>
|
||||
#include <cassandra.h>
|
||||
#include <cstddef>
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
|
||||
#include <config/Config.h>
|
||||
|
||||
namespace Backend {
|
||||
|
||||
class CassandraPreparedStatement
|
||||
{
|
||||
private:
|
||||
clio::Logger log_{"Backend"};
|
||||
CassPrepared const* prepared_ = nullptr;
|
||||
|
||||
public:
|
||||
@@ -63,7 +87,7 @@ public:
|
||||
std::stringstream ss;
|
||||
ss << "nodestore: Error preparing statement : " << rc << ", "
|
||||
<< cass_error_desc(rc) << ". query : " << query;
|
||||
BOOST_LOG_TRIVIAL(error) << ss.str();
|
||||
log_.error() << ss.str();
|
||||
}
|
||||
cass_future_free(prepareFuture);
|
||||
return rc == CASS_OK;
|
||||
@@ -71,7 +95,7 @@ public:
|
||||
|
||||
~CassandraPreparedStatement()
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__;
|
||||
log_.trace() << "called";
|
||||
if (prepared_)
|
||||
{
|
||||
cass_prepared_free(prepared_);
|
||||
@@ -84,6 +108,7 @@ class CassandraStatement
|
||||
{
|
||||
CassStatement* statement_ = nullptr;
|
||||
size_t curBindingIndex_ = 0;
|
||||
clio::Logger log_{"Backend"};
|
||||
|
||||
public:
|
||||
CassandraStatement(CassandraPreparedStatement const& prepared)
|
||||
@@ -115,13 +140,13 @@ public:
|
||||
throw std::runtime_error(
|
||||
"CassandraStatement::bindNextBoolean - statement_ is null");
|
||||
CassError rc = cass_statement_bind_bool(
|
||||
statement_, 1, static_cast<cass_bool_t>(val));
|
||||
statement_, curBindingIndex_, static_cast<cass_bool_t>(val));
|
||||
if (rc != CASS_OK)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << "Error binding boolean to statement: " << rc << ", "
|
||||
<< cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
|
||||
log_.error() << ss.str();
|
||||
throw std::runtime_error(ss.str());
|
||||
}
|
||||
curBindingIndex_++;
|
||||
@@ -177,7 +202,7 @@ public:
|
||||
std::stringstream ss;
|
||||
ss << "Error binding bytes to statement: " << rc << ", "
|
||||
<< cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
|
||||
log_.error() << ss.str();
|
||||
throw std::runtime_error(ss.str());
|
||||
}
|
||||
curBindingIndex_++;
|
||||
@@ -189,8 +214,8 @@ public:
|
||||
if (!statement_)
|
||||
throw std::runtime_error(
|
||||
"CassandraStatement::bindNextUInt - statement_ is null");
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< std::to_string(curBindingIndex_) << " " << std::to_string(value);
|
||||
log_.trace() << std::to_string(curBindingIndex_) << " "
|
||||
<< std::to_string(value);
|
||||
CassError rc =
|
||||
cass_statement_bind_int32(statement_, curBindingIndex_, value);
|
||||
if (rc != CASS_OK)
|
||||
@@ -198,7 +223,7 @@ public:
|
||||
std::stringstream ss;
|
||||
ss << "Error binding uint to statement: " << rc << ", "
|
||||
<< cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
|
||||
log_.error() << ss.str();
|
||||
throw std::runtime_error(ss.str());
|
||||
}
|
||||
curBindingIndex_++;
|
||||
@@ -223,7 +248,7 @@ public:
|
||||
std::stringstream ss;
|
||||
ss << "Error binding int to statement: " << rc << ", "
|
||||
<< cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
|
||||
log_.error() << ss.str();
|
||||
throw std::runtime_error(ss.str());
|
||||
}
|
||||
curBindingIndex_++;
|
||||
@@ -239,7 +264,7 @@ public:
|
||||
std::stringstream ss;
|
||||
ss << "Error binding int to tuple: " << rc << ", "
|
||||
<< cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
|
||||
log_.error() << ss.str();
|
||||
throw std::runtime_error(ss.str());
|
||||
}
|
||||
rc = cass_tuple_set_int64(tuple, 1, second);
|
||||
@@ -248,7 +273,7 @@ public:
|
||||
std::stringstream ss;
|
||||
ss << "Error binding int to tuple: " << rc << ", "
|
||||
<< cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
|
||||
log_.error() << ss.str();
|
||||
throw std::runtime_error(ss.str());
|
||||
}
|
||||
rc = cass_statement_bind_tuple(statement_, curBindingIndex_, tuple);
|
||||
@@ -257,7 +282,7 @@ public:
|
||||
std::stringstream ss;
|
||||
ss << "Error binding tuple to statement: " << rc << ", "
|
||||
<< cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
|
||||
log_.error() << ss.str();
|
||||
throw std::runtime_error(ss.str());
|
||||
}
|
||||
cass_tuple_free(tuple);
|
||||
@@ -273,6 +298,7 @@ public:
|
||||
|
||||
class CassandraResult
|
||||
{
|
||||
clio::Logger log_{"Backend"};
|
||||
CassResult const* result_ = nullptr;
|
||||
CassRow const* row_ = nullptr;
|
||||
CassIterator* iter_ = nullptr;
|
||||
@@ -363,7 +389,7 @@ public:
|
||||
std::stringstream msg;
|
||||
msg << "CassandraResult::getBytes - error getting value: " << rc
|
||||
<< ", " << cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << msg.str();
|
||||
log_.error() << msg.str();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
curGetIndex_++;
|
||||
@@ -384,7 +410,7 @@ public:
|
||||
std::stringstream msg;
|
||||
msg << "CassandraResult::getuint256 - error getting value: " << rc
|
||||
<< ", " << cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << msg.str();
|
||||
log_.error() << msg.str();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
curGetIndex_++;
|
||||
@@ -404,7 +430,7 @@ public:
|
||||
std::stringstream msg;
|
||||
msg << "CassandraResult::getInt64 - error getting value: " << rc
|
||||
<< ", " << cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << msg.str();
|
||||
log_.error() << msg.str();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
++curGetIndex_;
|
||||
@@ -481,6 +507,31 @@ public:
|
||||
return {first, second};
|
||||
}
|
||||
|
||||
// TODO: should be replaced with a templated implementation as is very
|
||||
// similar to other getters
|
||||
bool
|
||||
getBool()
|
||||
{
|
||||
if (!row_)
|
||||
{
|
||||
std::string msg{"No result"};
|
||||
log_.error() << msg;
|
||||
throw std::runtime_error(msg);
|
||||
}
|
||||
cass_bool_t val;
|
||||
CassError rc =
|
||||
cass_value_get_bool(cass_row_get_column(row_, curGetIndex_), &val);
|
||||
if (rc != CASS_OK)
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << "Error getting value: " << rc << ", " << cass_error_desc(rc);
|
||||
log_.error() << msg.str();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
++curGetIndex_;
|
||||
return val;
|
||||
}
|
||||
|
||||
~CassandraResult()
|
||||
{
|
||||
if (result_ != nullptr)
|
||||
@@ -489,6 +540,7 @@ public:
|
||||
cass_iterator_free(iter_);
|
||||
}
|
||||
};
|
||||
|
||||
inline bool
|
||||
isTimeout(CassError rc)
|
||||
{
|
||||
@@ -568,6 +620,7 @@ private:
|
||||
return ret;
|
||||
}
|
||||
|
||||
clio::Logger log_{"Backend"};
|
||||
std::atomic<bool> open_{false};
|
||||
|
||||
std::unique_ptr<CassSession, void (*)(CassSession*)> session_{
|
||||
@@ -599,6 +652,12 @@ private:
|
||||
CassandraPreparedStatement insertAccountTx_;
|
||||
CassandraPreparedStatement selectAccountTx_;
|
||||
CassandraPreparedStatement selectAccountTxForward_;
|
||||
CassandraPreparedStatement insertNFT_;
|
||||
CassandraPreparedStatement selectNFT_;
|
||||
CassandraPreparedStatement insertIssuerNFT_;
|
||||
CassandraPreparedStatement insertNFTTx_;
|
||||
CassandraPreparedStatement selectNFTTx_;
|
||||
CassandraPreparedStatement selectNFTTxForward_;
|
||||
CassandraPreparedStatement insertLedgerHeader_;
|
||||
CassandraPreparedStatement insertLedgerHash_;
|
||||
CassandraPreparedStatement updateLedgerRange_;
|
||||
@@ -612,16 +671,18 @@ private:
|
||||
uint32_t syncInterval_ = 1;
|
||||
uint32_t lastSync_ = 0;
|
||||
|
||||
// maximum number of concurrent in flight requests. New requests will wait
|
||||
// for earlier requests to finish if this limit is exceeded
|
||||
std::uint32_t maxRequestsOutstanding = 10000;
|
||||
// we keep this small because the indexer runs in the background, and we
|
||||
// don't want the database to be swamped when the indexer is running
|
||||
std::uint32_t indexerMaxRequestsOutstanding = 10;
|
||||
mutable std::atomic_uint32_t numRequestsOutstanding_ = 0;
|
||||
// maximum number of concurrent in flight write requests. New requests will
|
||||
// wait for earlier requests to finish if this limit is exceeded
|
||||
std::uint32_t maxWriteRequestsOutstanding = 10000;
|
||||
mutable std::atomic_uint32_t numWriteRequestsOutstanding_ = 0;
|
||||
|
||||
// maximum number of concurrent in flight read requests. isTooBusy() will
|
||||
// return true if the number of in flight read requests exceeds this limit
|
||||
std::uint32_t maxReadRequestsOutstanding = 100000;
|
||||
mutable std::atomic_uint32_t numReadRequestsOutstanding_ = 0;
|
||||
|
||||
// mutex and condition_variable to limit the number of concurrent in flight
|
||||
// requests
|
||||
// write requests
|
||||
mutable std::mutex throttleMutex_;
|
||||
mutable std::condition_variable throttleCv_;
|
||||
|
||||
@@ -635,15 +696,17 @@ private:
|
||||
std::optional<boost::asio::io_context::work> work_;
|
||||
std::thread ioThread_;
|
||||
|
||||
boost::json::object config_;
|
||||
clio::Config config_;
|
||||
uint32_t ttl_ = 0;
|
||||
|
||||
mutable std::uint32_t ledgerSequence_ = 0;
|
||||
|
||||
public:
|
||||
CassandraBackend(
|
||||
boost::asio::io_context& ioc,
|
||||
boost::json::object const& config)
|
||||
: BackendInterface(config), config_(config)
|
||||
clio::Config const& config,
|
||||
uint32_t ttl)
|
||||
: BackendInterface(config), config_(config), ttl_(ttl)
|
||||
{
|
||||
work_.emplace(ioContext_);
|
||||
ioThread_ = std::thread([this]() { ioContext_.run(); });
|
||||
@@ -683,12 +746,12 @@ public:
|
||||
open_ = false;
|
||||
}
|
||||
|
||||
AccountTransactions
|
||||
TransactionsAndCursor
|
||||
fetchAccountTransactions(
|
||||
ripple::AccountID const& account,
|
||||
std::uint32_t const limit,
|
||||
bool forward,
|
||||
std::optional<AccountTransactionsCursor> const& cursor,
|
||||
std::optional<TransactionsCursor> const& cursor,
|
||||
boost::asio::yield_context& yield) const override;
|
||||
|
||||
bool
|
||||
@@ -712,13 +775,11 @@ public:
|
||||
statement.bindNextInt(ledgerSequence_ - 1);
|
||||
if (!executeSyncUpdate(statement))
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< __func__ << " Update failed for ledger "
|
||||
<< std::to_string(ledgerSequence_) << ". Returning";
|
||||
log_.warn() << "Update failed for ledger "
|
||||
<< std::to_string(ledgerSequence_) << ". Returning";
|
||||
return false;
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " Committed ledger "
|
||||
<< std::to_string(ledgerSequence_);
|
||||
log_.info() << "Committed ledger " << std::to_string(ledgerSequence_);
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -752,22 +813,20 @@ public:
|
||||
statement.bindNextInt(lastSync_);
|
||||
if (!executeSyncUpdate(statement))
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< __func__ << " Update failed for ledger "
|
||||
<< std::to_string(ledgerSequence_) << ". Returning";
|
||||
log_.warn() << "Update failed for ledger "
|
||||
<< std::to_string(ledgerSequence_) << ". Returning";
|
||||
return false;
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " Committed ledger "
|
||||
<< std::to_string(ledgerSequence_);
|
||||
log_.info() << "Committed ledger "
|
||||
<< std::to_string(ledgerSequence_);
|
||||
lastSync_ = ledgerSequence_;
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__ << " Skipping commit. sync interval is "
|
||||
<< std::to_string(syncInterval_) << " - last sync is "
|
||||
<< std::to_string(lastSync_) << " - ledger sequence is "
|
||||
<< std::to_string(ledgerSequence_);
|
||||
log_.info() << "Skipping commit. sync interval is "
|
||||
<< std::to_string(syncInterval_) << " - last sync is "
|
||||
<< std::to_string(lastSync_) << " - ledger sequence is "
|
||||
<< std::to_string(ledgerSequence_);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@@ -787,12 +846,12 @@ public:
|
||||
std::optional<std::uint32_t>
|
||||
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__;
|
||||
log_.trace() << "called";
|
||||
CassandraStatement statement{selectLatestLedger_};
|
||||
CassandraResult result = executeAsyncRead(statement, yield);
|
||||
if (!result.hasResult())
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
log_.error()
|
||||
<< "CassandraBackend::fetchLatestLedgerSequence - no rows";
|
||||
return {};
|
||||
}
|
||||
@@ -804,13 +863,13 @@ public:
|
||||
std::uint32_t const sequence,
|
||||
boost::asio::yield_context& yield) const override
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__;
|
||||
log_.trace() << "called";
|
||||
CassandraStatement statement{selectLedgerBySeq_};
|
||||
statement.bindNextInt(sequence);
|
||||
CassandraResult result = executeAsyncRead(statement, yield);
|
||||
if (!result)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
|
||||
log_.error() << "No rows";
|
||||
return {};
|
||||
}
|
||||
std::vector<unsigned char> header = result.getBytes();
|
||||
@@ -830,7 +889,7 @@ public:
|
||||
|
||||
if (!result.hasResult())
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned";
|
||||
log_.debug() << "No rows returned";
|
||||
return {};
|
||||
}
|
||||
|
||||
@@ -852,6 +911,20 @@ public:
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const override;
|
||||
|
||||
std::optional<NFT>
|
||||
fetchNFT(
|
||||
ripple::uint256 const& tokenID,
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const override;
|
||||
|
||||
TransactionsAndCursor
|
||||
fetchNFTTransactions(
|
||||
ripple::uint256 const& tokenID,
|
||||
std::uint32_t const limit,
|
||||
bool const forward,
|
||||
std::optional<TransactionsCursor> const& cursorIn,
|
||||
boost::asio::yield_context& yield) const override;
|
||||
|
||||
// Synchronously fetch the object with key key, as of ledger with sequence
|
||||
// sequence
|
||||
std::optional<Blob>
|
||||
@@ -863,7 +936,7 @@ public:
|
||||
std::optional<int64_t>
|
||||
getToken(void const* key, boost::asio::yield_context& yield) const
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << "Fetching from cassandra";
|
||||
log_.trace() << "Fetching from cassandra";
|
||||
CassandraStatement statement{getToken_};
|
||||
statement.bindNextBytes(key, 32);
|
||||
|
||||
@@ -871,7 +944,7 @@ public:
|
||||
|
||||
if (!result)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
|
||||
log_.error() << "No rows";
|
||||
return {};
|
||||
}
|
||||
int64_t token = result.getInt64();
|
||||
@@ -886,14 +959,14 @@ public:
|
||||
ripple::uint256 const& hash,
|
||||
boost::asio::yield_context& yield) const override
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__;
|
||||
log_.trace() << "called";
|
||||
CassandraStatement statement{selectTransaction_};
|
||||
statement.bindNextBytes(hash);
|
||||
CassandraResult result = executeAsyncRead(statement, yield);
|
||||
|
||||
if (!result)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
|
||||
log_.error() << "No rows";
|
||||
return {};
|
||||
}
|
||||
return {
|
||||
@@ -941,6 +1014,9 @@ public:
|
||||
writeAccountTransactions(
|
||||
std::vector<AccountTransactionsData>&& data) override;
|
||||
|
||||
void
|
||||
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) override;
|
||||
|
||||
void
|
||||
writeTransaction(
|
||||
std::string&& hash,
|
||||
@@ -949,6 +1025,9 @@ public:
|
||||
std::string&& transaction,
|
||||
std::string&& metadata) override;
|
||||
|
||||
void
|
||||
writeNFTs(std::vector<NFTsData>&& data) override;
|
||||
|
||||
void
|
||||
startWrites() const override
|
||||
{
|
||||
@@ -967,33 +1046,34 @@ public:
|
||||
std::uint32_t const numLedgersToKeep,
|
||||
boost::asio::yield_context& yield) const override;
|
||||
|
||||
bool
|
||||
isTooBusy() const override;
|
||||
|
||||
inline void
|
||||
incremementOutstandingRequestCount() const
|
||||
incrementOutstandingRequestCount() const
|
||||
{
|
||||
{
|
||||
std::unique_lock<std::mutex> lck(throttleMutex_);
|
||||
if (!canAddRequest())
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__ << " : "
|
||||
<< "Max outstanding requests reached. "
|
||||
<< "Waiting for other requests to finish";
|
||||
log_.debug() << "Max outstanding requests reached. "
|
||||
<< "Waiting for other requests to finish";
|
||||
throttleCv_.wait(lck, [this]() { return canAddRequest(); });
|
||||
}
|
||||
}
|
||||
++numRequestsOutstanding_;
|
||||
++numWriteRequestsOutstanding_;
|
||||
}
|
||||
|
||||
inline void
|
||||
decrementOutstandingRequestCount() const
|
||||
{
|
||||
// sanity check
|
||||
if (numRequestsOutstanding_ == 0)
|
||||
if (numWriteRequestsOutstanding_ == 0)
|
||||
{
|
||||
assert(false);
|
||||
throw std::runtime_error("decrementing num outstanding below 0");
|
||||
}
|
||||
size_t cur = (--numRequestsOutstanding_);
|
||||
size_t cur = (--numWriteRequestsOutstanding_);
|
||||
{
|
||||
// mutex lock required to prevent race condition around spurious
|
||||
// wakeup
|
||||
@@ -1012,12 +1092,13 @@ public:
|
||||
inline bool
|
||||
canAddRequest() const
|
||||
{
|
||||
return numRequestsOutstanding_ < maxRequestsOutstanding;
|
||||
return numWriteRequestsOutstanding_ < maxWriteRequestsOutstanding;
|
||||
}
|
||||
|
||||
inline bool
|
||||
finishedAllRequests() const
|
||||
{
|
||||
return numRequestsOutstanding_ == 0;
|
||||
return numWriteRequestsOutstanding_ == 0;
|
||||
}
|
||||
|
||||
void
|
||||
@@ -1050,7 +1131,7 @@ public:
|
||||
bool isRetry) const
|
||||
{
|
||||
if (!isRetry)
|
||||
incremementOutstandingRequestCount();
|
||||
incrementOutstandingRequestCount();
|
||||
executeAsyncHelper(statement, callback, callbackData);
|
||||
}
|
||||
|
||||
@@ -1079,7 +1160,7 @@ public:
|
||||
ss << "Cassandra sync write error";
|
||||
ss << ", retrying";
|
||||
ss << ": " << cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(warning) << ss.str();
|
||||
log_.warn() << ss.str();
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
||||
}
|
||||
} while (rc != CASS_OK);
|
||||
@@ -1103,7 +1184,7 @@ public:
|
||||
ss << "Cassandra sync update error";
|
||||
ss << ", retrying";
|
||||
ss << ": " << cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(warning) << ss.str();
|
||||
log_.warn() << ss.str();
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
||||
}
|
||||
} while (rc != CASS_OK);
|
||||
@@ -1113,7 +1194,7 @@ public:
|
||||
CassRow const* row = cass_result_first_row(res);
|
||||
if (!row)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << "executeSyncUpdate - no rows";
|
||||
log_.error() << "executeSyncUpdate - no rows";
|
||||
cass_result_free(res);
|
||||
return false;
|
||||
}
|
||||
@@ -1122,16 +1203,14 @@ public:
|
||||
if (rc != CASS_OK)
|
||||
{
|
||||
cass_result_free(res);
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< "executeSyncUpdate - error getting result " << rc << ", "
|
||||
<< cass_error_desc(rc);
|
||||
log_.error() << "executeSyncUpdate - error getting result " << rc
|
||||
<< ", " << cass_error_desc(rc);
|
||||
return false;
|
||||
}
|
||||
cass_result_free(res);
|
||||
if (success != cass_true && timedOut)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< __func__ << " Update failed, but timedOut is true";
|
||||
log_.warn() << "Update failed, but timedOut is true";
|
||||
// if there was a timeout, the update may have succeeded in the
|
||||
// background on the first attempt. To determine if this happened,
|
||||
// we query the range from the db, making sure the range is what
|
||||
@@ -1158,22 +1237,23 @@ public:
|
||||
CassError rc;
|
||||
do
|
||||
{
|
||||
++numReadRequestsOutstanding_;
|
||||
fut = cass_session_execute(session_.get(), statement.get());
|
||||
|
||||
boost::system::error_code ec;
|
||||
rc = cass_future_error_code(fut, yield[ec]);
|
||||
--numReadRequestsOutstanding_;
|
||||
|
||||
if (ec)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< "Cannot read async cass_future_error_code";
|
||||
log_.error() << "Cannot read async cass_future_error_code";
|
||||
}
|
||||
if (rc != CASS_OK)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << "Cassandra executeAsyncRead error";
|
||||
ss << ": " << cass_error_desc(rc);
|
||||
BOOST_LOG_TRIVIAL(error) << ss.str();
|
||||
log_.error() << ss.str();
|
||||
}
|
||||
if (isTimeout(rc))
|
||||
{
|
||||
@@ -1196,4 +1276,3 @@ public:
|
||||
};
|
||||
|
||||
} // namespace Backend
|
||||
#endif
|
||||
|
||||
@@ -1,16 +1,37 @@
|
||||
#ifndef CLIO_BACKEND_DBHELPERS_H_INCLUDED
|
||||
#define CLIO_BACKEND_DBHELPERS_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/ledger/ReadView.h>
|
||||
#include <ripple/protocol/SField.h>
|
||||
#include <ripple/protocol/STAccount.h>
|
||||
#include <ripple/protocol/TxMeta.h>
|
||||
|
||||
#include <boost/container/flat_set.hpp>
|
||||
#include <backend/Pg.h>
|
||||
|
||||
#include <backend/Types.h>
|
||||
|
||||
/// Struct used to keep track of what to write to transactions and
|
||||
/// account_transactions tables in Postgres
|
||||
/// Struct used to keep track of what to write to
|
||||
/// account_transactions/account_tx tables
|
||||
struct AccountTransactionsData
|
||||
{
|
||||
boost::container::flat_set<ripple::AccountID> accounts;
|
||||
@@ -32,6 +53,57 @@ struct AccountTransactionsData
|
||||
AccountTransactionsData() = default;
|
||||
};
|
||||
|
||||
/// Represents a link from a tx to an NFT that was targeted/modified/created
|
||||
/// by it. Gets written to nf_token_transactions table and the like.
|
||||
struct NFTTransactionsData
|
||||
{
|
||||
ripple::uint256 tokenID;
|
||||
std::uint32_t ledgerSequence;
|
||||
std::uint32_t transactionIndex;
|
||||
ripple::uint256 txHash;
|
||||
|
||||
NFTTransactionsData(
|
||||
ripple::uint256 const& tokenID,
|
||||
ripple::TxMeta const& meta,
|
||||
ripple::uint256 const& txHash)
|
||||
: tokenID(tokenID)
|
||||
, ledgerSequence(meta.getLgrSeq())
|
||||
, transactionIndex(meta.getIndex())
|
||||
, txHash(txHash)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
/// Represents an NFT state at a particular ledger. Gets written to nf_tokens
|
||||
/// table and the like.
|
||||
struct NFTsData
|
||||
{
|
||||
ripple::uint256 tokenID;
|
||||
std::uint32_t ledgerSequence;
|
||||
|
||||
// The transaction index is only stored because we want to store only the
|
||||
// final state of an NFT per ledger. Since we pull this from transactions
|
||||
// we keep track of which tx index created this so we can de-duplicate, as
|
||||
// it is possible for one ledger to have multiple txs that change the
|
||||
// state of the same NFT.
|
||||
std::uint32_t transactionIndex;
|
||||
ripple::AccountID owner;
|
||||
bool isBurned;
|
||||
|
||||
NFTsData(
|
||||
ripple::uint256 const& tokenID,
|
||||
ripple::AccountID const& owner,
|
||||
ripple::TxMeta const& meta,
|
||||
bool isBurned)
|
||||
: tokenID(tokenID)
|
||||
, ledgerSequence(meta.getLgrSeq())
|
||||
, transactionIndex(meta.getIndex())
|
||||
, owner(owner)
|
||||
, isBurned(isBurned)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
template <class T>
|
||||
inline bool
|
||||
isOffer(T const& object)
|
||||
@@ -127,4 +199,3 @@ uint256ToString(ripple::uint256 const& uint)
|
||||
}
|
||||
|
||||
static constexpr std::uint32_t rippleEpochStart = 946684800;
|
||||
#endif
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
#include <backend/LayeredCache.h>
|
||||
namespace Backend {
|
||||
|
||||
void
|
||||
LayeredCache::insert(
|
||||
ripple::uint256 const& key,
|
||||
Blob const& value,
|
||||
uint32_t seq)
|
||||
{
|
||||
auto entry = map_[key];
|
||||
// stale insert, do nothing
|
||||
if (seq <= entry.recent.seq)
|
||||
return;
|
||||
entry.old = entry.recent;
|
||||
entry.recent = {seq, value};
|
||||
if (value.empty())
|
||||
pendingDeletes_.push_back(key);
|
||||
if (!entry.old.blob.empty())
|
||||
pendingSweeps_.push_back(key);
|
||||
}
|
||||
|
||||
std::optional<Blob>
|
||||
LayeredCache::select(CacheEntry const& entry, uint32_t seq) const
|
||||
{
|
||||
if (seq < entry.old.seq)
|
||||
return {};
|
||||
if (seq < entry.recent.seq && !entry.old.blob.empty())
|
||||
return entry.old.blob;
|
||||
if (!entry.recent.blob.empty())
|
||||
return entry.recent.blob;
|
||||
return {};
|
||||
}
|
||||
void
|
||||
LayeredCache::update(std::vector<LedgerObject> const& blobs, uint32_t seq)
|
||||
{
|
||||
std::unique_lock lck{mtx_};
|
||||
if (seq > mostRecentSequence_)
|
||||
mostRecentSequence_ = seq;
|
||||
for (auto const& k : pendingSweeps_)
|
||||
{
|
||||
auto e = map_[k];
|
||||
e.old = {};
|
||||
}
|
||||
for (auto const& k : pendingDeletes_)
|
||||
{
|
||||
map_.erase(k);
|
||||
}
|
||||
for (auto const& b : blobs)
|
||||
{
|
||||
insert(b.key, b.blob, seq);
|
||||
}
|
||||
}
|
||||
std::optional<LedgerObject>
|
||||
LayeredCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
|
||||
{
|
||||
ripple::uint256 curKey = key;
|
||||
while (true)
|
||||
{
|
||||
std::shared_lock lck{mtx_};
|
||||
if (seq < mostRecentSequence_ - 1)
|
||||
return {};
|
||||
auto e = map_.upper_bound(curKey);
|
||||
if (e == map_.end())
|
||||
return {};
|
||||
auto const& entry = e->second;
|
||||
auto blob = select(entry, seq);
|
||||
if (!blob)
|
||||
{
|
||||
curKey = e->first;
|
||||
continue;
|
||||
}
|
||||
else
|
||||
return {{e->first, *blob}};
|
||||
}
|
||||
}
|
||||
std::optional<LedgerObject>
|
||||
LayeredCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
|
||||
{
|
||||
ripple::uint256 curKey = key;
|
||||
std::shared_lock lck{mtx_};
|
||||
while (true)
|
||||
{
|
||||
if (seq < mostRecentSequence_ - 1)
|
||||
return {};
|
||||
auto e = map_.lower_bound(curKey);
|
||||
--e;
|
||||
if (e == map_.begin())
|
||||
return {};
|
||||
auto const& entry = e->second;
|
||||
auto blob = select(entry, seq);
|
||||
if (!blob)
|
||||
{
|
||||
curKey = e->first;
|
||||
continue;
|
||||
}
|
||||
else
|
||||
return {{e->first, *blob}};
|
||||
}
|
||||
}
|
||||
std::optional<Blob>
|
||||
LayeredCache::get(ripple::uint256 const& key, uint32_t seq) const
|
||||
{
|
||||
std::shared_lock lck{mtx_};
|
||||
auto e = map_.find(key);
|
||||
if (e == map_.end())
|
||||
return {};
|
||||
auto const& entry = e->second;
|
||||
return select(entry, seq);
|
||||
}
|
||||
} // namespace Backend
|
||||
@@ -1,73 +0,0 @@
|
||||
#ifndef CLIO_LAYEREDCACHE_H_INCLUDED
|
||||
#define CLIO_LAYEREDCACHE_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <backend/Types.h>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <shared_mutex>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
namespace Backend {
|
||||
class LayeredCache
|
||||
{
|
||||
struct SeqBlobPair
|
||||
{
|
||||
uint32_t seq;
|
||||
Blob blob;
|
||||
};
|
||||
struct CacheEntry
|
||||
{
|
||||
SeqBlobPair recent;
|
||||
SeqBlobPair old;
|
||||
};
|
||||
|
||||
std::map<ripple::uint256, CacheEntry> map_;
|
||||
std::vector<ripple::uint256> pendingDeletes_;
|
||||
std::vector<ripple::uint256> pendingSweeps_;
|
||||
mutable std::shared_mutex mtx_;
|
||||
uint32_t mostRecentSequence_;
|
||||
|
||||
void
|
||||
insert(ripple::uint256 const& key, Blob const& value, uint32_t seq);
|
||||
|
||||
/*
|
||||
void
|
||||
insert(ripple::uint256 const& key, Blob const& value, uint32_t seq)
|
||||
{
|
||||
map_.emplace(key,{{seq,value,{}});
|
||||
}
|
||||
void
|
||||
update(ripple::uint256 const& key, Blob const& value, uint32_t seq)
|
||||
{
|
||||
auto& entry = map_.find(key);
|
||||
entry.old = entry.recent;
|
||||
entry.recent = {seq, value};
|
||||
pendingSweeps_.push_back(key);
|
||||
}
|
||||
void
|
||||
erase(ripple::uint256 const& key, uint32_t seq)
|
||||
{
|
||||
update(key, {}, seq);
|
||||
pendingDeletes_.push_back(key);
|
||||
}
|
||||
*/
|
||||
std::optional<Blob>
|
||||
select(CacheEntry const& entry, uint32_t seq) const;
|
||||
|
||||
public:
|
||||
void
|
||||
update(std::vector<LedgerObject> const& blobs, uint32_t seq);
|
||||
|
||||
std::optional<Blob>
|
||||
get(ripple::uint256 const& key, uint32_t seq) const;
|
||||
|
||||
std::optional<LedgerObject>
|
||||
getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
|
||||
std::optional<LedgerObject>
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
};
|
||||
|
||||
} // namespace Backend
|
||||
#endif
|
||||
1789
src/backend/Pg.cpp
1789
src/backend/Pg.cpp
File diff suppressed because it is too large
Load Diff
571
src/backend/Pg.h
571
src/backend/Pg.h
@@ -1,571 +0,0 @@
|
||||
#ifndef RIPPLE_CORE_PG_H_INCLUDED
|
||||
#define RIPPLE_CORE_PG_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/chrono.h>
|
||||
#include <ripple/ledger/ReadView.h>
|
||||
#include <boost/asio/io_context.hpp>
|
||||
#include <boost/asio/ip/tcp.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/icl/closed_interval.hpp>
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <boost/log/trivial.hpp>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <condition_variable>
|
||||
#include <functional>
|
||||
#include <libpq-fe.h>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// These postgres structs must be freed only by the postgres API.
|
||||
using pg_result_type = std::unique_ptr<PGresult, void (*)(PGresult*)>;
|
||||
using pg_connection_type = std::unique_ptr<PGconn, void (*)(PGconn*)>;
|
||||
using asio_socket_type = std::unique_ptr<
|
||||
boost::asio::ip::tcp::socket,
|
||||
void (*)(boost::asio::ip::tcp::socket*)>;
|
||||
|
||||
/** first: command
|
||||
* second: parameter values
|
||||
*
|
||||
* The 2nd member takes an optional string to
|
||||
* distinguish between NULL parameters and empty strings. An empty
|
||||
* item corresponds to a NULL parameter.
|
||||
*
|
||||
* Postgres reads each parameter as a c-string, regardless of actual type.
|
||||
* Binary types (bytea) need to be converted to hex and prepended with
|
||||
* \x ("\\x").
|
||||
*/
|
||||
using pg_params =
|
||||
std::pair<char const*, std::vector<std::optional<std::string>>>;
|
||||
|
||||
/** Parameter values for pg API. */
|
||||
using pg_formatted_params = std::vector<char const*>;
|
||||
|
||||
/** Parameters for managing postgres connections. */
|
||||
struct PgConfig
|
||||
{
|
||||
/** Maximum connections allowed to db. */
|
||||
std::size_t max_connections{1000};
|
||||
/** Close idle connections past this duration. */
|
||||
std::chrono::seconds timeout{600};
|
||||
|
||||
/** Index of DB connection parameter names. */
|
||||
std::vector<char const*> keywordsIdx;
|
||||
/** DB connection parameter names. */
|
||||
std::vector<std::string> keywords;
|
||||
/** Index of DB connection parameter values. */
|
||||
std::vector<char const*> valuesIdx;
|
||||
/** DB connection parameter values. */
|
||||
std::vector<std::string> values;
|
||||
};
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
/** Class that operates on postgres query results.
|
||||
*
|
||||
* The functions that return results do not check first whether the
|
||||
* expected results are actually there. Therefore, the caller first needs
|
||||
* to check whether or not a valid response was returned using the operator
|
||||
* bool() overload. If number of tuples or fields are unknown, then check
|
||||
* those. Each result field should be checked for null before attempting
|
||||
* to return results. Finally, the caller must know the type of the field
|
||||
* before calling the corresponding function to return a field. Postgres
|
||||
* internally stores each result field as null-terminated strings.
|
||||
*/
|
||||
class PgResult
|
||||
{
|
||||
// The result object must be freed using the libpq API PQclear() call.
|
||||
pg_result_type result_{nullptr, [](PGresult* result) { PQclear(result); }};
|
||||
std::optional<std::pair<ExecStatusType, std::string>> error_;
|
||||
|
||||
public:
|
||||
/** Constructor for when the process is stopping.
|
||||
*
|
||||
*/
|
||||
PgResult()
|
||||
{
|
||||
}
|
||||
|
||||
/** Constructor for successful query results.
|
||||
*
|
||||
* @param result Query result.
|
||||
*/
|
||||
explicit PgResult(pg_result_type&& result) : result_(std::move(result))
|
||||
{
|
||||
}
|
||||
|
||||
/** Constructor for failed query results.
|
||||
*
|
||||
* @param result Query result that contains error information.
|
||||
* @param conn Postgres connection that contains error information.
|
||||
*/
|
||||
PgResult(PGresult* result, PGconn* conn)
|
||||
: error_({PQresultStatus(result), PQerrorMessage(conn)})
|
||||
{
|
||||
}
|
||||
|
||||
/** Return field as a null-terminated string pointer.
|
||||
*
|
||||
* Note that this function does not guarantee that the result struct
|
||||
* exists, or that the row and fields exist, or that the field is
|
||||
* not null.
|
||||
*
|
||||
* @param ntuple Row number.
|
||||
* @param nfield Field number.
|
||||
* @return Field contents.
|
||||
*/
|
||||
char const*
|
||||
c_str(int ntuple = 0, int nfield = 0) const
|
||||
{
|
||||
return PQgetvalue(result_.get(), ntuple, nfield);
|
||||
}
|
||||
|
||||
std::vector<unsigned char>
|
||||
asUnHexedBlob(int ntuple = 0, int nfield = 0) const
|
||||
{
|
||||
std::string_view view{c_str(ntuple, nfield) + 2};
|
||||
auto res = ripple::strUnHex(view.size(), view.cbegin(), view.cend());
|
||||
if (res)
|
||||
return *res;
|
||||
return {};
|
||||
}
|
||||
|
||||
ripple::uint256
|
||||
asUInt256(int ntuple = 0, int nfield = 0) const
|
||||
{
|
||||
ripple::uint256 val;
|
||||
if (!val.parseHex(c_str(ntuple, nfield) + 2))
|
||||
throw std::runtime_error("Pg - failed to parse hex into uint256");
|
||||
return val;
|
||||
}
|
||||
|
||||
/** Return field as equivalent to Postgres' INT type (32 bit signed).
|
||||
*
|
||||
* Note that this function does not guarantee that the result struct
|
||||
* exists, or that the row and fields exist, or that the field is
|
||||
* not null, or that the type is that requested.
|
||||
|
||||
* @param ntuple Row number.
|
||||
* @param nfield Field number.
|
||||
* @return Field contents.
|
||||
*/
|
||||
std::int32_t
|
||||
asInt(int ntuple = 0, int nfield = 0) const
|
||||
{
|
||||
return boost::lexical_cast<std::int32_t>(
|
||||
PQgetvalue(result_.get(), ntuple, nfield));
|
||||
}
|
||||
|
||||
/** Return field as equivalent to Postgres' BIGINT type (64 bit signed).
|
||||
*
|
||||
* Note that this function does not guarantee that the result struct
|
||||
* exists, or that the row and fields exist, or that the field is
|
||||
* not null, or that the type is that requested.
|
||||
|
||||
* @param ntuple Row number.
|
||||
* @param nfield Field number.
|
||||
* @return Field contents.
|
||||
*/
|
||||
std::int64_t
|
||||
asBigInt(int ntuple = 0, int nfield = 0) const
|
||||
{
|
||||
return boost::lexical_cast<std::int64_t>(
|
||||
PQgetvalue(result_.get(), ntuple, nfield));
|
||||
}
|
||||
|
||||
/** Returns whether the field is NULL or not.
|
||||
*
|
||||
* Note that this function does not guarantee that the result struct
|
||||
* exists, or that the row and fields exist.
|
||||
*
|
||||
* @param ntuple Row number.
|
||||
* @param nfield Field number.
|
||||
* @return Whether field is NULL.
|
||||
*/
|
||||
bool
|
||||
isNull(int ntuple = 0, int nfield = 0) const
|
||||
{
|
||||
return PQgetisnull(result_.get(), ntuple, nfield);
|
||||
}
|
||||
|
||||
/** Check whether a valid response occurred.
|
||||
*
|
||||
* @return Whether or not the query returned a valid response.
|
||||
*/
|
||||
operator bool() const
|
||||
{
|
||||
return result_ != nullptr;
|
||||
}
|
||||
|
||||
/** Message describing the query results suitable for diagnostics.
|
||||
*
|
||||
* If error, then the postgres error type and message are returned.
|
||||
* Otherwise, "ok"
|
||||
*
|
||||
* @return Query result message.
|
||||
*/
|
||||
std::string
|
||||
msg() const;
|
||||
|
||||
/** Get number of rows in result.
|
||||
*
|
||||
* Note that this function does not guarantee that the result struct
|
||||
* exists.
|
||||
*
|
||||
* @return Number of result rows.
|
||||
*/
|
||||
int
|
||||
ntuples() const
|
||||
{
|
||||
return PQntuples(result_.get());
|
||||
}
|
||||
|
||||
/** Get number of fields in result.
|
||||
*
|
||||
* Note that this function does not guarantee that the result struct
|
||||
* exists.
|
||||
*
|
||||
* @return Number of result fields.
|
||||
*/
|
||||
int
|
||||
nfields() const
|
||||
{
|
||||
return PQnfields(result_.get());
|
||||
}
|
||||
|
||||
/** Return result status of the command.
|
||||
*
|
||||
* Note that this function does not guarantee that the result struct
|
||||
* exists.
|
||||
*
|
||||
* @return
|
||||
*/
|
||||
ExecStatusType
|
||||
status() const
|
||||
{
|
||||
return PQresultStatus(result_.get());
|
||||
}
|
||||
};
|
||||
|
||||
/* Class that contains and operates upon a postgres connection. */
|
||||
class Pg
|
||||
{
|
||||
friend class PgPool;
|
||||
friend class PgQuery;
|
||||
|
||||
PgConfig const& config_;
|
||||
boost::asio::io_context::strand strand_;
|
||||
bool& stop_;
|
||||
std::mutex& mutex_;
|
||||
|
||||
asio_socket_type socket_{nullptr, [](boost::asio::ip::tcp::socket*) {}};
|
||||
|
||||
// The connection object must be freed using the libpq API PQfinish() call.
|
||||
pg_connection_type conn_{nullptr, [](PGconn* conn) { PQfinish(conn); }};
|
||||
|
||||
inline asio_socket_type
|
||||
getSocket(boost::asio::yield_context& strand);
|
||||
|
||||
inline PgResult
|
||||
waitForStatus(boost::asio::yield_context& yield, ExecStatusType expected);
|
||||
|
||||
inline void
|
||||
flush(boost::asio::yield_context& yield);
|
||||
|
||||
/** Clear results from the connection.
|
||||
*
|
||||
* Results from previous commands must be cleared before new commands
|
||||
* can be processed. This function should be called on connections
|
||||
* that weren't processed completely before being reused, such as
|
||||
* when being checked-in.
|
||||
*
|
||||
* @return whether or not connection still exists.
|
||||
*/
|
||||
bool
|
||||
clear();
|
||||
|
||||
/** Connect to postgres.
|
||||
*
|
||||
* Idempotently connects to postgres by first checking whether an
|
||||
* existing connection is already present. If connection is not present
|
||||
* or in an errored state, reconnects to the database.
|
||||
*/
|
||||
void
|
||||
connect(boost::asio::yield_context& yield);
|
||||
|
||||
/** Disconnect from postgres. */
|
||||
void
|
||||
disconnect()
|
||||
{
|
||||
conn_.reset();
|
||||
socket_.reset();
|
||||
}
|
||||
|
||||
/** Execute postgres query.
|
||||
*
|
||||
* If parameters are included, then the command should contain only a
|
||||
* single SQL statement. If no parameters, then multiple SQL statements
|
||||
* delimited by semi-colons can be processed. The response is from
|
||||
* the last command executed.
|
||||
*
|
||||
* @param command postgres API command string.
|
||||
* @param nParams postgres API number of parameters.
|
||||
* @param values postgres API array of parameter.
|
||||
* @return Query result object.
|
||||
*/
|
||||
PgResult
|
||||
query(
|
||||
char const* command,
|
||||
std::size_t const nParams,
|
||||
char const* const* values,
|
||||
boost::asio::yield_context& yield);
|
||||
|
||||
/** Execute postgres query with no parameters.
|
||||
*
|
||||
* @param command Query string.
|
||||
* @return Query result object;
|
||||
*/
|
||||
PgResult
|
||||
query(char const* command, boost::asio::yield_context& yield)
|
||||
{
|
||||
return query(command, 0, nullptr, yield);
|
||||
}
|
||||
|
||||
/** Execute postgres query with parameters.
|
||||
*
|
||||
* @param dbParams Database command and parameter values.
|
||||
* @return Query result object.
|
||||
*/
|
||||
PgResult
|
||||
query(pg_params const& dbParams, boost::asio::yield_context& yield);
|
||||
|
||||
/** Insert multiple records into a table using Postgres' bulk COPY.
|
||||
*
|
||||
* Throws upon error.
|
||||
*
|
||||
* @param table Name of table for import.
|
||||
* @param records Records in the COPY IN format.
|
||||
*/
|
||||
void
|
||||
bulkInsert(
|
||||
char const* table,
|
||||
std::string const& records,
|
||||
boost::asio::yield_context& yield);
|
||||
|
||||
public:
|
||||
/** Constructor for Pg class.
|
||||
*
|
||||
* @param config Config parameters.
|
||||
* @param j Logger object.
|
||||
* @param stop Reference to connection pool's stop flag.
|
||||
* @param mutex Reference to connection pool's mutex.
|
||||
*/
|
||||
Pg(PgConfig const& config,
|
||||
boost::asio::io_context& ctx,
|
||||
bool& stop,
|
||||
std::mutex& mutex)
|
||||
: config_(config), strand_(ctx), stop_(stop), mutex_(mutex)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
/** Database connection pool.
|
||||
*
|
||||
* Allow re-use of postgres connections. Postgres connections are created
|
||||
* as needed until configurable limit is reached. After use, each connection
|
||||
* is placed in a container ordered by time of use. Each request for
|
||||
* a connection grabs the most recently used connection from the container.
|
||||
* If none are available, a new connection is used (up to configured limit).
|
||||
* Idle connections are destroyed periodically after configurable
|
||||
* timeout duration.
|
||||
*
|
||||
* This should be stored as a shared pointer so PgQuery objects can safely
|
||||
* outlive it.
|
||||
*/
|
||||
class PgPool
|
||||
{
|
||||
friend class PgQuery;
|
||||
|
||||
using clock_type = std::chrono::steady_clock;
|
||||
|
||||
boost::asio::io_context& ioc_;
|
||||
PgConfig config_;
|
||||
std::mutex mutex_;
|
||||
std::condition_variable cond_;
|
||||
std::size_t connections_{};
|
||||
bool stop_{false};
|
||||
|
||||
/** Idle database connections ordered by timestamp to allow timing out. */
|
||||
std::multimap<std::chrono::time_point<clock_type>, std::unique_ptr<Pg>>
|
||||
idle_;
|
||||
|
||||
/** Get a postgres connection object.
|
||||
*
|
||||
* Return the most recent idle connection in the pool, if available.
|
||||
* Otherwise, return a new connection unless we're at the threshold.
|
||||
* If so, then wait until a connection becomes available.
|
||||
*
|
||||
* @return Postgres object.
|
||||
*/
|
||||
std::unique_ptr<Pg>
|
||||
checkout();
|
||||
|
||||
/** Return a postgres object to the pool for reuse.
|
||||
*
|
||||
* If connection is healthy, place in pool for reuse. After calling this,
|
||||
* the container no longer have a connection unless checkout() is called.
|
||||
*
|
||||
* @param pg Pg object.
|
||||
*/
|
||||
void
|
||||
checkin(std::unique_ptr<Pg>& pg);
|
||||
|
||||
public:
|
||||
/** Connection pool constructor.
|
||||
*
|
||||
* @param pgConfig Postgres config.
|
||||
* @param j Logger object.
|
||||
* @param parent Stoppable parent.
|
||||
*/
|
||||
PgPool(boost::asio::io_context& ioc, boost::json::object const& config);
|
||||
|
||||
~PgPool()
|
||||
{
|
||||
onStop();
|
||||
}
|
||||
|
||||
PgConfig&
|
||||
config()
|
||||
{
|
||||
return config_;
|
||||
}
|
||||
|
||||
/** Initiate idle connection timer.
|
||||
*
|
||||
* The PgPool object needs to be fully constructed to support asynchronous
|
||||
* operations.
|
||||
*/
|
||||
void
|
||||
setup();
|
||||
|
||||
/** Prepare for process shutdown. (Stoppable) */
|
||||
void
|
||||
onStop();
|
||||
};
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
/** Class to query postgres.
|
||||
*
|
||||
* This class should be used by functions outside of this
|
||||
* compilation unit for querying postgres. It automatically acquires and
|
||||
* relinquishes a database connection to handle each query.
|
||||
*/
|
||||
class PgQuery
|
||||
{
|
||||
private:
|
||||
std::shared_ptr<PgPool> pool_;
|
||||
std::unique_ptr<Pg> pg_;
|
||||
|
||||
public:
|
||||
PgQuery() = delete;
|
||||
|
||||
PgQuery(std::shared_ptr<PgPool> const& pool)
|
||||
: pool_(pool), pg_(pool->checkout())
|
||||
{
|
||||
}
|
||||
|
||||
~PgQuery()
|
||||
{
|
||||
pool_->checkin(pg_);
|
||||
}
|
||||
|
||||
// TODO. add sendQuery and getResult, for sending the query and getting the
|
||||
// result asynchronously. This could be useful for sending a bunch of
|
||||
// requests concurrently
|
||||
|
||||
/** Execute postgres query with parameters.
|
||||
*
|
||||
* @param dbParams Database command with parameters.
|
||||
* @return Result of query, including errors.
|
||||
*/
|
||||
PgResult
|
||||
operator()(pg_params const& dbParams, boost::asio::yield_context& yield)
|
||||
{
|
||||
if (!pg_) // It means we're stopping. Return empty result.
|
||||
return PgResult();
|
||||
return pg_->query(dbParams, yield);
|
||||
}
|
||||
|
||||
/** Execute postgres query with only command statement.
|
||||
*
|
||||
* @param command Command statement.
|
||||
* @return Result of query, including errors.
|
||||
*/
|
||||
PgResult
|
||||
operator()(char const* command, boost::asio::yield_context& yield)
|
||||
{
|
||||
return operator()(pg_params{command, {}}, yield);
|
||||
}
|
||||
|
||||
/** Insert multiple records into a table using Postgres' bulk COPY.
|
||||
*
|
||||
* Throws upon error.
|
||||
*
|
||||
* @param table Name of table for import.
|
||||
* @param records Records in the COPY IN format.
|
||||
*/
|
||||
void
|
||||
bulkInsert(
|
||||
char const* table,
|
||||
std::string const& records,
|
||||
boost::asio::yield_context& yield)
|
||||
{
|
||||
pg_->bulkInsert(table, records, yield);
|
||||
}
|
||||
};
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
/** Create Postgres connection pool manager.
|
||||
*
|
||||
* @param pgConfig Configuration for Postgres.
|
||||
* @param j Logger object.
|
||||
* @param parent Stoppable parent object.
|
||||
* @return Postgres connection pool manager
|
||||
*/
|
||||
std::shared_ptr<PgPool>
|
||||
make_PgPool(boost::asio::io_context& ioc, boost::json::object const& pgConfig);
|
||||
|
||||
/** Initialize the Postgres schema.
|
||||
*
|
||||
* This function ensures that the database is running the latest version
|
||||
* of the schema.
|
||||
*
|
||||
* @param pool Postgres connection pool manager.
|
||||
*/
|
||||
void
|
||||
initSchema(std::shared_ptr<PgPool> const& pool);
|
||||
void
|
||||
initAccountTx(std::shared_ptr<PgPool> const& pool);
|
||||
|
||||
// Load the ledger info for the specified ledger/s from the database
|
||||
// @param whichLedger specifies the ledger to load via ledger sequence, ledger
|
||||
// hash or std::monostate (which loads the most recent)
|
||||
// @return vector of LedgerInfos
|
||||
std::optional<ripple::LedgerInfo>
|
||||
getLedger(
|
||||
std::variant<std::monostate, ripple::uint256, std::uint32_t> const&
|
||||
whichLedger,
|
||||
std::shared_ptr<PgPool>& pgPool);
|
||||
|
||||
#endif // RIPPLE_CORE_PG_H_INCLUDED
|
||||
@@ -1,860 +0,0 @@
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/format.hpp>
|
||||
#include <backend/PostgresBackend.h>
|
||||
#include <thread>
|
||||
namespace Backend {
|
||||
|
||||
// Type alias for async completion handlers
|
||||
using completion_token = boost::asio::yield_context;
|
||||
using function_type = void(boost::system::error_code);
|
||||
using result_type = boost::asio::async_result<completion_token, function_type>;
|
||||
using handler_type = typename result_type::completion_handler_type;
|
||||
|
||||
struct HandlerWrapper
|
||||
{
|
||||
handler_type handler;
|
||||
|
||||
HandlerWrapper(handler_type&& handler_) : handler(std::move(handler_))
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
PostgresBackend::PostgresBackend(
|
||||
boost::asio::io_context& ioc,
|
||||
boost::json::object const& config)
|
||||
: BackendInterface(config)
|
||||
, pgPool_(make_PgPool(ioc, config))
|
||||
, writeConnection_(pgPool_)
|
||||
{
|
||||
if (config.contains("write_interval"))
|
||||
{
|
||||
writeInterval_ = config.at("write_interval").as_int64();
|
||||
}
|
||||
}
|
||||
void
|
||||
PostgresBackend::writeLedger(
|
||||
ripple::LedgerInfo const& ledgerInfo,
|
||||
std::string&& ledgerHeader)
|
||||
{
|
||||
synchronous([&](boost::asio::yield_context yield) {
|
||||
auto cmd = boost::format(
|
||||
R"(INSERT INTO ledgers
|
||||
VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");
|
||||
|
||||
auto ledgerInsert = boost::str(
|
||||
cmd % ledgerInfo.seq % ripple::strHex(ledgerInfo.hash) %
|
||||
ripple::strHex(ledgerInfo.parentHash) % ledgerInfo.drops.drops() %
|
||||
ledgerInfo.closeTime.time_since_epoch().count() %
|
||||
ledgerInfo.parentCloseTime.time_since_epoch().count() %
|
||||
ledgerInfo.closeTimeResolution.count() % ledgerInfo.closeFlags %
|
||||
ripple::strHex(ledgerInfo.accountHash) %
|
||||
ripple::strHex(ledgerInfo.txHash));
|
||||
|
||||
auto res = writeConnection_(ledgerInsert.data(), yield);
|
||||
abortWrite_ = !res;
|
||||
inProcessLedger = ledgerInfo.seq;
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
PostgresBackend::writeAccountTransactions(
|
||||
std::vector<AccountTransactionsData>&& data)
|
||||
{
|
||||
if (abortWrite_)
|
||||
return;
|
||||
PgQuery pg(pgPool_);
|
||||
for (auto const& record : data)
|
||||
{
|
||||
for (auto const& a : record.accounts)
|
||||
{
|
||||
std::string acct = ripple::strHex(a);
|
||||
accountTxBuffer_ << "\\\\x" << acct << '\t'
|
||||
<< std::to_string(record.ledgerSequence) << '\t'
|
||||
<< std::to_string(record.transactionIndex) << '\t'
|
||||
<< "\\\\x" << ripple::strHex(record.txHash)
|
||||
<< '\n';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
PostgresBackend::doWriteLedgerObject(
|
||||
std::string&& key,
|
||||
std::uint32_t const seq,
|
||||
std::string&& blob)
|
||||
{
|
||||
synchronous([&](boost::asio::yield_context yield) {
|
||||
if (abortWrite_)
|
||||
return;
|
||||
objectsBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
|
||||
<< std::to_string(seq) << '\t' << "\\\\x"
|
||||
<< ripple::strHex(blob) << '\n';
|
||||
numRowsInObjectsBuffer_++;
|
||||
// If the buffer gets too large, the insert fails. Not sure why. So we
|
||||
// insert after 1 million records
|
||||
if (numRowsInObjectsBuffer_ % writeInterval_ == 0)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__ << " Flushing large buffer. num objects = "
|
||||
<< numRowsInObjectsBuffer_;
|
||||
writeConnection_.bulkInsert("objects", objectsBuffer_.str(), yield);
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
|
||||
objectsBuffer_.str("");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
PostgresBackend::writeSuccessor(
|
||||
std::string&& key,
|
||||
std::uint32_t const seq,
|
||||
std::string&& successor)
|
||||
{
|
||||
synchronous([&](boost::asio::yield_context yield) {
|
||||
if (range)
|
||||
{
|
||||
if (successors_.count(key) > 0)
|
||||
return;
|
||||
successors_.insert(key);
|
||||
}
|
||||
successorBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
|
||||
<< std::to_string(seq) << '\t' << "\\\\x"
|
||||
<< ripple::strHex(successor) << '\n';
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << ripple::strHex(key) << " - " << std::to_string(seq);
|
||||
numRowsInSuccessorBuffer_++;
|
||||
if (numRowsInSuccessorBuffer_ % writeInterval_ == 0)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__ << " Flushing large buffer. num successors = "
|
||||
<< numRowsInSuccessorBuffer_;
|
||||
writeConnection_.bulkInsert(
|
||||
"successor", successorBuffer_.str(), yield);
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
|
||||
successorBuffer_.str("");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
PostgresBackend::writeTransaction(
|
||||
std::string&& hash,
|
||||
std::uint32_t const seq,
|
||||
std::uint32_t const date,
|
||||
std::string&& transaction,
|
||||
std::string&& metadata)
|
||||
{
|
||||
if (abortWrite_)
|
||||
return;
|
||||
transactionsBuffer_ << "\\\\x" << ripple::strHex(hash) << '\t'
|
||||
<< std::to_string(seq) << '\t' << std::to_string(date)
|
||||
<< '\t' << "\\\\x" << ripple::strHex(transaction)
|
||||
<< '\t' << "\\\\x" << ripple::strHex(metadata) << '\n';
|
||||
}
|
||||
|
||||
std::uint32_t
|
||||
checkResult(PgResult const& res, std::uint32_t const numFieldsExpected)
|
||||
{
|
||||
if (!res)
|
||||
{
|
||||
auto msg = res.msg();
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " - " << msg;
|
||||
if (msg.find("statement timeout"))
|
||||
throw DatabaseTimeout();
|
||||
assert(false);
|
||||
throw DatabaseTimeout();
|
||||
}
|
||||
if (res.status() != PGRES_TUPLES_OK)
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << " : Postgres response should have been "
|
||||
"PGRES_TUPLES_OK but instead was "
|
||||
<< res.status() << " - msg = " << res.msg();
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " - " << msg.str();
|
||||
assert(false);
|
||||
throw DatabaseTimeout();
|
||||
}
|
||||
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " Postgres result msg : " << res.msg();
|
||||
if (res.isNull() || res.ntuples() == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
else if (res.ntuples() > 0)
|
||||
{
|
||||
if (res.nfields() != numFieldsExpected)
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << "Wrong number of fields in Postgres "
|
||||
"response. Expected "
|
||||
<< numFieldsExpected << ", but got " << res.nfields();
|
||||
throw std::runtime_error(msg.str());
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
return res.ntuples();
|
||||
}
|
||||
|
||||
// Translates the first row of a `ledgers` query result into a
// ripple::LedgerInfo. Column order (0..9): ledger_seq, ledger_hash,
// prev_hash, total_coins, close_time, parent_close_time,
// close_time_resolution, close_flags, account_hash, tx_hash.
// The returned info is always marked validated.
ripple::LedgerInfo
parseLedgerInfo(PgResult const& res)
{
    using time_point = ripple::NetClock::time_point;
    using duration = ripple::NetClock::duration;

    ripple::LedgerInfo info;
    info.seq = res.asBigInt(0, 0);
    info.hash = res.asUInt256(0, 1);
    info.parentHash = res.asUInt256(0, 2);
    info.drops = res.asBigInt(0, 3);
    info.closeTime = time_point{duration{res.asBigInt(0, 4)}};
    info.parentCloseTime = time_point{duration{res.asBigInt(0, 5)}};
    info.closeTimeResolution = duration{res.asBigInt(0, 6)};
    info.closeFlags = res.asBigInt(0, 7);
    info.accountHash = res.asUInt256(0, 8);
    info.txHash = res.asUInt256(0, 9);
    info.validated = true;
    return info;
}
|
||||
std::optional<std::uint32_t>
|
||||
PostgresBackend::fetchLatestLedgerSequence(
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
PgQuery pgQuery(pgPool_);
|
||||
pgQuery(set_timeout, yield);
|
||||
|
||||
auto const query =
|
||||
"SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1";
|
||||
|
||||
if (auto res = pgQuery(query, yield); checkResult(res, 1))
|
||||
return res.asBigInt(0, 0);
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
// Fetches the ledger header for a given sequence from the `ledgers` table,
// or std::nullopt if no such ledger is stored. Expects all 10 header columns
// (see parseLedgerInfo for the column layout).
std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerBySequence(
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT * FROM ledgers WHERE ledger_seq = "
        << std::to_string(sequence);

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 10))
        return parseLedgerInfo(res);

    return {};
}
|
||||
|
||||
// Fetches the ledger header whose hash matches `hash`, or std::nullopt if
// none is stored. The hash is hex-encoded into the SQL as a bytea literal.
std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerByHash(
    ripple::uint256 const& hash,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT * FROM ledgers WHERE ledger_hash = \'\\x"
        << ripple::to_string(hash) << "\'";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 10))
        return parseLedgerInfo(res);

    return {};
}
|
||||
|
||||
// Queries the database directly (no cache) for the stored ledger range by
// calling the complete_ledgers() SQL function. The function returns a text
// result such as "1234-5678", a single number, "empty", or "error"; this
// parses that into a LedgerRange. Returns std::nullopt on query failure,
// an empty/error response, or a parse failure.
std::optional<LedgerRange>
PostgresBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
{
    auto range = PgQuery(pgPool_)("SELECT complete_ledgers()", yield);
    if (!range)
        return {};

    std::string res{range.c_str()};
    BOOST_LOG_TRIVIAL(debug) << "range is = " << res;
    try
    {
        size_t minVal = 0;
        size_t maxVal = 0;
        if (res == "empty" || res == "error" || res.empty())
            return {};
        else if (size_t delim = res.find('-'); delim != std::string::npos)
        {
            // "min-max" form: split on the dash.
            minVal = std::stol(res.substr(0, delim));
            maxVal = std::stol(res.substr(delim + 1));
        }
        else
        {
            // Single number: the range is one ledger wide.
            minVal = maxVal = std::stol(res);
        }
        return LedgerRange{minVal, maxVal};
    }
    catch (std::exception&)
    {
        // std::stol throws on non-numeric input; treat as "no range".
        BOOST_LOG_TRIVIAL(error)
            << __func__ << " : "
            << "Error parsing result of getCompleteLedgers()";
    }
    return {};
}
|
||||
|
||||
// Fetches the most recent version of the ledger object `key` as of ledger
// `sequence` (the row with the greatest ledger_seq <= sequence). Returns
// std::nullopt when no row exists or when the stored blob is empty (an
// empty blob marks a deleted object in this schema — TODO confirm).
std::optional<Blob>
PostgresBackend::doFetchLedgerObject(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT object FROM objects WHERE key = "
        << "\'\\x" << ripple::strHex(key) << "\'"
        << " AND ledger_seq <= " << std::to_string(sequence)
        << " ORDER BY ledger_seq DESC LIMIT 1";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 1))
    {
        auto blob = res.asUnHexedBlob(0, 0);
        if (blob.size())
            return blob;
    }

    return {};
}
|
||||
|
||||
// Fetches a single transaction by hash. Returns the raw transaction blob,
// metadata blob, ledger sequence and close date as a TransactionAndMetadata,
// or std::nullopt if the hash is not found.
std::optional<TransactionAndMetadata>
PostgresBackend::fetchTransaction(
    ripple::uint256 const& hash,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT transaction,metadata,ledger_seq,date FROM transactions "
           "WHERE hash = "
        << "\'\\x" << ripple::strHex(hash) << "\'";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 4))
    {
        return {
            {res.asUnHexedBlob(0, 0),
             res.asUnHexedBlob(0, 1),
             res.asBigInt(0, 2),
             res.asBigInt(0, 3)}};
    }

    return {};
}
|
||||
// Fetches every transaction (with metadata, sequence, and date) belonging to
// the given ledger. Returns an empty vector when the ledger has no stored
// transactions.
std::vector<TransactionAndMetadata>
PostgresBackend::fetchAllTransactionsInLedger(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT transaction, metadata, ledger_seq,date FROM transactions "
           "WHERE "
        << "ledger_seq = " << std::to_string(ledgerSequence);

    auto res = pgQuery(sql.str().data(), yield);
    if (size_t numRows = checkResult(res, 4))
    {
        std::vector<TransactionAndMetadata> txns;
        for (size_t i = 0; i < numRows; ++i)
        {
            txns.push_back(
                {res.asUnHexedBlob(i, 0),
                 res.asUnHexedBlob(i, 1),
                 res.asBigInt(i, 2),
                 res.asBigInt(i, 3)});
        }
        return txns;
    }
    return {};
}
|
||||
// Fetches the hashes of every transaction in the given ledger. Returns an
// empty vector when the ledger has no stored transactions.
std::vector<ripple::uint256>
PostgresBackend::fetchAllTransactionHashesInLedger(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT hash FROM transactions WHERE "
        << "ledger_seq = " << std::to_string(ledgerSequence);

    auto res = pgQuery(sql.str().data(), yield);
    if (size_t numRows = checkResult(res, 1))
    {
        std::vector<ripple::uint256> hashes;
        for (size_t i = 0; i < numRows; ++i)
        {
            hashes.push_back(res.asUInt256(i, 0));
        }
        return hashes;
    }

    return {};
}
|
||||
|
||||
// Looks up the key that follows `key` in the successor table as of ledger
// `ledgerSequence` (most recent row with ledger_seq <= ledgerSequence).
// Returns std::nullopt when no successor row exists or when the stored
// successor equals lastKey, the end-of-list sentinel.
std::optional<ripple::uint256>
PostgresBackend::doFetchSuccessorKey(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT next FROM successor WHERE key = "
        << "\'\\x" << ripple::strHex(key) << "\'"
        << " AND ledger_seq <= " << std::to_string(ledgerSequence)
        << " ORDER BY ledger_seq DESC LIMIT 1";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 1))
    {
        auto next = res.asUInt256(0, 0);
        // lastKey is the sentinel marking the end of the key list.
        if (next == lastKey)
            return {};
        return next;
    }

    return {};
}
|
||||
|
||||
// Fetches many transactions by hash concurrently: one coroutine is spawned
// per hash on the caller's executor, each issuing its own query. The caller's
// coroutine is suspended (via the handler/result pair) until the last worker
// decrements numRemaining to zero and invokes the completion handler.
// Results are positional: results[i] corresponds to hashes[i]; a hash that
// was not found leaves a default-constructed entry at its index.
// Throws DatabaseTimeout if any worker's query timed out.
std::vector<TransactionAndMetadata>
PostgresBackend::fetchTransactions(
    std::vector<ripple::uint256> const& hashes,
    boost::asio::yield_context& yield) const
{
    if (!hashes.size())
        return {};

    std::vector<TransactionAndMetadata> results;
    results.resize(hashes.size());

    handler_type handler(std::forward<decltype(yield)>(yield));
    result_type result(handler);

    // NOTE(review): hw is heap-allocated so the handler outlives this frame
    // while workers run; it leaks if result.get() throws before the delete
    // below — confirm result.get() cannot throw here.
    auto hw = new HandlerWrapper(std::move(handler));

    auto start = std::chrono::system_clock::now();

    // Shared across workers: countdown to completion and a sticky error flag.
    std::atomic_uint numRemaining = hashes.size();
    std::atomic_bool errored = false;

    for (size_t i = 0; i < hashes.size(); ++i)
    {
        // Captured by reference: `hashes` and `results` must outlive every
        // worker, which holds because this frame blocks on result.get().
        auto const& hash = hashes[i];
        boost::asio::spawn(
            get_associated_executor(yield),
            [this, &hash, &results, hw, &numRemaining, &errored, i](
                boost::asio::yield_context yield) {
                BOOST_LOG_TRIVIAL(trace) << __func__ << " getting txn = " << i;

                PgQuery pgQuery(pgPool_);

                std::stringstream sql;
                sql << "SELECT transaction,metadata,ledger_seq,date FROM "
                       "transactions "
                       "WHERE HASH = \'\\x"
                    << ripple::strHex(hash) << "\'";

                try
                {
                    if (auto const res = pgQuery(sql.str().data(), yield);
                        checkResult(res, 4))
                    {
                        results[i] = {
                            res.asUnHexedBlob(0, 0),
                            res.asUnHexedBlob(0, 1),
                            res.asBigInt(0, 2),
                            res.asBigInt(0, 3)};
                    }
                }
                catch (DatabaseTimeout const&)
                {
                    // Record the failure; surfaced after all workers finish.
                    errored = true;
                }

                // Last worker to finish resumes the suspended caller.
                if (--numRemaining == 0)
                {
                    handler_type h(std::move(hw->handler));
                    h(boost::system::error_code{});
                }
            });
    }

    // Yields the worker to the io_context until handler is called.
    result.get();

    delete hw;

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start);

    BOOST_LOG_TRIVIAL(info)
        << __func__ << " fetched " << std::to_string(hashes.size())
        << " transactions asynchronously. took "
        << std::to_string(duration.count());
    if (errored)
    {
        BOOST_LOG_TRIVIAL(error) << __func__ << " Database fetch timed out";
        throw DatabaseTimeout();
    }

    return results;
}
|
||||
|
||||
// Fetches many ledger objects (each as of ledger `sequence`) concurrently,
// using the same fan-out pattern as fetchTransactions: one coroutine per key,
// with the caller suspended until the last worker fires the completion
// handler. Results are positional: results[i] corresponds to keys[i]; a key
// with no row leaves an empty Blob at its index.
// Throws DatabaseTimeout if any worker's query timed out.
std::vector<Blob>
PostgresBackend::doFetchLedgerObjects(
    std::vector<ripple::uint256> const& keys,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    if (!keys.size())
        return {};

    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::vector<Blob> results;
    results.resize(keys.size());

    handler_type handler(std::forward<decltype(yield)>(yield));
    result_type result(handler);

    // Heap-allocated so the handler outlives this frame while workers run;
    // freed after result.get() returns.
    auto hw = new HandlerWrapper(std::move(handler));

    // Shared across workers: countdown to completion and a sticky error flag.
    std::atomic_uint numRemaining = keys.size();
    std::atomic_bool errored = false;
    auto start = std::chrono::system_clock::now();
    for (size_t i = 0; i < keys.size(); ++i)
    {
        auto const& key = keys[i];
        boost::asio::spawn(
            boost::asio::get_associated_executor(yield),
            [this, &key, &results, &numRemaining, &errored, hw, i, sequence](
                boost::asio::yield_context yield) {
                PgQuery pgQuery(pgPool_);

                // Most recent version of the object at or before `sequence`.
                std::stringstream sql;
                sql << "SELECT object FROM "
                       "objects "
                       "WHERE key = \'\\x"
                    << ripple::strHex(key) << "\'"
                    << " AND ledger_seq <= " << std::to_string(sequence)
                    << " ORDER BY ledger_seq DESC LIMIT 1";

                try
                {
                    if (auto const res = pgQuery(sql.str().data(), yield);
                        checkResult(res, 1))
                        results[i] = res.asUnHexedBlob();
                }
                catch (DatabaseTimeout const& ex)
                {
                    errored = true;
                }

                // Last worker to finish resumes the suspended caller.
                if (--numRemaining == 0)
                {
                    handler_type h(std::move(hw->handler));
                    h(boost::system::error_code{});
                }
            });
    }

    // Yields the worker to the io_context until handler is called.
    result.get();

    delete hw;

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start);

    BOOST_LOG_TRIVIAL(info)
        << __func__ << " fetched " << std::to_string(keys.size())
        << " objects asynchronously. ms = " << std::to_string(duration.count());
    if (errored)
    {
        BOOST_LOG_TRIVIAL(error) << __func__ << " Database fetch timed out";
        throw DatabaseTimeout();
    }

    return results;
}
|
||||
|
||||
// Fetches every (key, object) pair written at exactly the given ledger
// sequence — i.e. the diff introduced by that ledger. Returns an empty
// vector when no objects were written at that sequence.
std::vector<LedgerObject>
PostgresBackend::fetchLedgerDiff(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT key,object FROM objects "
           "WHERE "
        << "ledger_seq = " << std::to_string(ledgerSequence);

    auto res = pgQuery(sql.str().data(), yield);
    if (size_t numRows = checkResult(res, 2))
    {
        std::vector<LedgerObject> objects;
        for (size_t i = 0; i < numRows; ++i)
        {
            objects.push_back({res.asUInt256(i, 0), res.asUnHexedBlob(i, 1)});
        }
        return objects;
    }

    return {};
}
|
||||
|
||||
// Fetches up to `limit` transactions affecting `account`, paginated via an
// optional (ledgerSequence, transactionIndex) cursor, in forward or reverse
// order. Delegates the lookup to the account_tx stored procedure, which
// returns a JSON document containing transaction hashes and an optional
// continuation cursor; the actual transaction bodies are then fetched by
// hash via fetchTransactions.
AccountTransactions
PostgresBackend::fetchAccountTransactions(
    ripple::AccountID const& account,
    std::uint32_t const limit,
    bool forward,
    std::optional<AccountTransactionsCursor> const& cursor,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);
    pg_params dbParams;

    // Parameterized call to the account_tx stored procedure:
    // $1 = account, $2 = limit, $3 = forward, $4/$5 = optional cursor.
    char const*& command = dbParams.first;
    std::vector<std::optional<std::string>>& values = dbParams.second;
    command =
        "SELECT account_tx($1::bytea, $2::bigint, $3::bool, "
        "$4::bigint, $5::bigint)";
    values.resize(5);
    values[0] = "\\x" + strHex(account);

    values[1] = std::to_string(limit);

    values[2] = std::to_string(forward);

    // $4/$5 remain null (no cursor) on the first page.
    if (cursor)
    {
        values[3] = std::to_string(cursor->ledgerSequence);
        values[4] = std::to_string(cursor->transactionIndex);
    }
    for (size_t i = 0; i < values.size(); ++i)
    {
        BOOST_LOG_TRIVIAL(debug) << "value " << std::to_string(i) << " = "
                                 << (values[i] ? values[i].value() : "null");
    }

    auto start = std::chrono::system_clock::now();
    auto res = pgQuery(dbParams, yield);
    auto end = std::chrono::system_clock::now();

    // Duration in seconds (ns count divided by 1e9), logged for diagnostics.
    auto duration = ((end - start).count()) / 1000000000.0;
    BOOST_LOG_TRIVIAL(info)
        << __func__ << " : executed stored_procedure in "
        << std::to_string(duration)
        << " num records = " << std::to_string(checkResult(res, 1));

    checkResult(res, 1);

    // The stored procedure returns a single JSON text column.
    char const* resultStr = res.c_str();
    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                             << "postgres result = " << resultStr
                             << " : account = " << strHex(account);

    boost::json::value raw = boost::json::parse(resultStr);
    boost::json::object responseObj = raw.as_object();
    BOOST_LOG_TRIVIAL(debug) << " parsed = " << responseObj;
    if (responseObj.contains("transactions"))
    {
        auto txns = responseObj.at("transactions").as_array();
        std::vector<ripple::uint256> hashes;
        for (auto& hashHex : txns)
        {
            ripple::uint256 hash;
            // Skip the leading "\x" prefix (2 chars) before parsing the hex.
            if (hash.parseHex(hashHex.at("hash").as_string().c_str() + 2))
                hashes.push_back(hash);
        }
        if (responseObj.contains("cursor"))
        {
            // More pages remain: return the continuation cursor as well.
            return {
                fetchTransactions(hashes, yield),
                {{responseObj.at("cursor").at("ledger_sequence").as_int64(),
                  responseObj.at("cursor")
                      .at("transaction_index")
                      .as_int64()}}};
        }
        return {fetchTransactions(hashes, yield), {}};
    }
    return {{}, {}};
}
|
||||
|
||||
// Initializes the database: creates the schema and the account_tx stored
// procedure if needed. `readOnly` is currently ignored by this backend.
void
PostgresBackend::open(bool readOnly)
{
    initSchema(pgPool_);
    initAccountTx(pgPool_);
}
|
||||
|
||||
// No-op: the connection pool manages its own lifetime; nothing to tear down.
void
PostgresBackend::close()
{
}
|
||||
|
||||
// Begins a new write batch: resets per-batch state and opens a Postgres
// transaction (BEGIN) on the dedicated write connection.
// Throws std::runtime_error if the transaction cannot be started.
void
PostgresBackend::startWrites() const
{
    synchronous([&](boost::asio::yield_context yield) {
        numRowsInObjectsBuffer_ = 0;
        abortWrite_ = false;
        auto res = writeConnection_("BEGIN", yield);
        if (!res || res.status() != PGRES_COMMAND_OK)
        {
            std::stringstream msg;
            msg << "Postgres error creating transaction: " << res.msg();
            throw std::runtime_error(msg.str());
        }
    });
}
|
||||
|
||||
// Completes the current write batch: flushes every buffered COPY stream
// (transactions, account_transactions, objects, successor) unless the batch
// was aborted, COMMITs the Postgres transaction, and resets all buffers.
// Returns true if the batch was committed (i.e. not aborted).
// Throws std::runtime_error if the COMMIT fails.
bool
PostgresBackend::doFinishWrites()
{
    synchronous([&](boost::asio::yield_context yield) {
        if (!abortWrite_)
        {
            std::string txStr = transactionsBuffer_.str();
            writeConnection_.bulkInsert("transactions", txStr, yield);
            writeConnection_.bulkInsert(
                "account_transactions", accountTxBuffer_.str(), yield);
            std::string objectsStr = objectsBuffer_.str();
            if (objectsStr.size())
                writeConnection_.bulkInsert("objects", objectsStr, yield);
            BOOST_LOG_TRIVIAL(debug)
                << __func__ << " objects size = " << objectsStr.size()
                << " txns size = " << txStr.size();
            std::string successorStr = successorBuffer_.str();
            if (successorStr.size())
                writeConnection_.bulkInsert("successor", successorStr, yield);
            // First ever batch (no range yet): create the partial index used
            // for diff queries, excluding the ledger currently being written.
            if (!range)
            {
                std::stringstream indexCreate;
                indexCreate
                    << "CREATE INDEX diff ON objects USING hash(ledger_seq) "
                       "WHERE NOT "
                       "ledger_seq = "
                    << std::to_string(inProcessLedger);
                writeConnection_(indexCreate.str().data(), yield);
            }
        }
        // COMMIT runs even when the batch was aborted (nothing was flushed).
        auto res = writeConnection_("COMMIT", yield);
        if (!res || res.status() != PGRES_COMMAND_OK)
        {
            std::stringstream msg;
            msg << "Postgres error committing transaction: " << res.msg();
            throw std::runtime_error(msg.str());
        }
        // Reset all per-batch buffers and the dedup set for the next batch.
        transactionsBuffer_.str("");
        transactionsBuffer_.clear();
        objectsBuffer_.str("");
        objectsBuffer_.clear();
        successorBuffer_.str("");
        successorBuffer_.clear();
        successors_.clear();
        accountTxBuffer_.str("");
        accountTxBuffer_.clear();
        numRowsInObjectsBuffer_ = 0;
    });

    return !abortWrite_;
}
|
||||
|
||||
// Online deletion: keeps the most recent `numLedgersToKeep` ledgers. Walks
// every ledger object page by page, rewriting each object's latest version
// at `minLedger` (so lookups at the new range floor still succeed), then
// deletes rows older than minLedger from the ledgers, keys, and books
// tables. Returns false if there is no range or nothing to delete.
bool
PostgresBackend::doOnlineDelete(
    std::uint32_t const numLedgersToKeep,
    boost::asio::yield_context& yield) const
{
    auto rng = fetchLedgerRange();
    if (!rng)
        return false;
    // NOTE(review): unsigned subtraction — if numLedgersToKeep exceeds
    // maxSequence this wraps around; confirm callers guarantee otherwise.
    std::uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
    if (minLedger <= rng->minSequence)
        return false;
    PgQuery pgQuery(pgPool_);
    // This is a long-running maintenance pass; disable the statement timeout.
    pgQuery("SET statement_timeout TO 0", yield);
    std::optional<ripple::uint256> cursor;
    while (true)
    {
        // Page through all objects, retrying a page on timeout.
        auto [objects, curCursor] = retryOnTimeout([&]() {
            return fetchLedgerPage(cursor, minLedger, 256, false, yield);
        });
        BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
        std::stringstream objectsBuffer;

        // Re-write each object at minLedger in COPY text format.
        for (auto& obj : objects)
        {
            objectsBuffer << "\\\\x" << ripple::strHex(obj.key) << '\t'
                          << std::to_string(minLedger) << '\t' << "\\\\x"
                          << ripple::strHex(obj.blob) << '\n';
        }
        pgQuery.bulkInsert("objects", objectsBuffer.str(), yield);
        cursor = curCursor;
        if (!cursor)
            break;
    }
    BOOST_LOG_TRIVIAL(info) << __func__ << " finished inserting into objects";
    {
        std::stringstream sql;
        sql << "DELETE FROM ledgers WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data(), yield);
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from ledgers table");
    }
    {
        std::stringstream sql;
        sql << "DELETE FROM keys WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data(), yield);
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from keys table");
    }
    {
        std::stringstream sql;
        sql << "DELETE FROM books WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data(), yield);
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from books table");
    }
    return true;
}
|
||||
|
||||
} // namespace Backend
|
||||
@@ -1,145 +0,0 @@
|
||||
#ifndef RIPPLE_APP_REPORTING_POSTGRESBACKEND_H_INCLUDED
|
||||
#define RIPPLE_APP_REPORTING_POSTGRESBACKEND_H_INCLUDED
|
||||
#include <boost/json.hpp>
|
||||
#include <backend/BackendInterface.h>
|
||||
|
||||
namespace Backend {
|
||||
// BackendInterface implementation backed by PostgreSQL. Writes are batched
// into in-memory COPY-format buffers and flushed with bulk inserts inside a
// single Postgres transaction per ledger (see startWrites/doFinishWrites).
class PostgresBackend : public BackendInterface
{
private:
    // Rows currently buffered for the objects table.
    mutable size_t numRowsInObjectsBuffer_ = 0;
    // COPY-format buffer for the objects table.
    mutable std::stringstream objectsBuffer_;
    // Rows currently buffered for the successor table.
    mutable size_t numRowsInSuccessorBuffer_ = 0;
    // COPY-format buffer for the successor table.
    mutable std::stringstream successorBuffer_;
    // COPY-format buffer for the transactions table.
    mutable std::stringstream transactionsBuffer_;
    // COPY-format buffer for the account_transactions table.
    mutable std::stringstream accountTxBuffer_;
    // Connection pool shared by all read queries.
    std::shared_ptr<PgPool> pgPool_;
    // Dedicated connection holding the open write transaction.
    mutable PgQuery writeConnection_;
    // Set when the current write batch should be discarded.
    mutable bool abortWrite_ = false;
    // Buffered rows are flushed every writeInterval_ rows.
    std::uint32_t writeInterval_ = 1000000;
    // Sequence of the ledger currently being written.
    std::uint32_t inProcessLedger = 0;
    // Deduplicates successor keys written within one batch.
    mutable std::unordered_set<std::string> successors_;

    // Default per-statement timeout applied before read queries (10s).
    const char* const set_timeout = "SET statement_timeout TO 10000";

public:
    PostgresBackend(
        boost::asio::io_context& ioc,
        boost::json::object const& config);

    // --- Read path (all coroutine-based, const) ---

    std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override;

    std::optional<ripple::LedgerInfo>
    fetchLedgerBySequence(
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    std::optional<ripple::LedgerInfo>
    fetchLedgerByHash(
        ripple::uint256 const& hash,
        boost::asio::yield_context& yield) const override;

    std::optional<Blob>
    doFetchLedgerObject(
        ripple::uint256 const& key,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    // returns a transaction, metadata pair
    std::optional<TransactionAndMetadata>
    fetchTransaction(
        ripple::uint256 const& hash,
        boost::asio::yield_context& yield) const override;

    std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::vector<LedgerObject>
    fetchLedgerDiff(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    // Queries the database directly, bypassing any cached range.
    std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context& yield) const override;

    std::optional<ripple::uint256>
    doFetchSuccessorKey(
        ripple::uint256 key,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    // Concurrent multi-row fetches (one spawned coroutine per element).
    std::vector<TransactionAndMetadata>
    fetchTransactions(
        std::vector<ripple::uint256> const& hashes,
        boost::asio::yield_context& yield) const override;

    std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    AccountTransactions
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<AccountTransactionsCursor> const& cursor,
        boost::asio::yield_context& yield) const override;

    // --- Write path (buffered; flushed by doFinishWrites) ---

    void
    writeLedger(
        ripple::LedgerInfo const& ledgerInfo,
        std::string&& ledgerHeader) override;

    void
    doWriteLedgerObject(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& blob) override;

    void
    writeSuccessor(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& successor) override;

    void
    writeTransaction(
        std::string&& hash,
        std::uint32_t const seq,
        std::uint32_t const date,
        std::string&& transaction,
        std::string&& metadata) override;

    void
    writeAccountTransactions(
        std::vector<AccountTransactionsData>&& data) override;

    // --- Lifecycle ---

    void
    open(bool readOnly) override;

    void
    close() override;

    void
    startWrites() const override;

    bool
    doFinishWrites() override;

    // Trims history to the most recent numLedgersToKeep ledgers.
    bool
    doOnlineDelete(
        std::uint32_t const numLedgersToKeep,
        boost::asio::yield_context& yield) const override;
};
|
||||
} // namespace Backend
|
||||
#endif
|
||||
@@ -1,174 +1,132 @@
|
||||
The data model used by clio is different than that used by rippled.
|
||||
rippled uses what is known as a SHAMap, which is a tree structure, with
|
||||
actual ledger and transaction data at the leaves of the tree. Looking up a record
|
||||
is a tree traversal, where the key is used to determine the path to the proper
|
||||
leaf node. The path from root to leaf is used as a proof-tree on the p2p network,
|
||||
where nodes can prove that a piece of data is present in a ledger by sending
|
||||
the path from root to leaf. Other nodes can verify this path and be certain
|
||||
that the data does actually exist in the ledger in question.
|
||||
# Clio Backend
|
||||
## Background
|
||||
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. As of right now, Cassandra and ScyllaDB are the only supported databases that are production-ready. Support for database types can be easily extended by creating new implementations which implements the virtual methods of `BackendInterface.h`. Then, use the Factory Object Design Pattern to simply add logic statements to `BackendFactory.h` that return the new database interface for a specific `type` in Clio's configuration file.
|
||||
|
||||
clio instead flattens the data model, so lookups are O(1). This results in time
|
||||
and space savings. This is possible because clio does not participate in the peer
|
||||
to peer protocol, and thus does not need to verify any data. clio fully trusts the
|
||||
rippled nodes that are being used as a data source.
|
||||
## Data Model
|
||||
The data model used by Clio to read and write ledger data is different from what Rippled uses. Rippled uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record. Rippled nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existnce of the leaf node data to other Rippled nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized Rippled nodes.
|
||||
|
||||
clio uses certain features of database query languages to make this happen. Many
|
||||
databases provide the necessary features to implement the clio data model. At the
|
||||
time of writing, the data model is implemented in PostgreSQL and CQL (the query
|
||||
language used by Apache Cassandra and ScyllaDB).
|
||||
Since Clio only extracts past validated ledger data from a group of trusted Rippled nodes, it can be safely assumed that these ledger data are correct without the need to validate with other nodes in the XRP peer-to-peer network. Because of this, Clio is able to use a flattened data model to store the past validated ledger data, which allows for direct record lookup with much faster constant time operations.
|
||||
|
||||
The below examples are a sort of pseudo query language
|
||||
There are three main types of data in each XRP ledger version, they are [Ledger Header](https://xrpl.org/ledger-header.html), [Transaction Set](https://xrpl.org/transaction-formats.html) and [State Data](https://xrpl.org/ledger-object-types.html). Due to the structural differences of the different types of databases, Clio may choose to represent these data using a different schema for each unique database type.
|
||||
|
||||
## Ledgers
|
||||
**Keywords**
|
||||
*Sequence*: A unique incrementing identification number used to label the different ledger versions.
|
||||
*Hash*: The SHA512-half (calculate SHA512 and take the first 256 bits) hash of various ledger data like the entire ledger or specific ledger objects.
|
||||
*Ledger Object*: The [binary-encoded](https://xrpl.org/serialization.html) STObject containing specific data (i.e. metadata, transaction data).
|
||||
*Metadata*: The data containing [detailed information](https://xrpl.org/transaction-metadata.html#transaction-metadata) of the outcome of a specific transaction, regardless of whether the transaction was successful.
|
||||
*Transaction data*: The data containing the [full details](https://xrpl.org/transaction-common-fields.html) of a specific transaction.
|
||||
*Object Index*: The pseudo-random unique identifier of a ledger object, created by hashing the data of the object.
|
||||
|
||||
We store ledger headers in a ledgers table. In PostgreSQL, we store
|
||||
the headers in their deserialized form, so we can look up by sequence or hash.
|
||||
## Cassandra Implementation
|
||||
Cassandra is a distributed wide-column NoSQL database designed to handle large data throughput with high availability and no single point of failure. By leveraging Cassandra, Clio will be able to quickly and reliably scale up when needed simply by adding more Cassandra nodes to the Cassandra cluster configuration.
|
||||
|
||||
In Cassandra, we store the headers as blobs. The primary table maps a ledger sequence
|
||||
to the blob, and a secondary table maps a ledger hash to a ledger sequence.
|
||||
In Cassandra, Clio will be creating 9 tables to store the ledger data, they are `ledger_transactions`, `transactions`, `ledger_hashes`, `ledger_range`, `objects`, `ledgers`, `diff`, `account_tx`, and `successor`. Their schemas and how they work are detailed below.
|
||||
|
||||
## Transactions
|
||||
Transactions are stored in a very basic table, with a schema like so:
|
||||
*Note, if you would like visually explore the data structure of the Cassandra database, you can first run Clio server with database `type` configured as `cassandra` to fill ledger data from Rippled nodes into Cassandra, then use a GUI database management tool like [Datastax's Opcenter](https://docs.datastax.com/en/install/6.0/install/opscInstallOpsc.html) to interactively view it.*
|
||||
|
||||
|
||||
### `ledger_transactions`
|
||||
```
|
||||
CREATE TABLE transactions (
|
||||
hash blob,
|
||||
ledger_sequence int,
|
||||
transaction blob,
|
||||
PRIMARY KEY(hash))
|
||||
CREATE TABLE clio.ledger_transactions (
|
||||
ledger_sequence bigint, # The sequence number of the ledger version
|
||||
hash blob, # Hash of all the transactions on this ledger version
|
||||
PRIMARY KEY (ledger_sequence, hash)
|
||||
) WITH CLUSTERING ORDER BY (hash ASC) ...
|
||||
```
|
||||
This table stores the hashes of all transactions in a given ledger sequence ordered by the hash value in ascending order.
|
||||
|
||||
### `transactions`
|
||||
```
|
||||
The primary key is the hash.
|
||||
CREATE TABLE clio.transactions (
|
||||
hash blob PRIMARY KEY, # The transaction hash
|
||||
date bigint, # Date of the transaction
|
||||
ledger_sequence bigint, # The sequence that the transaction was validated
|
||||
metadata blob, # Metadata of the transaction
|
||||
transaction blob # Data of the transaction
|
||||
) ...
|
||||
```
|
||||
This table stores the full transaction and metadata of each ledger version with the transaction hash as the primary key.
|
||||
|
||||
A common query pattern is fetching all transactions in a ledger. In PostgreSQL,
|
||||
nothing special is needed for this. We just query:
|
||||
To look up all the transactions that were validated in a ledger version with sequence `n`, one can first get the all the transaction hashes in that ledger version by querying `SELECT * FROM ledger_transactions WHERE ledger_sequence = n;`. Then, iterate through the list of hashes and query `SELECT * FROM transactions WHERE hash = one_of_the_hash_from_the_list;` to get the detailed transaction data.
|
||||
|
||||
### `ledger_hashes`
|
||||
```
|
||||
SELECT * FROM transactions WHERE ledger_sequence = s;
|
||||
CREATE TABLE clio.ledger_hashes (
|
||||
hash blob PRIMARY KEY, # Hash of entire ledger version's data
|
||||
sequence bigint # The sequence of the ledger version
|
||||
) ...
|
||||
```
|
||||
This table stores the hash of all ledger versions by their sequences.
|
||||
### `ledger_range`
|
||||
```
|
||||
Cassandra doesn't handle queries like this well, since `ledger_sequence` is not
|
||||
the primary key, so we use a second table that maps a ledger sequence number
|
||||
to all of the hashes in that ledger:
|
||||
CREATE TABLE clio.ledger_range (
|
||||
is_latest boolean PRIMARY KEY, # Whether this sequence is the stopping range
|
||||
sequence bigint # The sequence number of the starting/stopping range
|
||||
) ...
|
||||
```
|
||||
This table marks the range of ledger versions that is stored on this specific Cassandra node. Because of its nature, there are only two records in this table with `false` and `true` values for `is_latest`, marking the starting and ending sequence of the ledger range.
|
||||
|
||||
### `objects`
|
||||
```
|
||||
CREATE TABLE transaction_hashes (
|
||||
ledger_sequence int,
|
||||
hash blob,
|
||||
PRIMARY KEY(ledger_sequence, blob))
|
||||
CREATE TABLE clio.objects (
|
||||
key blob, # Object index of the object
|
||||
sequence bigint, # The sequence this object was last updated
|
||||
object blob, # Data of the object
|
||||
PRIMARY KEY (key, sequence)
|
||||
) WITH CLUSTERING ORDER BY (sequence DESC) ...
|
||||
```
|
||||
This table stores the specific data of all objects that ever existed on the XRP network, even if they are deleted (which is represented with a special `0x` value). The records are ordered by descending sequence, where the newest validated ledger objects are at the top.
|
||||
|
||||
This table is updated when all data for a given ledger sequence has been written to the various tables in the database. For each ledger, many associated records are written to different tables. This table is used as a synchronization mechanism, to prevent the application from reading data from a ledger for which all data has not yet been fully written.
|
||||
|
||||
### `ledgers`
|
||||
```
|
||||
This table uses a compound primary key, so we can have multiple records with
|
||||
the same ledger sequence but different hash. Looking up all of the transactions
|
||||
in a given ledger then requires querying the transaction_hashes table to get the hashes of
|
||||
all of the transactions in the ledger, and then using those hashes to query the
|
||||
transactions table. Sometimes we only want the hashes though.
|
||||
|
||||
## Ledger data
|
||||
|
||||
Ledger data is more complicated than transaction data. Objects have different versions,
|
||||
where applying transactions in a particular ledger changes an object with a given
|
||||
key. A basic example is an account root object: the balance changes with every
|
||||
transaction sent or received, though the key (object ID) for this object remains the same.
|
||||
|
||||
Ledger data then is modeled like so:
|
||||
CREATE TABLE clio.ledgers (
|
||||
sequence bigint PRIMARY KEY, # Sequence of the ledger version
|
||||
header blob # Data of the header
|
||||
) ...
|
||||
```
|
||||
This table stores the ledger header data of specific ledger versions by their sequence.
|
||||
|
||||
### `diff`
|
||||
```
|
||||
CREATE TABLE objects (
|
||||
id blob,
|
||||
ledger_sequence int,
|
||||
object blob,
|
||||
PRIMARY KEY(key,ledger_sequence))
|
||||
CREATE TABLE clio.diff (
|
||||
seq bigint, # Sequence of the ledger version
|
||||
key blob, # Hash of changes in the ledger version
|
||||
PRIMARY KEY (seq, key)
|
||||
) WITH CLUSTERING ORDER BY (key ASC) ...
|
||||
```
|
||||
This table stores the object index of all the changes in each ledger version.
|
||||
|
||||
### `account_tx`
|
||||
```
|
||||
CREATE TABLE clio.account_tx (
|
||||
account blob,
|
||||
seq_idx frozen<tuple<bigint, bigint>>, # Tuple of (ledger_index, transaction_index)
|
||||
hash blob, # Hash of the transaction
|
||||
PRIMARY KEY (account, seq_idx)
|
||||
) WITH CLUSTERING ORDER BY (seq_idx DESC) ...
|
||||
```
|
||||
This table stores the list of transactions affecting a given account. This includes transactions made by the account, as well as transactions received.
|
||||
|
||||
The `objects` table has a compound primary key. This is essential. Looking up
|
||||
a ledger object as of a given ledger then is just:
|
||||
|
||||
### `successor`
|
||||
```
|
||||
SELECT object FROM objects WHERE id = ? and ledger_sequence <= ?
|
||||
ORDER BY ledger_sequence DESC LIMIT 1;
|
||||
```
|
||||
This gives us the most recent ledger object written at or before a specified ledger.
|
||||
CREATE TABLE clio.successor (
|
||||
key blob, # Object index
|
||||
seq bigint, # The sequnce that this ledger object's predecessor and successor was updated
|
||||
next blob, # Index of the next object that existed in this sequence
|
||||
PRIMARY KEY (key, seq)
|
||||
) WITH CLUSTERING ORDER BY (seq ASC) ...
|
||||
```
|
||||
This table is the important backbone of how histories of ledger objects are stored in Cassandra. The successor table stores the object index of all ledger objects that were validated on the XRP network along with the ledger sequence that the object was upated on. Due to the unique nature of the table with each key being ordered by the sequence, by tracing through the table with a specific sequence number, Clio can recreate a Linked List data structure that represents all the existing ledger object at that ledger sequence. The special value of `0x00...00` and `0xFF...FF` are used to label the head and tail of the Linked List in the successor table. The diagram below showcases how tracing through the same table but with different sequence parameter filtering can result in different Linked List data representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` can effectively trace through the successor table and get the Linked List of a specific sequence `n`.
|
||||
|
||||
When a ledger object is deleted, we write a record where `object` is just an empty blob.
|
||||

|
||||
*P.S.: The `diff` is `(DELETE 0x00...02, CREATE 0x00...03)` for `seq=1001` and `(CREATE 0x00...04)` for `seq=1002`, which is both accurately reflected with the Linked List trace*
|
||||
|
||||
### Next
|
||||
Generally RPCs that read ledger data will just use the above query pattern. However,
|
||||
a few RPCs (`book_offers` and `ledger_data`) make use of a certain tree operation
|
||||
called `successor`, which takes in an object id and ledger sequence, and returns
|
||||
the id of the successor object in the ledger. This is the object in the ledger with the smallest id
|
||||
greater than the input id.
|
||||
|
||||
This problem is quite difficult for clio's data model, since computing this
|
||||
generally requires the inner nodes of the tree, which clio doesn't store. A naive
|
||||
way to do this with PostgreSQL is like so:
|
||||
```
|
||||
SELECT * FROM objects WHERE id > ? AND ledger_sequence <= s ORDER BY id ASC, ledger_sequence DESC LIMIT 1;
|
||||
```
|
||||
This query is not really possible with Cassandra, unless you use ALLOW FILTERING, which
|
||||
is an anti pattern (for good reason!). It would require contacting basically every node
|
||||
in the entire cluster.
|
||||
|
||||
But even with Postgres, this query is not scalable. Why? Consider what the query
|
||||
is doing at the database level. The database starts at the input id, and begins scanning
|
||||
the table in ascending order of id. It needs to skip over any records that don't actually
|
||||
exist in the desired ledger, which are objects that have been deleted, or objects that
|
||||
were created later. As ledger history grows, this query skips over more and more records,
|
||||
which results in the query taking longer and longer. The time this query takes grows
|
||||
unbounded then, as ledger history just keeps growing. With under a million ledgers, this
|
||||
query is usable, but as we approach 10 million ledgers are more, the query starts to become very slow.
|
||||
|
||||
To alleviate this issue, the data model uses a checkpointing method. We create a second
|
||||
table called keys, like so:
|
||||
```
|
||||
CREATE TABLE keys (
|
||||
ledger_sequence int,
|
||||
id blob,
|
||||
PRIMARY KEY(ledger_sequence, id)
|
||||
)
|
||||
```
|
||||
However, this table does not have an entry for every ledger sequence. Instead,
|
||||
this table has an entry for rougly every 1 million ledgers. We call these ledgers
|
||||
flag ledgers. For each flag ledger, the keys table contains every object id in that
|
||||
ledger, as well as every object id that existed in any ledger between the last flag
|
||||
ledger and this one. This is a lot of keys, but not every key that ever existed (which
|
||||
is what the naive attempt at implementing successor was iterating over). In this manner,
|
||||
the performance is bounded. If we wanted to increase the performance of the successor operation,
|
||||
we can increase the frequency of flag ledgers. However, this will use more space. 1 million
|
||||
was chosen as a reasonable tradeoff to bound the performance, but not use too much space,
|
||||
especially since this is only needed for two RPC calls.
|
||||
|
||||
We write to this table every ledger, for each new key. However, we also need to handle
|
||||
keys that existed in the previous flag ledger. To do that, at each flag ledger, we
|
||||
iterate through the previous flag ledger, and write any keys that are still present
|
||||
in the new flag ledger. This is done asynchronously.
|
||||
|
||||
## Account Transactions
|
||||
rippled offers a RPC called `account_tx`. This RPC returns all transactions that
|
||||
affect a given account, and allows users to page backwards or forwards in time.
|
||||
Generally, this is a modeled with a table like so:
|
||||
```
|
||||
CREATE TABLE account_tx (
|
||||
account blob,
|
||||
ledger_sequence int,
|
||||
transaction_index int,
|
||||
hash blob,
|
||||
PRIMARY KEY(account,ledger_sequence,transaction_index))
|
||||
```
|
||||
|
||||
An example of looking up from this table going backwards in time is:
|
||||
```
|
||||
SELECT hash FROM account_tx WHERE account = ?
|
||||
AND ledger_sequence <= ? and transaction_index <= ?
|
||||
ORDER BY ledger_sequence DESC, transaction_index DESC;
|
||||
```
|
||||
|
||||
This query returns the hashes, and then we use those hashes to read from the
|
||||
transactions table.
|
||||
|
||||
## Comments
|
||||
There are various nuances around how these data models are tuned and optimized
|
||||
for each database implementation. Cassandra and PostgreSQL are very different,
|
||||
so some slight modifications are needed. However, the general model outlined here
|
||||
is implemented by both databases, and when adding a new database, this general model
|
||||
should be followed, unless there is a good reason not to. Generally, a database will be
|
||||
decently similar to either PostgreSQL or Cassandra, so using those as a basis should
|
||||
be sufficient.
|
||||
|
||||
Whatever database is used, clio requires strong consistency, and durability. For this
|
||||
reason, any replication strategy needs to maintain strong consistency.
|
||||
In each new ledger version with sequence `n`, a ledger object `v` can either be **created**, **modified**, or **deleted**. For all three of these operations, the procedure to update the successor table can be broken down in to two steps:
|
||||
1. Trace through the Linked List of the previous sequence to to find the ledger object `e` with the greatest object index smaller or equal than the `v`'s index. Save `e`'s `next` value (the index of the next ledger object) as `w`.
|
||||
2. If `v` is...
|
||||
1. Being **created**, add two new records of `seq=n` with one being `e` pointing to `v`, and `v` pointing to `w` (Linked List insertion operation).
|
||||
2. Being **modified**, do nothing.
|
||||
3. Being **deleted**, add a record of `seq=n` with `e` pointing to `v`'s `next` value (Linked List deletion operation).
|
||||
|
||||
@@ -1,5 +1,25 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <backend/SimpleCache.h>
|
||||
namespace Backend {
|
||||
|
||||
uint32_t
|
||||
SimpleCache::latestLedgerSequence() const
|
||||
{
|
||||
@@ -13,6 +33,9 @@ SimpleCache::update(
|
||||
uint32_t seq,
|
||||
bool isBackground)
|
||||
{
|
||||
if (disabled_)
|
||||
return;
|
||||
|
||||
{
|
||||
std::unique_lock lck{mtx_};
|
||||
if (seq > latestSeq_)
|
||||
@@ -26,6 +49,7 @@ SimpleCache::update(
|
||||
{
|
||||
if (isBackground && deletes_.count(obj.key))
|
||||
continue;
|
||||
|
||||
auto& e = map_[obj.key];
|
||||
if (seq > e.seq)
|
||||
{
|
||||
@@ -41,19 +65,23 @@ SimpleCache::update(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<LedgerObject>
|
||||
SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
|
||||
{
|
||||
if (!full_)
|
||||
return {};
|
||||
std::shared_lock{mtx_};
|
||||
successorReqCounter_++;
|
||||
if (seq != latestSeq_)
|
||||
return {};
|
||||
auto e = map_.upper_bound(key);
|
||||
if (e == map_.end())
|
||||
return {};
|
||||
successorHitCounter_++;
|
||||
return {{e->first, e->second.blob}};
|
||||
}
|
||||
|
||||
std::optional<LedgerObject>
|
||||
SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
|
||||
{
|
||||
@@ -74,17 +102,28 @@ SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
|
||||
if (seq > latestSeq_)
|
||||
return {};
|
||||
std::shared_lock lck{mtx_};
|
||||
objectReqCounter_++;
|
||||
auto e = map_.find(key);
|
||||
if (e == map_.end())
|
||||
return {};
|
||||
if (seq < e->second.seq)
|
||||
return {};
|
||||
objectHitCounter_++;
|
||||
return {e->second.blob};
|
||||
}
|
||||
|
||||
void
|
||||
SimpleCache::setDisabled()
|
||||
{
|
||||
disabled_ = true;
|
||||
}
|
||||
|
||||
void
|
||||
SimpleCache::setFull()
|
||||
{
|
||||
if (disabled_)
|
||||
return;
|
||||
|
||||
full_ = true;
|
||||
std::unique_lock lck{mtx_};
|
||||
deletes_.clear();
|
||||
@@ -101,4 +140,18 @@ SimpleCache::size() const
|
||||
std::shared_lock lck{mtx_};
|
||||
return map_.size();
|
||||
}
|
||||
float
|
||||
SimpleCache::getObjectHitRate() const
|
||||
{
|
||||
if (!objectReqCounter_)
|
||||
return 1;
|
||||
return ((float)objectHitCounter_) / objectReqCounter_;
|
||||
}
|
||||
float
|
||||
SimpleCache::getSuccessorHitRate() const
|
||||
{
|
||||
if (!successorReqCounter_)
|
||||
return 1;
|
||||
return ((float)successorHitCounter_) / successorReqCounter_;
|
||||
}
|
||||
} // namespace Backend
|
||||
|
||||
@@ -1,5 +1,23 @@
|
||||
#ifndef CLIO_SIMPLECACHE_H_INCLUDED
|
||||
#define CLIO_SIMPLECACHE_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/basics/hardened_hash.h>
|
||||
@@ -17,10 +35,19 @@ class SimpleCache
|
||||
uint32_t seq = 0;
|
||||
Blob blob;
|
||||
};
|
||||
|
||||
// counters for fetchLedgerObject(s) hit rate
|
||||
mutable std::atomic_uint32_t objectReqCounter_;
|
||||
mutable std::atomic_uint32_t objectHitCounter_;
|
||||
// counters for fetchSuccessorKey hit rate
|
||||
mutable std::atomic_uint32_t successorReqCounter_;
|
||||
mutable std::atomic_uint32_t successorHitCounter_;
|
||||
|
||||
std::map<ripple::uint256, CacheEntry> map_;
|
||||
mutable std::shared_mutex mtx_;
|
||||
uint32_t latestSeq_ = 0;
|
||||
std::atomic_bool full_ = false;
|
||||
std::atomic_bool disabled_ = false;
|
||||
// temporary set to prevent background thread from writing already deleted
|
||||
// data. not used when cache is full
|
||||
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
|
||||
@@ -45,6 +72,9 @@ public:
|
||||
std::optional<LedgerObject>
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
|
||||
void
|
||||
setDisabled();
|
||||
|
||||
void
|
||||
setFull();
|
||||
|
||||
@@ -57,7 +87,12 @@ public:
|
||||
|
||||
size_t
|
||||
size() const;
|
||||
|
||||
float
|
||||
getObjectHitRate() const;
|
||||
|
||||
float
|
||||
getSuccessorHitRate() const;
|
||||
};
|
||||
|
||||
} // namespace Backend
|
||||
#endif
|
||||
|
||||
@@ -1,6 +1,26 @@
|
||||
#ifndef CLIO_TYPES_H_INCLUDED
|
||||
#define CLIO_TYPES_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/protocol/AccountID.h>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
@@ -46,16 +66,34 @@ struct TransactionAndMetadata
|
||||
}
|
||||
};
|
||||
|
||||
struct AccountTransactionsCursor
|
||||
struct TransactionsCursor
|
||||
{
|
||||
std::uint32_t ledgerSequence;
|
||||
std::uint32_t transactionIndex;
|
||||
};
|
||||
|
||||
struct AccountTransactions
|
||||
struct TransactionsAndCursor
|
||||
{
|
||||
std::vector<TransactionAndMetadata> txns;
|
||||
std::optional<AccountTransactionsCursor> cursor;
|
||||
std::optional<TransactionsCursor> cursor;
|
||||
};
|
||||
|
||||
struct NFT
|
||||
{
|
||||
ripple::uint256 tokenID;
|
||||
std::uint32_t ledgerSequence;
|
||||
ripple::AccountID owner;
|
||||
bool isBurned;
|
||||
|
||||
// clearly two tokens are the same if they have the same ID, but this
|
||||
// struct stores the state of a given token at a given ledger sequence, so
|
||||
// we also need to compare with ledgerSequence
|
||||
bool
|
||||
operator==(NFT const& other) const
|
||||
{
|
||||
return tokenID == other.tokenID &&
|
||||
ledgerSequence == other.ledgerSequence;
|
||||
}
|
||||
};
|
||||
|
||||
struct LedgerRange
|
||||
@@ -70,4 +108,3 @@ constexpr ripple::uint256 lastKey{
|
||||
constexpr ripple::uint256 hi192{
|
||||
"0000000000000000000000000000000000000000000000001111111111111111"};
|
||||
} // namespace Backend
|
||||
#endif
|
||||
|
||||
190
src/config/Config.cpp
Normal file
190
src/config/Config.cpp
Normal file
@@ -0,0 +1,190 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <config/Config.h>
|
||||
#include <log/Logger.h>
|
||||
|
||||
#include <fstream>
|
||||
|
||||
namespace clio {
|
||||
|
||||
// Note: `store_(store)` MUST use `()` instead of `{}` otherwise gcc
|
||||
// picks `initializer_list` constructor and anything passed becomes an
|
||||
// array :-D
|
||||
Config::Config(boost::json::value store) : store_(std::move(store))
|
||||
{
|
||||
}
|
||||
|
||||
Config::operator bool() const noexcept
|
||||
{
|
||||
return not store_.is_null();
|
||||
}
|
||||
|
||||
bool
|
||||
Config::contains(key_type key) const
|
||||
{
|
||||
return lookup(key).has_value();
|
||||
}
|
||||
|
||||
std::optional<boost::json::value>
|
||||
Config::lookup(key_type key) const
|
||||
{
|
||||
if (store_.is_null())
|
||||
return std::nullopt;
|
||||
|
||||
std::reference_wrapper<boost::json::value const> cur = std::cref(store_);
|
||||
auto hasBrokenPath = false;
|
||||
auto tokenized = detail::Tokenizer<key_type, Separator>{key};
|
||||
std::string subkey{};
|
||||
|
||||
auto maybeSection = tokenized.next();
|
||||
while (maybeSection.has_value())
|
||||
{
|
||||
auto section = maybeSection.value();
|
||||
subkey += section;
|
||||
|
||||
if (not hasBrokenPath)
|
||||
{
|
||||
if (not cur.get().is_object())
|
||||
throw detail::StoreException(
|
||||
"Not an object at '" + subkey + "'");
|
||||
if (not cur.get().as_object().contains(section))
|
||||
hasBrokenPath = true;
|
||||
else
|
||||
cur = std::cref(cur.get().as_object().at(section));
|
||||
}
|
||||
|
||||
subkey += Separator;
|
||||
maybeSection = tokenized.next();
|
||||
}
|
||||
|
||||
if (hasBrokenPath)
|
||||
return std::nullopt;
|
||||
return std::make_optional(cur);
|
||||
}
|
||||
|
||||
std::optional<Config::array_type>
Config::maybeArray(key_type key) const
{
    try
    {
        auto found = lookup(key);
        if (!found || !found->is_array())
            return std::nullopt;

        // `found` is our own copy of the value, so its elements can be
        // moved into the wrapping Config instances.
        auto& raw = found->as_array();
        array_type result;
        result.reserve(raw.size());
        for (auto&& element : raw)
            result.push_back(Config{std::move(element)});
        return std::make_optional<array_type>(std::move(result));
    }
    catch (detail::StoreException const&)
    {
        // A broken store path just means "no array here".
        // Key-format errors (KeyException) still propagate to the caller.
        return std::nullopt;
    }
}
|
||||
|
||||
Config::array_type
Config::array(key_type key) const
{
    // Move the vector out of the optional instead of copying it
    // (`maybe_arr.value()` on an lvalue optional would copy).
    if (auto maybe_arr = maybeArray(key); maybe_arr)
        return std::move(*maybe_arr);
    throw std::logic_error("No array found at '" + key + "'");
}
|
||||
|
||||
Config::array_type
Config::arrayOr(key_type key, array_type fallback) const
{
    // Move the found vector out instead of copying it; the by-value
    // fallback parameter is implicitly moved on return.
    if (auto maybe_arr = maybeArray(key); maybe_arr)
        return std::move(*maybe_arr);
    return fallback;
}
|
||||
|
||||
Config::array_type
Config::arrayOrThrow(key_type key, std::string_view err) const
{
    try
    {
        return maybeArray(key).value();
    }
    catch (std::exception const&)
    {
        // Build a std::string from the view: string_view is not guaranteed
        // to be null-terminated, so passing err.data() directly could read
        // past the end of the viewed buffer.
        throw std::runtime_error(std::string{err});
    }
}
|
||||
|
||||
Config
Config::section(key_type key) const
{
    // A section is only valid when the resolved value is a JSON object.
    auto found = lookup(key);
    if (!found || !found->is_object())
        throw std::logic_error("No section found at '" + key + "'");
    return Config{std::move(*found)};
}
|
||||
|
||||
Config::array_type
Config::array() const
{
    // This overload reads the value the instance itself refers to.
    if (!store_.is_array())
        throw std::logic_error("_self_ is not an array");

    auto const& raw = store_.as_array();
    array_type result;
    result.reserve(raw.size());
    // Elements are copied: the store stays owned by this instance.
    for (auto const& element : raw)
        result.push_back(Config{element});
    return result;
}
|
||||
|
||||
Config
ConfigReader::open(std::filesystem::path path)
{
    try
    {
        std::ifstream in{path, std::ios::in | std::ios::binary};
        if (in)
        {
            // Slurp the whole file, then parse allowing // and /* */
            // comments in the configuration JSON.
            std::stringstream buffer;
            buffer << in.rdbuf();

            auto opts = boost::json::parse_options{};
            opts.allow_comments = true;
            return Config{boost::json::parse(buffer.str(), {}, opts)};
        }
    }
    catch (std::exception const& e)
    {
        LogService::error() << "Could not read configuration file from '"
                            << path.string() << "': " << e.what();
    }

    // Unreadable or unparsable file yields a null (falsy) Config.
    return Config{};
}
|
||||
|
||||
} // namespace clio
|
||||
405
src/config/Config.h
Normal file
405
src/config/Config.h
Normal file
@@ -0,0 +1,405 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <config/detail/Helpers.h>
|
||||
|
||||
#include <boost/json.hpp>
|
||||
#include <filesystem>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
|
||||
namespace clio {
|
||||
|
||||
/**
|
||||
* @brief Convenience wrapper to query a JSON configuration file.
|
||||
*
|
||||
* Any custom data type can be supported by implementing the right `tag_invoke`
|
||||
* for `boost::json::value_to`.
|
||||
*/
|
||||
class Config final
|
||||
{
|
||||
boost::json::value store_;
|
||||
static constexpr char Separator = '.';
|
||||
|
||||
public:
|
||||
using key_type = std::string; /*! The type of key used */
|
||||
using array_type = std::vector<Config>; /*! The type of array used */
|
||||
using write_cursor_type = std::pair<
|
||||
std::optional<std::reference_wrapper<boost::json::value>>,
|
||||
key_type>;
|
||||
|
||||
/**
|
||||
* @brief Construct a new Config object.
|
||||
* @param store boost::json::value that backs this instance
|
||||
*/
|
||||
explicit Config(boost::json::value store = {});
|
||||
|
||||
//
|
||||
// Querying the store
|
||||
//
|
||||
|
||||
/**
|
||||
* @brief Checks whether underlying store is not null.
|
||||
*
|
||||
* @return true If the store is null
|
||||
* @return false If the store is not null
|
||||
*/
|
||||
operator bool() const noexcept;
|
||||
|
||||
/**
|
||||
* @brief Checks whether something exists under given key.
|
||||
*
|
||||
* @param key The key to check
|
||||
* @return true If something exists under key
|
||||
* @return false If nothing exists under key
|
||||
* @throws std::logic_error If the key is of invalid format
|
||||
*/
|
||||
[[nodiscard]] bool
|
||||
contains(key_type key) const;
|
||||
|
||||
//
|
||||
// Key value access
|
||||
//
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching values by key that returns std::optional.
|
||||
*
|
||||
* Will attempt to fetch the value under the desired key. If the value
|
||||
* exists and can be represented by the desired type Result then it will be
|
||||
* returned wrapped in an optional. If the value exists but the conversion
|
||||
* to Result is not possible - a runtime_error will be thrown. If the value
|
||||
* does not exist under the specified key - std::nullopt is returned.
|
||||
*
|
||||
* @tparam Result The desired return type
|
||||
* @param key The key to check
|
||||
* @return std::optional<Result> Optional value of desired type
|
||||
* @throws std::logic_error Thrown if conversion to Result is not possible
|
||||
* or key is of invalid format
|
||||
*/
|
||||
template <typename Result>
|
||||
[[nodiscard]] std::optional<Result>
|
||||
maybeValue(key_type key) const
|
||||
{
|
||||
auto maybe_element = lookup(key);
|
||||
if (maybe_element)
|
||||
return std::make_optional<Result>(
|
||||
checkedAs<Result>(key, *maybe_element));
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching values by key.
|
||||
*
|
||||
* Will attempt to fetch the value under the desired key. If the value
|
||||
* exists and can be represented by the desired type Result then it will be
|
||||
* returned. If the value exists but the conversion
|
||||
* to Result is not possible OR the value does not exist - a logic_error
|
||||
* will be thrown.
|
||||
*
|
||||
* @tparam Result The desired return type
|
||||
* @param key The key to check
|
||||
* @return Result Value of desired type
|
||||
* @throws std::logic_error Thrown if conversion to Result is not
|
||||
* possible, value does not exist under specified key path or the key is of
|
||||
* invalid format
|
||||
*/
|
||||
template <typename Result>
|
||||
[[nodiscard]] Result
|
||||
value(key_type key) const
|
||||
{
|
||||
return maybeValue<Result>(key).value();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching values by key with fallback.
|
||||
*
|
||||
* Will attempt to fetch the value under the desired key. If the value
|
||||
* exists and can be represented by the desired type Result then it will be
|
||||
* returned. If the value exists but the conversion
|
||||
* to Result is not possible - a logic_error will be thrown. If the value
|
||||
* does not exist under the specified key - user specified fallback is
|
||||
* returned.
|
||||
*
|
||||
* @tparam Result The desired return type
|
||||
* @param key The key to check
|
||||
* @param fallback The fallback value
|
||||
* @return Result Value of desired type
|
||||
* @throws std::logic_error Thrown if conversion to Result is not possible
|
||||
* or the key is of invalid format
|
||||
*/
|
||||
template <typename Result>
|
||||
[[nodiscard]] Result
|
||||
valueOr(key_type key, Result fallback) const
|
||||
{
|
||||
try
|
||||
{
|
||||
return maybeValue<Result>(key).value_or(fallback);
|
||||
}
|
||||
catch (detail::StoreException const&)
|
||||
{
|
||||
return fallback;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching values by key with custom error handling.
|
||||
*
|
||||
* Will attempt to fetch the value under the desired key. If the value
|
||||
* exists and can be represented by the desired type Result then it will be
|
||||
* returned. If the value exists but the conversion
|
||||
* to Result is not possible OR the value does not exist - a runtime_error
|
||||
* will be thrown with the user specified message.
|
||||
*
|
||||
* @tparam Result The desired return type
|
||||
* @param key The key to check
|
||||
* @param err The custom error message
|
||||
* @return Result Value of desired type
|
||||
* @throws std::runtime_error Thrown if conversion to Result is not possible
|
||||
* or value does not exist under key
|
||||
*/
|
||||
template <typename Result>
|
||||
[[nodiscard]] Result
|
||||
valueOrThrow(key_type key, std::string_view err) const
|
||||
{
|
||||
try
|
||||
{
|
||||
return maybeValue<Result>(key).value();
|
||||
}
|
||||
catch (std::exception const&)
|
||||
{
|
||||
throw std::runtime_error(err.data());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching an array by key that returns std::optional.
|
||||
*
|
||||
* Will attempt to fetch an array under the desired key. If the array
|
||||
* exists then it will be
|
||||
* returned wrapped in an optional. If the array does not exist under the
|
||||
* specified key - std::nullopt is returned.
|
||||
*
|
||||
* @param key The key to check
|
||||
* @return std::optional<array_type> Optional array
|
||||
* @throws std::logic_error Thrown if the key is of invalid format
|
||||
*/
|
||||
[[nodiscard]] std::optional<array_type>
|
||||
maybeArray(key_type key) const;
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching an array by key.
|
||||
*
|
||||
* Will attempt to fetch an array under the desired key. If the array
|
||||
* exists then it will be
|
||||
* returned. If the array does not exist under the
|
||||
* specified key an std::logic_error is thrown.
|
||||
*
|
||||
* @param key The key to check
|
||||
* @return array_type The array
|
||||
* @throws std::logic_error Thrown if there is no array under the desired
|
||||
* key or the key is of invalid format
|
||||
*/
|
||||
[[nodiscard]] array_type
|
||||
array(key_type key) const;
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching an array by key with fallback.
|
||||
*
|
||||
* Will attempt to fetch an array under the desired key. If the array
|
||||
* exists then it will be returned.
|
||||
* If the array does not exist or another type is stored under the desired
|
||||
* key - user specified fallback is returned.
|
||||
*
|
||||
* @param key The key to check
|
||||
* @param fallback The fallback array
|
||||
* @return array_type The array
|
||||
* @throws std::logic_error Thrown if the key is of invalid format
|
||||
*/
|
||||
[[nodiscard]] array_type
|
||||
arrayOr(key_type key, array_type fallback) const;
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching an array by key with custom error handling.
|
||||
*
|
||||
* Will attempt to fetch an array under the desired key. If the array
|
||||
* exists then it will be returned.
|
||||
* If the array does not exist or another type is stored under the desired
|
||||
* key - std::runtime_error is thrown with the user specified error message.
|
||||
*
|
||||
* @param key The key to check
|
||||
* @param err The custom error message
|
||||
* @return array_type The array
|
||||
* @throws std::runtime_error Thrown if there is no array under the desired
|
||||
* key
|
||||
*/
|
||||
[[nodiscard]] array_type
|
||||
arrayOrThrow(key_type key, std::string_view err) const;
|
||||
|
||||
/**
|
||||
* @brief Interface for fetching a sub section by key.
|
||||
*
|
||||
* Will attempt to fetch an entire section under the desired key and return
|
||||
* it as a Config instance. If the section does not exist or another type is
|
||||
* stored under the desired key - std::logic_error is thrown.
|
||||
*
|
||||
* @param key The key to check
|
||||
* @return Config Section represented as a separate instance of Config
|
||||
* @throws std::logic_error Thrown if there is no section under the
|
||||
* desired key or the key is of invalid format
|
||||
*/
|
||||
[[nodiscard]] Config
|
||||
section(key_type key) const;
|
||||
|
||||
//
|
||||
// Direct self-value access
|
||||
//
|
||||
|
||||
/**
|
||||
* @brief Interface for reading the value directly referred to by the
|
||||
* instance. Wraps as std::optional.
|
||||
*
|
||||
* See @ref maybeValue(key_type) const for how this works.
|
||||
*/
|
||||
template <typename Result>
|
||||
[[nodiscard]] std::optional<Result>
|
||||
maybeValue() const
|
||||
{
|
||||
if (store_.is_null())
|
||||
return std::nullopt;
|
||||
return std::make_optional<Result>(checkedAs<Result>("_self_", store_));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Interface for reading the value directly referred to by the
|
||||
* instance.
|
||||
*
|
||||
* See @ref value(key_type) const for how this works.
|
||||
*/
|
||||
template <typename Result>
|
||||
[[nodiscard]] Result
|
||||
value() const
|
||||
{
|
||||
return maybeValue<Result>().value();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Interface for reading the value directly referred to by the
|
||||
* instance with user-specified fallback.
|
||||
*
|
||||
* See @ref valueOr(key_type, Result) const for how this works.
|
||||
*/
|
||||
template <typename Result>
|
||||
[[nodiscard]] Result
|
||||
valueOr(Result fallback) const
|
||||
{
|
||||
return maybeValue<Result>().valueOr(fallback);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Interface for reading the value directly referred to by the
|
||||
* instance with user-specified error message.
|
||||
*
|
||||
* See @ref valueOrThrow(key_type, std::string_view) const for how this
|
||||
* works.
|
||||
*/
|
||||
template <typename Result>
|
||||
[[nodiscard]] Result
|
||||
valueOrThrow(std::string_view err) const
|
||||
{
|
||||
try
|
||||
{
|
||||
return maybeValue<Result>().value();
|
||||
}
|
||||
catch (std::exception const&)
|
||||
{
|
||||
throw std::runtime_error(err.data());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Interface for reading the array directly referred to by the
|
||||
* instance.
|
||||
*
|
||||
* See @ref array(key_type) const for how this works.
|
||||
*/
|
||||
[[nodiscard]] array_type
|
||||
array() const;
|
||||
|
||||
private:
|
||||
template <typename Return>
|
||||
[[nodiscard]] Return
|
||||
checkedAs(key_type key, boost::json::value const& value) const
|
||||
{
|
||||
auto has_error = false;
|
||||
if constexpr (std::is_same_v<Return, bool>)
|
||||
{
|
||||
if (not value.is_bool())
|
||||
has_error = true;
|
||||
}
|
||||
else if constexpr (std::is_same_v<Return, std::string>)
|
||||
{
|
||||
if (not value.is_string())
|
||||
has_error = true;
|
||||
}
|
||||
else if constexpr (std::is_same_v<Return, double>)
|
||||
{
|
||||
if (not value.is_number())
|
||||
has_error = true;
|
||||
}
|
||||
else if constexpr (
|
||||
std::is_convertible_v<Return, uint64_t> ||
|
||||
std::is_convertible_v<Return, int64_t>)
|
||||
{
|
||||
if (not value.is_int64() && not value.is_uint64())
|
||||
has_error = true;
|
||||
}
|
||||
|
||||
if (has_error)
|
||||
throw std::runtime_error(
|
||||
"Type for key '" + key + "' is '" +
|
||||
std::string{to_string(value.kind())} +
|
||||
"' in JSON but requested '" + detail::typeName<Return>() + "'");
|
||||
|
||||
return boost::json::value_to<Return>(value);
|
||||
}
|
||||
|
||||
std::optional<boost::json::value>
|
||||
lookup(key_type key) const;
|
||||
|
||||
write_cursor_type
|
||||
lookupForWrite(key_type key);
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Simple configuration file reader.
|
||||
*
|
||||
* Reads the JSON file under specified path and creates a @ref Config object
|
||||
* from its contents.
|
||||
*/
|
||||
class ConfigReader final
|
||||
{
|
||||
public:
|
||||
static Config
|
||||
open(std::filesystem::path path);
|
||||
};
|
||||
|
||||
} // namespace clio
|
||||
164
src/config/detail/Helpers.h
Normal file
164
src/config/detail/Helpers.h
Normal file
@@ -0,0 +1,164 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
#include <optional>
#include <queue>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <typeinfo>
#include <utility>
|
||||
|
||||
namespace clio::detail {
|
||||
|
||||
/**
 * @brief Exception type for errors related to a key path (malformed or
 * empty keys). Derives from std::logic_error so callers can catch broadly.
 */
struct KeyException : public ::std::logic_error
{
    KeyException(::std::string msg) : ::std::logic_error{msg}
    {
    }
};
|
||||
|
||||
/**
 * @brief Exception type for errors in the Config's backing store (e.g.
 * descending into a non-object). Derives from std::logic_error.
 */
struct StoreException : public ::std::logic_error
{
    StoreException(::std::string msg) : ::std::logic_error{msg}
    {
    }
};
|
||||
|
||||
/**
|
||||
* @brief Simple string tokenizer. Used by @ref Config.
|
||||
*
|
||||
* @tparam KeyType The type of key to use
|
||||
* @tparam Separator The separator character
|
||||
*/
|
||||
template <typename KeyType, char Separator>
|
||||
class Tokenizer final
|
||||
{
|
||||
using opt_key_t = std::optional<KeyType>;
|
||||
KeyType key_;
|
||||
KeyType token_{};
|
||||
std::queue<KeyType> tokens_{};
|
||||
|
||||
public:
|
||||
explicit Tokenizer(KeyType key) : key_{key}
|
||||
{
|
||||
if (key.empty())
|
||||
throw KeyException("Empty key");
|
||||
|
||||
for (auto const& c : key)
|
||||
{
|
||||
if (c == Separator)
|
||||
saveToken();
|
||||
else
|
||||
token_ += c;
|
||||
}
|
||||
|
||||
saveToken();
|
||||
}
|
||||
|
||||
[[nodiscard]] opt_key_t
|
||||
next()
|
||||
{
|
||||
if (tokens_.empty())
|
||||
return std::nullopt;
|
||||
auto token = tokens_.front();
|
||||
tokens_.pop();
|
||||
return std::make_optional(std::move(token));
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
saveToken()
|
||||
{
|
||||
if (token_.empty())
|
||||
throw KeyException("Empty token in key '" + key_ + "'.");
|
||||
tokens_.push(std::move(token_));
|
||||
token_ = {};
|
||||
}
|
||||
};
|
||||
|
||||
/**
 * @brief Maps supported types to human-readable names for error messages.
 *
 * Falls back to the implementation-defined (typically mangled) name from
 * typeid for any type not listed. Consolidated into a single `if constexpr`
 * chain: the previous primary template was declared constexpr yet returned
 * `typeid(T).name()`, which can never be a constant expression.
 *
 * @tparam T The type to name
 * @return A string literal for known types, typeid(T).name() otherwise
 */
template <typename T>
static constexpr const char*
typeName()
{
    if constexpr (std::is_same_v<T, uint64_t>)
        return "uint64_t";
    else if constexpr (std::is_same_v<T, int64_t>)
        return "int64_t";
    else if constexpr (std::is_same_v<T, uint32_t>)
        return "uint32_t";
    else if constexpr (std::is_same_v<T, int32_t>)
        return "int32_t";
    else if constexpr (std::is_same_v<T, bool>)
        return "bool";
    else if constexpr (std::is_same_v<T, std::string>)
        return "std::string";
    else if constexpr (std::is_same_v<T, const char*>)
        return "const char*";
    else if constexpr (std::is_same_v<T, double>)
        return "double";
    else
        return typeid(T).name();  // implementation-defined fallback
}
|
||||
|
||||
}; // namespace clio::detail
|
||||
@@ -1,5 +1,24 @@
|
||||
#ifndef RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
|
||||
#define RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
@@ -23,8 +42,6 @@ class NetworkValidatedLedgers
|
||||
|
||||
std::condition_variable cv_;
|
||||
|
||||
bool stopping_ = false;
|
||||
|
||||
public:
|
||||
static std::shared_ptr<NetworkValidatedLedgers>
|
||||
make_ValidatedLedgers()
|
||||
@@ -173,5 +190,3 @@ getMarkers(size_t numMarkers)
|
||||
}
|
||||
return markers;
|
||||
}
|
||||
|
||||
#endif // RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/beast/net/IPEndpoint.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <boost/asio/strand.hpp>
|
||||
@@ -5,62 +24,99 @@
|
||||
#include <boost/beast/ssl.hpp>
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/json/src.hpp>
|
||||
#include <boost/log/trivial.hpp>
|
||||
|
||||
#include <backend/DBHelpers.h>
|
||||
#include <etl/ETLSource.h>
|
||||
#include <etl/ProbingETLSource.h>
|
||||
#include <etl/ReportingETL.h>
|
||||
#include <log/Logger.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
#include <util/Profiler.h>
|
||||
|
||||
#include <thread>
|
||||
|
||||
// Create ETL source without grpc endpoint
|
||||
// Fetch ledger and load initial ledger will fail for this source
|
||||
// Primarly used in read-only mode, to monitor when ledgers are validated
|
||||
template <class Derived>
|
||||
ETLSourceImpl<Derived>::ETLSourceImpl(
|
||||
boost::json::object const& config,
|
||||
boost::asio::io_context& ioContext,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
|
||||
ETLLoadBalancer& balancer)
|
||||
: resolver_(boost::asio::make_strand(ioContext))
|
||||
, networkValidatedLedgers_(networkValidatedLedgers)
|
||||
, backend_(backend)
|
||||
, subscriptions_(subscriptions)
|
||||
, balancer_(balancer)
|
||||
, ioc_(ioContext)
|
||||
, timer_(ioContext)
|
||||
using namespace clio;
|
||||
|
||||
void
|
||||
ForwardCache::freshen()
|
||||
{
|
||||
if (config.contains("ip"))
|
||||
log_.trace() << "Freshening ForwardCache";
|
||||
|
||||
auto numOutstanding =
|
||||
std::make_shared<std::atomic_uint>(latestForwarded_.size());
|
||||
|
||||
for (auto const& cacheEntry : latestForwarded_)
|
||||
{
|
||||
auto ipJs = config.at("ip").as_string();
|
||||
ip_ = {ipJs.c_str(), ipJs.size()};
|
||||
boost::asio::spawn(
|
||||
strand_,
|
||||
[this, numOutstanding, command = cacheEntry.first](
|
||||
boost::asio::yield_context yield) {
|
||||
boost::json::object request = {{"command", command}};
|
||||
auto resp = source_.requestFromRippled(request, {}, yield);
|
||||
|
||||
if (!resp || resp->contains("error"))
|
||||
resp = {};
|
||||
|
||||
{
|
||||
std::unique_lock lk(mtx_);
|
||||
latestForwarded_[command] = resp;
|
||||
}
|
||||
});
|
||||
}
|
||||
if (config.contains("ws_port"))
|
||||
}
|
||||
|
||||
void
|
||||
ForwardCache::clear()
|
||||
{
|
||||
std::unique_lock lk(mtx_);
|
||||
for (auto& cacheEntry : latestForwarded_)
|
||||
latestForwarded_[cacheEntry.first] = {};
|
||||
}
|
||||
|
||||
std::optional<boost::json::object>
|
||||
ForwardCache::get(boost::json::object const& request) const
|
||||
{
|
||||
std::optional<std::string> command = {};
|
||||
if (request.contains("command") && !request.contains("method") &&
|
||||
request.at("command").is_string())
|
||||
command = request.at("command").as_string().c_str();
|
||||
else if (
|
||||
request.contains("method") && !request.contains("command") &&
|
||||
request.at("method").is_string())
|
||||
command = request.at("method").as_string().c_str();
|
||||
|
||||
if (!command)
|
||||
return {};
|
||||
if (RPC::specifiesCurrentOrClosedLedger(request))
|
||||
return {};
|
||||
|
||||
std::shared_lock lk(mtx_);
|
||||
if (!latestForwarded_.contains(*command))
|
||||
return {};
|
||||
|
||||
return {latestForwarded_.at(*command)};
|
||||
}
|
||||
|
||||
static boost::beast::websocket::stream_base::timeout
|
||||
make_TimeoutOption()
|
||||
{
|
||||
// See #289 for details.
|
||||
// TODO: investigate the issue and find if there is a solution other than
|
||||
// introducing artificial timeouts.
|
||||
if (true)
|
||||
{
|
||||
auto portjs = config.at("ws_port").as_string();
|
||||
wsPort_ = {portjs.c_str(), portjs.size()};
|
||||
// The only difference between this and the suggested client role is
|
||||
// that idle_timeout is set to 20 instead of none()
|
||||
auto opt = boost::beast::websocket::stream_base::timeout{};
|
||||
opt.handshake_timeout = std::chrono::seconds(30);
|
||||
opt.idle_timeout = std::chrono::seconds(20);
|
||||
opt.keep_alive_pings = false;
|
||||
return opt;
|
||||
}
|
||||
if (config.contains("grpc_port"))
|
||||
else
|
||||
{
|
||||
auto portjs = config.at("grpc_port").as_string();
|
||||
grpcPort_ = {portjs.c_str(), portjs.size()};
|
||||
try
|
||||
{
|
||||
boost::asio::ip::tcp::endpoint endpoint{
|
||||
boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
|
||||
std::stringstream ss;
|
||||
ss << endpoint;
|
||||
stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
|
||||
grpc::CreateChannel(
|
||||
ss.str(), grpc::InsecureChannelCredentials()));
|
||||
BOOST_LOG_TRIVIAL(debug) << "Made stub for remote = " << toString();
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< "Exception while creating stub = " << e.what()
|
||||
<< " . Remote = " << toString();
|
||||
}
|
||||
return boost::beast::websocket::stream_base::timeout::suggested(
|
||||
boost::beast::role_type::client);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,6 +124,12 @@ template <class Derived>
|
||||
void
|
||||
ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
|
||||
{
|
||||
if (paused_)
|
||||
return;
|
||||
|
||||
if (connected_)
|
||||
hooks_.onDisconnected(ec);
|
||||
|
||||
connected_ = false;
|
||||
// These are somewhat normal errors. operation_aborted occurs on shutdown,
|
||||
// when the timer is cancelled. connection_refused will occur repeatedly
|
||||
@@ -89,15 +151,11 @@ ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
|
||||
if (ec != boost::asio::error::operation_aborted &&
|
||||
ec != boost::asio::error::connection_refused)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< __func__ << " : "
|
||||
<< "error code = " << ec << " - " << toString();
|
||||
log_.error() << "error code = " << ec << " - " << toString();
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< __func__ << " : "
|
||||
<< "error code = " << ec << " - " << toString();
|
||||
log_.warn() << "error code = " << ec << " - " << toString();
|
||||
}
|
||||
|
||||
// exponentially increasing timeouts, with a max of 30 seconds
|
||||
@@ -106,7 +164,7 @@ ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
|
||||
timer_.expires_after(boost::asio::chrono::seconds(waitTime));
|
||||
timer_.async_wait([this](auto ec) {
|
||||
bool startAgain = (ec != boost::asio::error::operation_aborted);
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__ << " async_wait : ec = " << ec;
|
||||
log_.trace() << "async_wait : ec = " << ec;
|
||||
derived().close(startAgain);
|
||||
});
|
||||
}
|
||||
@@ -130,17 +188,27 @@ PlainETLSource::close(bool startAgain)
|
||||
[this, startAgain](auto ec) {
|
||||
if (ec)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< __func__ << " async_close : "
|
||||
log_.error()
|
||||
<< " async_close : "
|
||||
<< "error code = " << ec << " - " << toString();
|
||||
}
|
||||
closing_ = false;
|
||||
if (startAgain)
|
||||
{
|
||||
ws_ = std::make_unique<boost::beast::websocket::stream<
|
||||
boost::beast::tcp_stream>>(
|
||||
boost::asio::make_strand(ioc_));
|
||||
|
||||
run();
|
||||
}
|
||||
});
|
||||
}
|
||||
else if (startAgain)
|
||||
{
|
||||
ws_ = std::make_unique<
|
||||
boost::beast::websocket::stream<boost::beast::tcp_stream>>(
|
||||
boost::asio::make_strand(ioc_));
|
||||
|
||||
run();
|
||||
}
|
||||
});
|
||||
@@ -165,8 +233,8 @@ SslETLSource::close(bool startAgain)
|
||||
[this, startAgain](auto ec) {
|
||||
if (ec)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< __func__ << " async_close : "
|
||||
log_.error()
|
||||
<< " async_close : "
|
||||
<< "error code = " << ec << " - " << toString();
|
||||
}
|
||||
closing_ = false;
|
||||
@@ -198,8 +266,7 @@ ETLSourceImpl<Derived>::onResolve(
|
||||
boost::beast::error_code ec,
|
||||
boost::asio::ip::tcp::resolver::results_type results)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " : ec = " << ec << " - " << toString();
|
||||
log_.trace() << "ec = " << ec << " - " << toString();
|
||||
if (ec)
|
||||
{
|
||||
// try again
|
||||
@@ -221,8 +288,7 @@ PlainETLSource::onConnect(
|
||||
boost::beast::error_code ec,
|
||||
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " : ec = " << ec << " - " << toString();
|
||||
log_.trace() << "ec = " << ec << " - " << toString();
|
||||
if (ec)
|
||||
{
|
||||
// start over
|
||||
@@ -235,21 +301,17 @@ PlainETLSource::onConnect(
|
||||
// own timeout system
|
||||
boost::beast::get_lowest_layer(derived().ws()).expires_never();
|
||||
|
||||
// Set suggested timeout settings for the websocket
|
||||
derived().ws().set_option(
|
||||
boost::beast::websocket::stream_base::timeout::suggested(
|
||||
boost::beast::role_type::client));
|
||||
// Set a desired timeout for the websocket stream
|
||||
derived().ws().set_option(make_TimeoutOption());
|
||||
|
||||
// Set a decorator to change the User-Agent of the handshake
|
||||
derived().ws().set_option(
|
||||
boost::beast::websocket::stream_base::decorator(
|
||||
[](boost::beast::websocket::request_type& req) {
|
||||
req.set(
|
||||
boost::beast::http::field::user_agent,
|
||||
std::string(BOOST_BEAST_VERSION_STRING) +
|
||||
" clio-client");
|
||||
boost::beast::http::field::user_agent, "clio-client");
|
||||
|
||||
req.set("X-User", "coro-client");
|
||||
req.set("X-User", "clio-client");
|
||||
}));
|
||||
|
||||
// Update the host_ string. This will provide the value of the
|
||||
@@ -267,8 +329,7 @@ SslETLSource::onConnect(
|
||||
boost::beast::error_code ec,
|
||||
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " : ec = " << ec << " - " << toString();
|
||||
log_.trace() << "ec = " << ec << " - " << toString();
|
||||
if (ec)
|
||||
{
|
||||
// start over
|
||||
@@ -281,21 +342,17 @@ SslETLSource::onConnect(
|
||||
// own timeout system
|
||||
boost::beast::get_lowest_layer(derived().ws()).expires_never();
|
||||
|
||||
// Set suggested timeout settings for the websocket
|
||||
derived().ws().set_option(
|
||||
boost::beast::websocket::stream_base::timeout::suggested(
|
||||
boost::beast::role_type::client));
|
||||
// Set a desired timeout for the websocket stream
|
||||
derived().ws().set_option(make_TimeoutOption());
|
||||
|
||||
// Set a decorator to change the User-Agent of the handshake
|
||||
derived().ws().set_option(
|
||||
boost::beast::websocket::stream_base::decorator(
|
||||
[](boost::beast::websocket::request_type& req) {
|
||||
req.set(
|
||||
boost::beast::http::field::user_agent,
|
||||
std::string(BOOST_BEAST_VERSION_STRING) +
|
||||
" clio-client");
|
||||
boost::beast::http::field::user_agent, "clio-client");
|
||||
|
||||
req.set("X-User", "coro-client");
|
||||
req.set("X-User", "clio-client");
|
||||
}));
|
||||
|
||||
// Update the host_ string. This will provide the value of the
|
||||
@@ -331,8 +388,11 @@ template <class Derived>
|
||||
void
|
||||
ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " : ec = " << ec << " - " << toString();
|
||||
log_.trace() << "ec = " << ec << " - " << toString();
|
||||
if (auto action = hooks_.onConnected(ec);
|
||||
action == ETLSourceHooks::Action::STOP)
|
||||
return;
|
||||
|
||||
if (ec)
|
||||
{
|
||||
// start over
|
||||
@@ -345,7 +405,7 @@ ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
|
||||
{"streams",
|
||||
{"ledger", "manifests", "validations", "transactions_proposed"}}};
|
||||
std::string s = boost::json::serialize(jv);
|
||||
BOOST_LOG_TRIVIAL(trace) << "Sending subscribe stream message";
|
||||
log_.trace() << "Sending subscribe stream message";
|
||||
|
||||
derived().ws().set_option(
|
||||
boost::beast::websocket::stream_base::decorator(
|
||||
@@ -371,8 +431,7 @@ ETLSourceImpl<Derived>::onWrite(
|
||||
boost::beast::error_code ec,
|
||||
size_t bytesWritten)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " : ec = " << ec << " - " << toString();
|
||||
log_.trace() << "ec = " << ec << " - " << toString();
|
||||
if (ec)
|
||||
{
|
||||
// start over
|
||||
@@ -389,8 +448,7 @@ template <class Derived>
|
||||
void
|
||||
ETLSourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " : ec = " << ec << " - " << toString();
|
||||
log_.trace() << "ec = " << ec << " - " << toString();
|
||||
// if error or error reading message, start over
|
||||
if (ec)
|
||||
{
|
||||
@@ -402,8 +460,7 @@ ETLSourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
|
||||
boost::beast::flat_buffer buffer;
|
||||
swap(readBuffer_, buffer);
|
||||
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " : calling async_read - " << toString();
|
||||
log_.trace() << "calling async_read - " << toString();
|
||||
derived().ws().async_read(
|
||||
readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
|
||||
}
|
||||
@@ -413,7 +470,7 @@ template <class Derived>
|
||||
bool
|
||||
ETLSourceImpl<Derived>::handleMessage()
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString();
|
||||
log_.trace() << toString();
|
||||
|
||||
setLastMsgTime();
|
||||
connected_ = true;
|
||||
@@ -422,9 +479,9 @@ ETLSourceImpl<Derived>::handleMessage()
|
||||
std::string msg{
|
||||
static_cast<char const*>(readBuffer_.data().data()),
|
||||
readBuffer_.size()};
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__ << msg;
|
||||
log_.trace() << msg;
|
||||
boost::json::value raw = boost::json::parse(msg);
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__ << " parsed";
|
||||
log_.trace() << "parsed";
|
||||
boost::json::object response = raw.as_object();
|
||||
|
||||
uint32_t ledgerIndex = 0;
|
||||
@@ -443,20 +500,16 @@ ETLSourceImpl<Derived>::handleMessage()
|
||||
setValidatedRange(
|
||||
{validatedLedgers.c_str(), validatedLedgers.size()});
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " : "
|
||||
<< "Received a message on ledger "
|
||||
<< " subscription stream. Message : " << response << " - "
|
||||
<< toString();
|
||||
log_.info() << "Received a message on ledger "
|
||||
<< " subscription stream. Message : " << response
|
||||
<< " - " << toString();
|
||||
}
|
||||
else if (
|
||||
response.contains("type") && response["type"] == "ledgerClosed")
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " : "
|
||||
<< "Received a message on ledger "
|
||||
<< " subscription stream. Message : " << response << " - "
|
||||
<< toString();
|
||||
log_.info() << "Received a message on ledger "
|
||||
<< " subscription stream. Message : " << response
|
||||
<< " - " << toString();
|
||||
if (response.contains("ledger_index"))
|
||||
{
|
||||
ledgerIndex = response["ledger_index"].as_int64();
|
||||
@@ -475,6 +528,7 @@ ETLSourceImpl<Derived>::handleMessage()
|
||||
{
|
||||
if (response.contains("transaction"))
|
||||
{
|
||||
forwardCache_.freshen();
|
||||
subscriptions_->forwardProposedTransaction(response);
|
||||
}
|
||||
else if (
|
||||
@@ -494,23 +548,23 @@ ETLSourceImpl<Derived>::handleMessage()
|
||||
|
||||
if (ledgerIndex != 0)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< __func__ << " : "
|
||||
<< "Pushing ledger sequence = " << ledgerIndex << " - "
|
||||
<< toString();
|
||||
log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - "
|
||||
<< toString();
|
||||
networkValidatedLedgers_->push(ledgerIndex);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << "Exception in handleMessage : " << e.what();
|
||||
log_.error() << "Exception in handleMessage : " << e.what();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
class AsyncCallData
|
||||
{
|
||||
clio::Logger log_{"ETL"};
|
||||
|
||||
std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
|
||||
std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;
|
||||
|
||||
@@ -540,11 +594,11 @@ public:
|
||||
|
||||
unsigned char prefix = marker.data()[0];
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
|
||||
<< " . prefix = " << ripple::strHex(std::string(1, prefix))
|
||||
<< " . nextPrefix_ = "
|
||||
<< ripple::strHex(std::string(1, nextPrefix_));
|
||||
log_.debug() << "Setting up AsyncCallData. marker = "
|
||||
<< ripple::strHex(marker)
|
||||
<< " . prefix = " << ripple::strHex(std::string(1, prefix))
|
||||
<< " . nextPrefix_ = "
|
||||
<< ripple::strHex(std::string(1, nextPrefix_));
|
||||
|
||||
assert(nextPrefix_ > prefix || nextPrefix_ == 0x00);
|
||||
|
||||
@@ -564,26 +618,24 @@ public:
|
||||
bool abort,
|
||||
bool cacheOnly = false)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << "Processing response. "
|
||||
<< "Marker prefix = " << getMarkerPrefix();
|
||||
log_.trace() << "Processing response. "
|
||||
<< "Marker prefix = " << getMarkerPrefix();
|
||||
if (abort)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << "AsyncCallData aborted";
|
||||
log_.error() << "AsyncCallData aborted";
|
||||
return CallStatus::ERRORED;
|
||||
}
|
||||
if (!status_.ok())
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< "AsyncCallData status_ not ok: "
|
||||
<< " code = " << status_.error_code()
|
||||
<< " message = " << status_.error_message();
|
||||
log_.error() << "AsyncCallData status_ not ok: "
|
||||
<< " code = " << status_.error_code()
|
||||
<< " message = " << status_.error_message();
|
||||
return CallStatus::ERRORED;
|
||||
}
|
||||
if (!next_->is_unlimited())
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< "AsyncCallData is_unlimited is false. Make sure "
|
||||
"secure_gateway is set correctly at the ETL source";
|
||||
log_.warn() << "AsyncCallData is_unlimited is false. Make sure "
|
||||
"secure_gateway is set correctly at the ETL source";
|
||||
}
|
||||
|
||||
std::swap(cur_, next_);
|
||||
@@ -606,7 +658,7 @@ public:
|
||||
call(stub, cq);
|
||||
}
|
||||
|
||||
BOOST_LOG_TRIVIAL(trace) << "Writing objects";
|
||||
log_.trace() << "Writing objects";
|
||||
std::vector<Backend::LedgerObject> cacheUpdates;
|
||||
cacheUpdates.reserve(cur_->ledger_objects().objects_size());
|
||||
for (int i = 0; i < cur_->ledger_objects().objects_size(); ++i)
|
||||
@@ -636,7 +688,7 @@ public:
|
||||
}
|
||||
backend.cache().update(
|
||||
cacheUpdates, request_.ledger().sequence(), cacheOnly);
|
||||
BOOST_LOG_TRIVIAL(trace) << "Wrote objects";
|
||||
log_.trace() << "Wrote objects";
|
||||
|
||||
return more ? CallStatus::MORE : CallStatus::DONE;
|
||||
}
|
||||
@@ -700,8 +752,8 @@ ETLSourceImpl<Derived>::loadInitialLedger(
|
||||
calls.emplace_back(sequence, markers[i], nextMarker);
|
||||
}
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug) << "Starting data download for ledger " << sequence
|
||||
<< ". Using source = " << toString();
|
||||
log_.debug() << "Starting data download for ledger " << sequence
|
||||
<< ". Using source = " << toString();
|
||||
|
||||
for (auto& c : calls)
|
||||
c.call(stub_, cq);
|
||||
@@ -719,21 +771,19 @@ ETLSourceImpl<Derived>::loadInitialLedger(
|
||||
|
||||
if (!ok)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << "loadInitialLedger - ok is false";
|
||||
log_.error() << "loadInitialLedger - ok is false";
|
||||
return false;
|
||||
// handle cancelled
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace)
|
||||
<< "Marker prefix = " << ptr->getMarkerPrefix();
|
||||
log_.trace() << "Marker prefix = " << ptr->getMarkerPrefix();
|
||||
auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
|
||||
if (result != AsyncCallData::CallStatus::MORE)
|
||||
{
|
||||
numFinished++;
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< "Finished a marker. "
|
||||
<< "Current number of finished = " << numFinished;
|
||||
log_.debug() << "Finished a marker. "
|
||||
<< "Current number of finished = " << numFinished;
|
||||
std::string lastKey = ptr->getLastKey();
|
||||
if (lastKey.size())
|
||||
edgeKeys.push_back(ptr->getLastKey());
|
||||
@@ -744,89 +794,84 @@ ETLSourceImpl<Derived>::loadInitialLedger(
|
||||
}
|
||||
if (backend_->cache().size() > progress)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< "Downloaded " << backend_->cache().size()
|
||||
<< " records from rippled";
|
||||
log_.info() << "Downloaded " << backend_->cache().size()
|
||||
<< " records from rippled";
|
||||
progress += incr;
|
||||
}
|
||||
}
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__ << " - finished loadInitialLedger. cache size = "
|
||||
<< backend_->cache().size();
|
||||
log_.info() << "Finished loadInitialLedger. cache size = "
|
||||
<< backend_->cache().size();
|
||||
size_t numWrites = 0;
|
||||
if (!abort)
|
||||
{
|
||||
backend_->cache().setFull();
|
||||
if (!cacheOnly)
|
||||
{
|
||||
auto start = std::chrono::system_clock::now();
|
||||
for (auto& key : edgeKeys)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__
|
||||
<< " writing edge key = " << ripple::strHex(key);
|
||||
auto succ = backend_->cache().getSuccessor(
|
||||
*ripple::uint256::fromVoidChecked(key), sequence);
|
||||
if (succ)
|
||||
backend_->writeSuccessor(
|
||||
std::move(key), sequence, uint256ToString(succ->key));
|
||||
}
|
||||
ripple::uint256 prev = Backend::firstKey;
|
||||
while (auto cur = backend_->cache().getSuccessor(prev, sequence))
|
||||
{
|
||||
assert(cur);
|
||||
if (prev == Backend::firstKey)
|
||||
auto seconds = util::timed<std::chrono::seconds>([&]() {
|
||||
for (auto& key : edgeKeys)
|
||||
{
|
||||
backend_->writeSuccessor(
|
||||
uint256ToString(prev),
|
||||
sequence,
|
||||
uint256ToString(cur->key));
|
||||
log_.debug()
|
||||
<< "Writing edge key = " << ripple::strHex(key);
|
||||
auto succ = backend_->cache().getSuccessor(
|
||||
*ripple::uint256::fromVoidChecked(key), sequence);
|
||||
if (succ)
|
||||
backend_->writeSuccessor(
|
||||
std::move(key),
|
||||
sequence,
|
||||
uint256ToString(succ->key));
|
||||
}
|
||||
|
||||
if (isBookDir(cur->key, cur->blob))
|
||||
ripple::uint256 prev = Backend::firstKey;
|
||||
while (auto cur =
|
||||
backend_->cache().getSuccessor(prev, sequence))
|
||||
{
|
||||
auto base = getBookBase(cur->key);
|
||||
// make sure the base is not an actual object
|
||||
if (!backend_->cache().get(cur->key, sequence))
|
||||
assert(cur);
|
||||
if (prev == Backend::firstKey)
|
||||
{
|
||||
auto succ =
|
||||
backend_->cache().getSuccessor(base, sequence);
|
||||
assert(succ);
|
||||
if (succ->key == cur->key)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " Writing book successor = "
|
||||
<< ripple::strHex(base) << " - "
|
||||
<< ripple::strHex(cur->key);
|
||||
|
||||
backend_->writeSuccessor(
|
||||
uint256ToString(base),
|
||||
sequence,
|
||||
uint256ToString(cur->key));
|
||||
}
|
||||
backend_->writeSuccessor(
|
||||
uint256ToString(prev),
|
||||
sequence,
|
||||
uint256ToString(cur->key));
|
||||
}
|
||||
++numWrites;
|
||||
|
||||
if (isBookDir(cur->key, cur->blob))
|
||||
{
|
||||
auto base = getBookBase(cur->key);
|
||||
// make sure the base is not an actual object
|
||||
if (!backend_->cache().get(cur->key, sequence))
|
||||
{
|
||||
auto succ =
|
||||
backend_->cache().getSuccessor(base, sequence);
|
||||
assert(succ);
|
||||
if (succ->key == cur->key)
|
||||
{
|
||||
log_.debug() << "Writing book successor = "
|
||||
<< ripple::strHex(base) << " - "
|
||||
<< ripple::strHex(cur->key);
|
||||
|
||||
backend_->writeSuccessor(
|
||||
uint256ToString(base),
|
||||
sequence,
|
||||
uint256ToString(cur->key));
|
||||
}
|
||||
}
|
||||
++numWrites;
|
||||
}
|
||||
prev = std::move(cur->key);
|
||||
if (numWrites % 100000 == 0 && numWrites != 0)
|
||||
log_.info()
|
||||
<< "Wrote " << numWrites << " book successors";
|
||||
}
|
||||
prev = std::move(cur->key);
|
||||
if (numWrites % 100000 == 0 && numWrites != 0)
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " Wrote "
|
||||
<< numWrites << " book successors";
|
||||
}
|
||||
|
||||
backend_->writeSuccessor(
|
||||
uint256ToString(prev),
|
||||
sequence,
|
||||
uint256ToString(Backend::lastKey));
|
||||
backend_->writeSuccessor(
|
||||
uint256ToString(prev),
|
||||
sequence,
|
||||
uint256ToString(Backend::lastKey));
|
||||
|
||||
++numWrites;
|
||||
auto end = std::chrono::system_clock::now();
|
||||
auto seconds =
|
||||
std::chrono::duration_cast<std::chrono::seconds>(end - start)
|
||||
.count();
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__
|
||||
<< " - Looping through cache and submitting all writes took "
|
||||
++numWrites;
|
||||
});
|
||||
log_.info()
|
||||
<< "Looping through cache and submitting all writes took "
|
||||
<< seconds
|
||||
<< " seconds. numWrites = " << std::to_string(numWrites);
|
||||
}
|
||||
@@ -857,49 +902,30 @@ ETLSourceImpl<Derived>::fetchLedger(
|
||||
grpc::Status status = stub_->GetLedger(&context, request, &response);
|
||||
if (status.ok() && !response.is_unlimited())
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< "ETLSourceImpl::fetchLedger - is_unlimited is "
|
||||
"false. Make sure secure_gateway is set "
|
||||
"correctly on the ETL source. source = "
|
||||
<< toString() << " status = " << status.error_message();
|
||||
log_.warn() << "ETLSourceImpl::fetchLedger - is_unlimited is "
|
||||
"false. Make sure secure_gateway is set "
|
||||
"correctly on the ETL source. source = "
|
||||
<< toString() << " status = " << status.error_message();
|
||||
}
|
||||
// BOOST_LOG_TRIVIAL(debug)
|
||||
// << __func__ << " Message size = " << response.ByteSizeLong();
|
||||
return {status, std::move(response)};
|
||||
}
|
||||
|
||||
static std::unique_ptr<ETLSource>
|
||||
make_ETLSource(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioContext,
|
||||
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
|
||||
ETLLoadBalancer& balancer)
|
||||
{
|
||||
std::unique_ptr<ETLSource> src = nullptr;
|
||||
if (sslCtx)
|
||||
{
|
||||
src = std::make_unique<SslETLSource>(
|
||||
config,
|
||||
ioContext,
|
||||
sslCtx,
|
||||
backend,
|
||||
subscriptions,
|
||||
networkValidatedLedgers,
|
||||
balancer);
|
||||
}
|
||||
else
|
||||
{
|
||||
src = std::make_unique<PlainETLSource>(
|
||||
config,
|
||||
ioContext,
|
||||
backend,
|
||||
subscriptions,
|
||||
networkValidatedLedgers,
|
||||
balancer);
|
||||
}
|
||||
auto src = std::make_unique<ProbingETLSource>(
|
||||
config,
|
||||
ioContext,
|
||||
backend,
|
||||
subscriptions,
|
||||
networkValidatedLedgers,
|
||||
balancer);
|
||||
|
||||
src->run();
|
||||
|
||||
@@ -907,38 +933,24 @@ make_ETLSource(
|
||||
}
|
||||
|
||||
ETLLoadBalancer::ETLLoadBalancer(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioContext,
|
||||
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> nwvl)
|
||||
{
|
||||
if (config.contains("num_markers") && config.at("num_markers").is_int64())
|
||||
{
|
||||
downloadRanges_ = config.at("num_markers").as_int64();
|
||||
|
||||
downloadRanges_ = std::clamp(downloadRanges_, {1}, {256});
|
||||
}
|
||||
if (auto value = config.maybeValue<uint32_t>("num_markers"); value)
|
||||
downloadRanges_ = std::clamp(*value, 1u, 256u);
|
||||
else if (backend->fetchLedgerRange())
|
||||
{
|
||||
downloadRanges_ = 4;
|
||||
}
|
||||
|
||||
for (auto& entry : config.at("etl_sources").as_array())
|
||||
for (auto const& entry : config.array("etl_sources"))
|
||||
{
|
||||
std::unique_ptr<ETLSource> source = make_ETLSource(
|
||||
entry.as_object(),
|
||||
ioContext,
|
||||
sslCtx,
|
||||
backend,
|
||||
subscriptions,
|
||||
nwvl,
|
||||
*this);
|
||||
entry, ioContext, backend, subscriptions, nwvl, *this);
|
||||
|
||||
sources_.push_back(std::move(source));
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - "
|
||||
<< sources_.back()->toString();
|
||||
log_.info() << "Added etl source - " << sources_.back()->toString();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -951,9 +963,9 @@ ETLLoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
|
||||
source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);
|
||||
if (!res)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << "Failed to download initial ledger."
|
||||
<< " Sequence = " << sequence
|
||||
<< " source = " << source->toString();
|
||||
log_.error() << "Failed to download initial ledger."
|
||||
<< " Sequence = " << sequence
|
||||
<< " source = " << source->toString();
|
||||
}
|
||||
return res;
|
||||
},
|
||||
@@ -968,26 +980,24 @@ ETLLoadBalancer::fetchLedger(
|
||||
{
|
||||
org::xrpl::rpc::v1::GetLedgerResponse response;
|
||||
bool success = execute(
|
||||
[&response, ledgerSequence, getObjects, getObjectNeighbors](
|
||||
[&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](
|
||||
auto& source) {
|
||||
auto [status, data] = source->fetchLedger(
|
||||
ledgerSequence, getObjects, getObjectNeighbors);
|
||||
response = std::move(data);
|
||||
if (status.ok() && (response.validated() || true))
|
||||
if (status.ok() && response.validated())
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< "Successfully fetched ledger = " << ledgerSequence
|
||||
<< " from source = " << source->toString();
|
||||
log.info() << "Successfully fetched ledger = " << ledgerSequence
|
||||
<< " from source = " << source->toString();
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< "Error getting ledger = " << ledgerSequence
|
||||
<< " Reply : " << response.DebugString()
|
||||
<< " error_code : " << status.error_code()
|
||||
<< " error_msg : " << status.error_message()
|
||||
<< " source = " << source->toString();
|
||||
log.warn() << "Error getting ledger = " << ledgerSequence
|
||||
<< ", Reply: " << response.DebugString()
|
||||
<< ", error_code: " << status.error_code()
|
||||
<< ", error_msg: " << status.error_message()
|
||||
<< ", source = " << source->toString();
|
||||
return false;
|
||||
}
|
||||
},
|
||||
@@ -1026,14 +1036,29 @@ ETLSourceImpl<Derived>::forwardToRippled(
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug) << "Attempting to forward request to tx. "
|
||||
<< "request = " << boost::json::serialize(request);
|
||||
if (auto resp = forwardCache_.get(request); resp)
|
||||
{
|
||||
log_.debug() << "request hit forwardCache";
|
||||
return resp;
|
||||
}
|
||||
|
||||
return requestFromRippled(request, clientIp, yield);
|
||||
}
|
||||
|
||||
template <class Derived>
|
||||
std::optional<boost::json::object>
|
||||
ETLSourceImpl<Derived>::requestFromRippled(
|
||||
boost::json::object const& request,
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
log_.trace() << "Attempting to forward request to tx. "
|
||||
<< "request = " << boost::json::serialize(request);
|
||||
|
||||
boost::json::object response;
|
||||
if (!connected_)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< "Attempted to proxy but failed to connect to tx";
|
||||
log_.error() << "Attempted to proxy but failed to connect to tx";
|
||||
return {};
|
||||
}
|
||||
namespace beast = boost::beast; // from <boost/beast.hpp>
|
||||
@@ -1047,7 +1072,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
|
||||
// These objects perform our I/O
|
||||
tcp::resolver resolver{ioc_};
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug) << "Creating websocket";
|
||||
log_.trace() << "Creating websocket";
|
||||
auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);
|
||||
|
||||
// Look up the domain name
|
||||
@@ -1057,7 +1082,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
|
||||
|
||||
ws->next_layer().expires_after(std::chrono::seconds(3));
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug) << "Connecting websocket";
|
||||
log_.trace() << "Connecting websocket";
|
||||
// Make the connection on the IP address we get from a lookup
|
||||
ws->next_layer().async_connect(results, yield[ec]);
|
||||
if (ec)
|
||||
@@ -1076,15 +1101,15 @@ ETLSourceImpl<Derived>::forwardToRippled(
|
||||
" websocket-client-coro");
|
||||
req.set(http::field::forwarded, "for=" + clientIp);
|
||||
}));
|
||||
BOOST_LOG_TRIVIAL(debug) << "client ip: " << clientIp;
|
||||
log_.trace() << "client ip: " << clientIp;
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug) << "Performing websocket handshake";
|
||||
log_.trace() << "Performing websocket handshake";
|
||||
// Perform the websocket handshake
|
||||
ws->async_handshake(ip_, "/", yield[ec]);
|
||||
if (ec)
|
||||
return {};
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug) << "Sending request";
|
||||
log_.trace() << "Sending request";
|
||||
// Send the message
|
||||
ws->async_write(
|
||||
net::buffer(boost::json::serialize(request)), yield[ec]);
|
||||
@@ -1102,11 +1127,11 @@ ETLSourceImpl<Derived>::forwardToRippled(
|
||||
|
||||
if (!parsed.is_object())
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< "Error parsing response: " << std::string{begin, end};
|
||||
log_.error() << "Error parsing response: "
|
||||
<< std::string{begin, end};
|
||||
return {};
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(debug) << "Successfully forward request";
|
||||
log_.trace() << "Successfully forward request";
|
||||
|
||||
response = parsed.as_object();
|
||||
|
||||
@@ -1115,7 +1140,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << "Encountered exception : " << e.what();
|
||||
log_.error() << "Encountered exception : " << e.what();
|
||||
return {};
|
||||
}
|
||||
}
|
||||
@@ -1132,47 +1157,38 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence)
|
||||
{
|
||||
auto& source = sources_[sourceIdx];
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " : "
|
||||
<< "Attempting to execute func. ledger sequence = "
|
||||
<< ledgerSequence << " - source = " << source->toString();
|
||||
log_.debug() << "Attempting to execute func. ledger sequence = "
|
||||
<< ledgerSequence << " - source = " << source->toString();
|
||||
if (source->hasLedger(ledgerSequence) || true)
|
||||
{
|
||||
bool res = f(source);
|
||||
if (res)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " : "
|
||||
<< "Successfully executed func at source = "
|
||||
<< source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
log_.debug() << "Successfully executed func at source = "
|
||||
<< source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< __func__ << " : "
|
||||
<< "Failed to execute func at source = "
|
||||
<< source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
log_.warn() << "Failed to execute func at source = "
|
||||
<< source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< __func__ << " : "
|
||||
<< "Ledger not present at source = " << source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
log_.warn() << "Ledger not present at source = "
|
||||
<< source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
}
|
||||
sourceIdx = (sourceIdx + 1) % sources_.size();
|
||||
numAttempts++;
|
||||
if (numAttempts % sources_.size() == 0)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< __func__ << " : "
|
||||
<< "Error executing function "
|
||||
<< " - ledger sequence = " << ledgerSequence
|
||||
<< " - Tried all sources. Sleeping and trying again";
|
||||
log_.error() << "Error executing function "
|
||||
<< " - ledger sequence = " << ledgerSequence
|
||||
<< " - Tried all sources. Sleeping and trying again";
|
||||
std::this_thread::sleep_for(std::chrono::seconds(2));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,32 @@
|
||||
#ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
|
||||
#define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <config/Config.h>
|
||||
#include <etl/ETLHelpers.h>
|
||||
#include <log/Logger.h>
|
||||
#include <subscriptions/SubscriptionManager.h>
|
||||
|
||||
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
|
||||
#include <grpcpp/grpcpp.h>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/asio.hpp>
|
||||
@@ -7,14 +34,10 @@
|
||||
#include <boost/beast/core/string.hpp>
|
||||
#include <boost/beast/ssl.hpp>
|
||||
#include <boost/beast/websocket.hpp>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <subscriptions/SubscriptionManager.h>
|
||||
|
||||
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
|
||||
#include <etl/ETLHelpers.h>
|
||||
#include <grpcpp/grpcpp.h>
|
||||
|
||||
class ETLLoadBalancer;
|
||||
class ETLSource;
|
||||
class ProbingETLSource;
|
||||
class SubscriptionManager;
|
||||
|
||||
/// This class manages a connection to a single ETL source. This is almost
|
||||
@@ -24,6 +47,59 @@ class SubscriptionManager;
|
||||
/// has. This class also has methods for extracting said ledgers. Lastly this
|
||||
/// class forwards transactions received on the transactions_proposed streams to
|
||||
/// any subscribers.
|
||||
class ForwardCache
|
||||
{
|
||||
using response_type = std::optional<boost::json::object>;
|
||||
|
||||
clio::Logger log_{"ETL"};
|
||||
mutable std::atomic_bool stopping_ = false;
|
||||
mutable std::shared_mutex mtx_;
|
||||
std::unordered_map<std::string, response_type> latestForwarded_;
|
||||
|
||||
boost::asio::io_context::strand strand_;
|
||||
boost::asio::steady_timer timer_;
|
||||
ETLSource const& source_;
|
||||
std::uint32_t duration_ = 10;
|
||||
|
||||
void
|
||||
clear();
|
||||
|
||||
public:
|
||||
ForwardCache(
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
ETLSource const& source)
|
||||
: strand_(ioc), timer_(strand_), source_(source)
|
||||
{
|
||||
if (config.contains("cache"))
|
||||
{
|
||||
auto commands =
|
||||
config.arrayOrThrow("cache", "ETLSource cache must be array");
|
||||
|
||||
if (config.contains("cache_duration"))
|
||||
duration_ = config.valueOrThrow<uint32_t>(
|
||||
"cache_duration",
|
||||
"ETLSource cache_duration must be a number");
|
||||
|
||||
for (auto const& command : commands)
|
||||
{
|
||||
auto key = command.valueOrThrow<std::string>(
|
||||
"ETLSource forward command must be array of strings");
|
||||
latestForwarded_[key] = {};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This is to be called every freshenDuration_ seconds.
|
||||
// It will request information from this etlSource, and
|
||||
// will populate the cache with the latest value. If the
|
||||
// request fails, it will evict that value from the cache.
|
||||
void
|
||||
freshen();
|
||||
|
||||
std::optional<boost::json::object>
|
||||
get(boost::json::object const& command) const;
|
||||
};
|
||||
|
||||
class ETLSource
|
||||
{
|
||||
@@ -37,6 +113,12 @@ public:
|
||||
virtual void
|
||||
run() = 0;
|
||||
|
||||
virtual void
|
||||
pause() = 0;
|
||||
|
||||
virtual void
|
||||
resume() = 0;
|
||||
|
||||
virtual std::string
|
||||
toString() const = 0;
|
||||
|
||||
@@ -64,6 +146,27 @@ public:
|
||||
virtual ~ETLSource()
|
||||
{
|
||||
}
|
||||
|
||||
protected:
|
||||
clio::Logger log_{"ETL"};
|
||||
|
||||
private:
|
||||
friend ForwardCache;
|
||||
friend ProbingETLSource;
|
||||
|
||||
virtual std::optional<boost::json::object>
|
||||
requestFromRippled(
|
||||
boost::json::object const& request,
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context& yield) const = 0;
|
||||
};
|
||||
|
||||
struct ETLSourceHooks
|
||||
{
|
||||
enum class Action { STOP, PROCEED };
|
||||
|
||||
std::function<Action(boost::beast::error_code)> onConnected;
|
||||
std::function<Action(boost::beast::error_code)> onDisconnected;
|
||||
};
|
||||
|
||||
template <class Derived>
|
||||
@@ -81,7 +184,7 @@ class ETLSourceImpl : public ETLSource
|
||||
|
||||
std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;
|
||||
|
||||
std::string validatedLedgersRaw_;
|
||||
std::string validatedLedgersRaw_{"N/A"};
|
||||
|
||||
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;
|
||||
|
||||
@@ -105,6 +208,14 @@ class ETLSourceImpl : public ETLSource
|
||||
std::shared_ptr<SubscriptionManager> subscriptions_;
|
||||
ETLLoadBalancer& balancer_;
|
||||
|
||||
ForwardCache forwardCache_;
|
||||
|
||||
std::optional<boost::json::object>
|
||||
requestFromRippled(
|
||||
boost::json::object const& request,
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context& yield) const override;
|
||||
|
||||
protected:
|
||||
Derived&
|
||||
derived()
|
||||
@@ -123,10 +234,14 @@ protected:
|
||||
|
||||
std::atomic_bool closing_{false};
|
||||
|
||||
std::atomic_bool paused_{false};
|
||||
|
||||
ETLSourceHooks hooks_;
|
||||
|
||||
void
|
||||
run() override
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString();
|
||||
log_.trace() << toString();
|
||||
|
||||
auto const host = ip_;
|
||||
auto const port = wsPort_;
|
||||
@@ -139,7 +254,7 @@ protected:
|
||||
public:
|
||||
~ETLSourceImpl()
|
||||
{
|
||||
close(false);
|
||||
derived().close(false);
|
||||
}
|
||||
|
||||
bool
|
||||
@@ -166,12 +281,49 @@ public:
|
||||
/// Fetch ledger and load initial ledger will fail for this source
|
||||
/// Primarly used in read-only mode, to monitor when ledgers are validated
|
||||
ETLSourceImpl(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioContext,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
|
||||
ETLLoadBalancer& balancer);
|
||||
ETLLoadBalancer& balancer,
|
||||
ETLSourceHooks hooks)
|
||||
: resolver_(boost::asio::make_strand(ioContext))
|
||||
, networkValidatedLedgers_(networkValidatedLedgers)
|
||||
, backend_(backend)
|
||||
, subscriptions_(subscriptions)
|
||||
, balancer_(balancer)
|
||||
, forwardCache_(config, ioContext, *this)
|
||||
, ioc_(ioContext)
|
||||
, timer_(ioContext)
|
||||
, hooks_(hooks)
|
||||
{
|
||||
ip_ = config.valueOr<std::string>("ip", {});
|
||||
wsPort_ = config.valueOr<std::string>("ws_port", {});
|
||||
|
||||
if (auto value = config.maybeValue<std::string>("grpc_port"); value)
|
||||
{
|
||||
grpcPort_ = *value;
|
||||
try
|
||||
{
|
||||
boost::asio::ip::tcp::endpoint endpoint{
|
||||
boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
|
||||
std::stringstream ss;
|
||||
ss << endpoint;
|
||||
grpc::ChannelArguments chArgs;
|
||||
chArgs.SetMaxReceiveMessageSize(-1);
|
||||
stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
|
||||
grpc::CreateCustomChannel(
|
||||
ss.str(), grpc::InsecureChannelCredentials(), chArgs));
|
||||
log_.debug() << "Made stub for remote = " << toString();
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
log_.debug() << "Exception while creating stub = " << e.what()
|
||||
<< " . Remote = " << toString();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// @param sequence ledger sequence to check for
|
||||
/// @return true if this source has the desired ledger
|
||||
@@ -240,7 +392,6 @@ public:
|
||||
getValidatedRange() const
|
||||
{
|
||||
std::lock_guard lck(mtx_);
|
||||
|
||||
return validatedLedgersRaw_;
|
||||
}
|
||||
|
||||
@@ -258,9 +409,8 @@ public:
|
||||
std::string
|
||||
toString() const override
|
||||
{
|
||||
return "{ validated_ledger : " + getValidatedRange() +
|
||||
" , ip : " + ip_ + " , web socket port : " + wsPort_ +
|
||||
", grpc port : " + grpcPort_ + " }";
|
||||
return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ +
|
||||
", web socket port: " + wsPort_ + ", grpc port: " + grpcPort_ + "}";
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
@@ -295,6 +445,22 @@ public:
|
||||
void
|
||||
reconnect(boost::beast::error_code ec);
|
||||
|
||||
/// Pause the source effectively stopping it from trying to reconnect
|
||||
void
|
||||
pause() override
|
||||
{
|
||||
paused_ = true;
|
||||
derived().close(false);
|
||||
}
|
||||
|
||||
/// Resume the source allowing it to reconnect again
|
||||
void
|
||||
resume() override
|
||||
{
|
||||
paused_ = false;
|
||||
derived().close(true);
|
||||
}
|
||||
|
||||
/// Callback
|
||||
void
|
||||
onResolve(
|
||||
@@ -339,13 +505,21 @@ class PlainETLSource : public ETLSourceImpl<PlainETLSource>
|
||||
|
||||
public:
|
||||
PlainETLSource(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> nwvl,
|
||||
ETLLoadBalancer& balancer)
|
||||
: ETLSourceImpl(config, ioc, backend, subscriptions, nwvl, balancer)
|
||||
ETLLoadBalancer& balancer,
|
||||
ETLSourceHooks hooks)
|
||||
: ETLSourceImpl(
|
||||
config,
|
||||
ioc,
|
||||
backend,
|
||||
subscriptions,
|
||||
nwvl,
|
||||
balancer,
|
||||
std::move(hooks))
|
||||
, ws_(std::make_unique<
|
||||
boost::beast::websocket::stream<boost::beast::tcp_stream>>(
|
||||
boost::asio::make_strand(ioc)))
|
||||
@@ -380,14 +554,22 @@ class SslETLSource : public ETLSourceImpl<SslETLSource>
|
||||
|
||||
public:
|
||||
SslETLSource(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> nwvl,
|
||||
ETLLoadBalancer& balancer)
|
||||
: ETLSourceImpl(config, ioc, backend, subscriptions, nwvl, balancer)
|
||||
ETLLoadBalancer& balancer,
|
||||
ETLSourceHooks hooks)
|
||||
: ETLSourceImpl(
|
||||
config,
|
||||
ioc,
|
||||
backend,
|
||||
subscriptions,
|
||||
nwvl,
|
||||
balancer,
|
||||
std::move(hooks))
|
||||
, sslCtx_(sslCtx)
|
||||
, ws_(std::make_unique<boost::beast::websocket::stream<
|
||||
boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
|
||||
@@ -429,30 +611,28 @@ public:
|
||||
class ETLLoadBalancer
|
||||
{
|
||||
private:
|
||||
clio::Logger log_{"ETL"};
|
||||
std::vector<std::unique_ptr<ETLSource>> sources_;
|
||||
|
||||
std::uint32_t downloadRanges_ = 16;
|
||||
|
||||
public:
|
||||
ETLLoadBalancer(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioContext,
|
||||
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> nwvl);
|
||||
|
||||
static std::shared_ptr<ETLLoadBalancer>
|
||||
make_ETLLoadBalancer(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
|
||||
{
|
||||
return std::make_shared<ETLLoadBalancer>(
|
||||
config, ioc, sslCtx, backend, subscriptions, validatedLedgers);
|
||||
config, ioc, backend, subscriptions, validatedLedgers);
|
||||
}
|
||||
|
||||
~ETLLoadBalancer()
|
||||
@@ -542,5 +722,3 @@ private:
|
||||
bool
|
||||
execute(Func f, uint32_t ledgerSequence);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
388
src/etl/NFTHelpers.cpp
Normal file
388
src/etl/NFTHelpers.cpp
Normal file
@@ -0,0 +1,388 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
|
||||
#include <ripple/protocol/STBase.h>
|
||||
#include <ripple/protocol/STTx.h>
|
||||
#include <ripple/protocol/TxMeta.h>
|
||||
#include <vector>
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <backend/DBHelpers.h>
|
||||
#include <backend/Types.h>
|
||||
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
|
||||
{
|
||||
// To find the minted token ID, we put all tokenIDs referenced in the
|
||||
// metadata from prior to the tx application into one vector, then all
|
||||
// tokenIDs referenced in the metadata from after the tx application into
|
||||
// another, then find the one tokenID that was added by this tx
|
||||
// application.
|
||||
std::vector<ripple::uint256> prevIDs;
|
||||
std::vector<ripple::uint256> finalIDs;
|
||||
|
||||
// The owner is not necessarily the issuer, if using authorized minter
|
||||
// flow. Determine owner from the ledger object ID of the NFTokenPages
|
||||
// that were changed.
|
||||
std::optional<ripple::AccountID> owner;
|
||||
|
||||
for (ripple::STObject const& node : txMeta.getNodes())
|
||||
{
|
||||
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
|
||||
ripple::ltNFTOKEN_PAGE)
|
||||
continue;
|
||||
|
||||
if (!owner)
|
||||
owner = ripple::AccountID::fromVoid(
|
||||
node.getFieldH256(ripple::sfLedgerIndex).data());
|
||||
|
||||
if (node.getFName() == ripple::sfCreatedNode)
|
||||
{
|
||||
ripple::STArray const& toAddNFTs =
|
||||
node.peekAtField(ripple::sfNewFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getFieldArray(ripple::sfNFTokens);
|
||||
std::transform(
|
||||
toAddNFTs.begin(),
|
||||
toAddNFTs.end(),
|
||||
std::back_inserter(finalIDs),
|
||||
[](ripple::STObject const& nft) {
|
||||
return nft.getFieldH256(ripple::sfNFTokenID);
|
||||
});
|
||||
}
|
||||
// Else it's modified, as there should never be a deleted NFToken page
|
||||
// as a result of a mint.
|
||||
else
|
||||
{
|
||||
// When a mint results in splitting an existing page,
|
||||
// it results in a created page and a modified node. Sometimes,
|
||||
// the created node needs to be linked to a third page, resulting
|
||||
// in modifying that third page's PreviousPageMin or NextPageMin
|
||||
// field changing, but no NFTs within that page changing. In this
|
||||
// case, there will be no previous NFTs and we need to skip.
|
||||
// However, there will always be NFTs listed in the final fields,
|
||||
// as rippled outputs all fields in final fields even if they were
|
||||
// not changed.
|
||||
ripple::STObject const& previousFields =
|
||||
node.peekAtField(ripple::sfPreviousFields)
|
||||
.downcast<ripple::STObject>();
|
||||
if (!previousFields.isFieldPresent(ripple::sfNFTokens))
|
||||
continue;
|
||||
|
||||
ripple::STArray const& toAddNFTs =
|
||||
previousFields.getFieldArray(ripple::sfNFTokens);
|
||||
std::transform(
|
||||
toAddNFTs.begin(),
|
||||
toAddNFTs.end(),
|
||||
std::back_inserter(prevIDs),
|
||||
[](ripple::STObject const& nft) {
|
||||
return nft.getFieldH256(ripple::sfNFTokenID);
|
||||
});
|
||||
|
||||
ripple::STArray const& toAddFinalNFTs =
|
||||
node.peekAtField(ripple::sfFinalFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getFieldArray(ripple::sfNFTokens);
|
||||
std::transform(
|
||||
toAddFinalNFTs.begin(),
|
||||
toAddFinalNFTs.end(),
|
||||
std::back_inserter(finalIDs),
|
||||
[](ripple::STObject const& nft) {
|
||||
return nft.getFieldH256(ripple::sfNFTokenID);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
std::sort(finalIDs.begin(), finalIDs.end());
|
||||
std::sort(prevIDs.begin(), prevIDs.end());
|
||||
std::vector<ripple::uint256> tokenIDResult;
|
||||
std::set_difference(
|
||||
finalIDs.begin(),
|
||||
finalIDs.end(),
|
||||
prevIDs.begin(),
|
||||
prevIDs.end(),
|
||||
std::inserter(tokenIDResult, tokenIDResult.begin()));
|
||||
if (tokenIDResult.size() == 1 && owner)
|
||||
return {
|
||||
{NFTTransactionsData(
|
||||
tokenIDResult.front(), txMeta, sttx.getTransactionID())},
|
||||
NFTsData(tokenIDResult.front(), *owner, txMeta, false)};
|
||||
|
||||
std::stringstream msg;
|
||||
msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
|
||||
{
|
||||
ripple::uint256 const tokenID = sttx.getFieldH256(ripple::sfNFTokenID);
|
||||
std::vector<NFTTransactionsData> const txs = {
|
||||
NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())};
|
||||
|
||||
// Determine who owned the token when it was burned by finding an
|
||||
// NFTokenPage that was deleted or modified that contains this
|
||||
// tokenID.
|
||||
for (ripple::STObject const& node : txMeta.getNodes())
|
||||
{
|
||||
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
|
||||
ripple::ltNFTOKEN_PAGE ||
|
||||
node.getFName() == ripple::sfCreatedNode)
|
||||
continue;
|
||||
|
||||
// NFT burn can result in an NFTokenPage being modified to no longer
|
||||
// include the target, or an NFTokenPage being deleted. If this is
|
||||
// modified, we want to look for the target in the fields prior to
|
||||
// modification. If deleted, it's possible that the page was modified
|
||||
// to remove the target NFT prior to the entire page being deleted. In
|
||||
// this case, we need to look in the PreviousFields. Otherwise, the
|
||||
// page was not modified prior to deleting and we need to look in the
|
||||
// FinalFields.
|
||||
std::optional<ripple::STArray> prevNFTs;
|
||||
|
||||
if (node.isFieldPresent(ripple::sfPreviousFields))
|
||||
{
|
||||
ripple::STObject const& previousFields =
|
||||
node.peekAtField(ripple::sfPreviousFields)
|
||||
.downcast<ripple::STObject>();
|
||||
if (previousFields.isFieldPresent(ripple::sfNFTokens))
|
||||
prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
|
||||
}
|
||||
else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
|
||||
prevNFTs = node.peekAtField(ripple::sfFinalFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getFieldArray(ripple::sfNFTokens);
|
||||
|
||||
if (!prevNFTs)
|
||||
continue;
|
||||
|
||||
auto const nft = std::find_if(
|
||||
prevNFTs->begin(),
|
||||
prevNFTs->end(),
|
||||
[&tokenID](ripple::STObject const& candidate) {
|
||||
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
|
||||
});
|
||||
if (nft != prevNFTs->end())
|
||||
return std::make_pair(
|
||||
txs,
|
||||
NFTsData(
|
||||
tokenID,
|
||||
ripple::AccountID::fromVoid(
|
||||
node.getFieldH256(ripple::sfLedgerIndex).data()),
|
||||
txMeta,
|
||||
true));
|
||||
}
|
||||
|
||||
std::stringstream msg;
|
||||
msg << " - could not determine owner at burntime for tx "
|
||||
<< sttx.getTransactionID();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTokenAcceptOfferData(
|
||||
ripple::TxMeta const& txMeta,
|
||||
ripple::STTx const& sttx)
|
||||
{
|
||||
// If we have the buy offer from this tx, we can determine the owner
|
||||
// more easily by just looking at the owner of the accepted NFTokenOffer
|
||||
// object.
|
||||
if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer))
|
||||
{
|
||||
auto const affectedBuyOffer = std::find_if(
|
||||
txMeta.getNodes().begin(),
|
||||
txMeta.getNodes().end(),
|
||||
[&sttx](ripple::STObject const& node) {
|
||||
return node.getFieldH256(ripple::sfLedgerIndex) ==
|
||||
sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
|
||||
});
|
||||
if (affectedBuyOffer == txMeta.getNodes().end())
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << " - unexpected NFTokenAcceptOffer data in tx "
|
||||
<< sttx.getTransactionID();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
|
||||
ripple::uint256 const tokenID =
|
||||
affectedBuyOffer->peekAtField(ripple::sfFinalFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getFieldH256(ripple::sfNFTokenID);
|
||||
|
||||
ripple::AccountID const owner =
|
||||
affectedBuyOffer->peekAtField(ripple::sfFinalFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getAccountID(ripple::sfOwner);
|
||||
return {
|
||||
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
|
||||
NFTsData(tokenID, owner, txMeta, false)};
|
||||
}
|
||||
|
||||
// Otherwise we have to infer the new owner from the affected nodes.
|
||||
auto const affectedSellOffer = std::find_if(
|
||||
txMeta.getNodes().begin(),
|
||||
txMeta.getNodes().end(),
|
||||
[&sttx](ripple::STObject const& node) {
|
||||
return node.getFieldH256(ripple::sfLedgerIndex) ==
|
||||
sttx.getFieldH256(ripple::sfNFTokenSellOffer);
|
||||
});
|
||||
if (affectedSellOffer == txMeta.getNodes().end())
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << " - unexpected NFTokenAcceptOffer data in tx "
|
||||
<< sttx.getTransactionID();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
|
||||
ripple::uint256 const tokenID =
|
||||
affectedSellOffer->peekAtField(ripple::sfFinalFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getFieldH256(ripple::sfNFTokenID);
|
||||
|
||||
ripple::AccountID const seller =
|
||||
affectedSellOffer->peekAtField(ripple::sfFinalFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getAccountID(ripple::sfOwner);
|
||||
|
||||
for (ripple::STObject const& node : txMeta.getNodes())
|
||||
{
|
||||
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
|
||||
ripple::ltNFTOKEN_PAGE ||
|
||||
node.getFName() == ripple::sfDeletedNode)
|
||||
continue;
|
||||
|
||||
ripple::AccountID const nodeOwner = ripple::AccountID::fromVoid(
|
||||
node.getFieldH256(ripple::sfLedgerIndex).data());
|
||||
if (nodeOwner == seller)
|
||||
continue;
|
||||
|
||||
ripple::STArray const& nfts = [&node] {
|
||||
if (node.getFName() == ripple::sfCreatedNode)
|
||||
return node.peekAtField(ripple::sfNewFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getFieldArray(ripple::sfNFTokens);
|
||||
return node.peekAtField(ripple::sfFinalFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getFieldArray(ripple::sfNFTokens);
|
||||
}();
|
||||
|
||||
auto const nft = std::find_if(
|
||||
nfts.begin(),
|
||||
nfts.end(),
|
||||
[&tokenID](ripple::STObject const& candidate) {
|
||||
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
|
||||
});
|
||||
if (nft != nfts.end())
|
||||
return {
|
||||
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
|
||||
NFTsData(tokenID, nodeOwner, txMeta, false)};
|
||||
}
|
||||
|
||||
std::stringstream msg;
|
||||
msg << " - unexpected NFTokenAcceptOffer data in tx "
|
||||
<< sttx.getTransactionID();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
|
||||
// This is the only transaction where there can be more than 1 element in
|
||||
// the returned vector, because you can cancel multiple offers in one
|
||||
// transaction using this feature. This transaction also never returns an
|
||||
// NFTsData because it does not change the state of an NFT itself.
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTokenCancelOfferData(
|
||||
ripple::TxMeta const& txMeta,
|
||||
ripple::STTx const& sttx)
|
||||
{
|
||||
std::vector<NFTTransactionsData> txs;
|
||||
for (ripple::STObject const& node : txMeta.getNodes())
|
||||
{
|
||||
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
|
||||
ripple::ltNFTOKEN_OFFER)
|
||||
continue;
|
||||
|
||||
ripple::uint256 const tokenID = node.peekAtField(ripple::sfFinalFields)
|
||||
.downcast<ripple::STObject>()
|
||||
.getFieldH256(ripple::sfNFTokenID);
|
||||
txs.emplace_back(tokenID, txMeta, sttx.getTransactionID());
|
||||
}
|
||||
|
||||
// Deduplicate any transactions based on tokenID/txIdx combo. Can't just
|
||||
// use txIdx because in this case one tx can cancel offers for several
|
||||
// NFTs.
|
||||
std::sort(
|
||||
txs.begin(),
|
||||
txs.end(),
|
||||
[](NFTTransactionsData const& a, NFTTransactionsData const& b) {
|
||||
return a.tokenID < b.tokenID &&
|
||||
a.transactionIndex < b.transactionIndex;
|
||||
});
|
||||
auto last = std::unique(
|
||||
txs.begin(),
|
||||
txs.end(),
|
||||
[](NFTTransactionsData const& a, NFTTransactionsData const& b) {
|
||||
return a.tokenID == b.tokenID &&
|
||||
a.transactionIndex == b.transactionIndex;
|
||||
});
|
||||
txs.erase(last, txs.end());
|
||||
return {txs, {}};
|
||||
}
|
||||
|
||||
// This transaction never returns an NFTokensData because it does not
|
||||
// change the state of an NFT itself.
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTokenCreateOfferData(
|
||||
ripple::TxMeta const& txMeta,
|
||||
ripple::STTx const& sttx)
|
||||
{
|
||||
return {
|
||||
{NFTTransactionsData(
|
||||
sttx.getFieldH256(ripple::sfNFTokenID),
|
||||
txMeta,
|
||||
sttx.getTransactionID())},
|
||||
{}};
|
||||
}
|
||||
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
|
||||
{
|
||||
if (txMeta.getResultTER() != ripple::tesSUCCESS)
|
||||
return {{}, {}};
|
||||
|
||||
switch (sttx.getTxnType())
|
||||
{
|
||||
case ripple::TxType::ttNFTOKEN_MINT:
|
||||
return getNFTokenMintData(txMeta, sttx);
|
||||
|
||||
case ripple::TxType::ttNFTOKEN_BURN:
|
||||
return getNFTokenBurnData(txMeta, sttx);
|
||||
|
||||
case ripple::TxType::ttNFTOKEN_ACCEPT_OFFER:
|
||||
return getNFTokenAcceptOfferData(txMeta, sttx);
|
||||
|
||||
case ripple::TxType::ttNFTOKEN_CANCEL_OFFER:
|
||||
return getNFTokenCancelOfferData(txMeta, sttx);
|
||||
|
||||
case ripple::TxType::ttNFTOKEN_CREATE_OFFER:
|
||||
return getNFTokenCreateOfferData(txMeta, sttx);
|
||||
|
||||
default:
|
||||
return {{}, {}};
|
||||
}
|
||||
}
|
||||
219
src/etl/ProbingETLSource.cpp
Normal file
219
src/etl/ProbingETLSource.cpp
Normal file
@@ -0,0 +1,219 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <etl/ProbingETLSource.h>
|
||||
#include <log/Logger.h>
|
||||
|
||||
using namespace clio;
|
||||
|
||||
ProbingETLSource::ProbingETLSource(
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> nwvl,
|
||||
ETLLoadBalancer& balancer,
|
||||
boost::asio::ssl::context sslCtx)
|
||||
: sslCtx_{std::move(sslCtx)}
|
||||
, sslSrc_{make_shared<SslETLSource>(
|
||||
config,
|
||||
ioc,
|
||||
std::ref(sslCtx_),
|
||||
backend,
|
||||
subscriptions,
|
||||
nwvl,
|
||||
balancer,
|
||||
make_SSLHooks())}
|
||||
, plainSrc_{make_shared<PlainETLSource>(
|
||||
config,
|
||||
ioc,
|
||||
backend,
|
||||
subscriptions,
|
||||
nwvl,
|
||||
balancer,
|
||||
make_PlainHooks())}
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
ProbingETLSource::run()
|
||||
{
|
||||
sslSrc_->run();
|
||||
plainSrc_->run();
|
||||
}
|
||||
|
||||
void
|
||||
ProbingETLSource::pause()
|
||||
{
|
||||
sslSrc_->pause();
|
||||
plainSrc_->pause();
|
||||
}
|
||||
|
||||
void
|
||||
ProbingETLSource::resume()
|
||||
{
|
||||
sslSrc_->resume();
|
||||
plainSrc_->resume();
|
||||
}
|
||||
|
||||
bool
|
||||
ProbingETLSource::isConnected() const
|
||||
{
|
||||
return currentSrc_ && currentSrc_->isConnected();
|
||||
}
|
||||
|
||||
bool
|
||||
ProbingETLSource::hasLedger(uint32_t sequence) const
|
||||
{
|
||||
if (!currentSrc_)
|
||||
return false;
|
||||
return currentSrc_->hasLedger(sequence);
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
ProbingETLSource::toJson() const
|
||||
{
|
||||
if (!currentSrc_)
|
||||
{
|
||||
boost::json::object sourcesJson = {
|
||||
{"ws", plainSrc_->toJson()},
|
||||
{"wss", sslSrc_->toJson()},
|
||||
};
|
||||
|
||||
return {
|
||||
{"probing", sourcesJson},
|
||||
};
|
||||
}
|
||||
return currentSrc_->toJson();
|
||||
}
|
||||
|
||||
std::string
|
||||
ProbingETLSource::toString() const
|
||||
{
|
||||
if (!currentSrc_)
|
||||
return "{probing... ws: " + plainSrc_->toString() +
|
||||
", wss: " + sslSrc_->toString() + "}";
|
||||
return currentSrc_->toString();
|
||||
}
|
||||
|
||||
bool
|
||||
ProbingETLSource::loadInitialLedger(
|
||||
std::uint32_t ledgerSequence,
|
||||
std::uint32_t numMarkers,
|
||||
bool cacheOnly)
|
||||
{
|
||||
if (!currentSrc_)
|
||||
return false;
|
||||
return currentSrc_->loadInitialLedger(
|
||||
ledgerSequence, numMarkers, cacheOnly);
|
||||
}
|
||||
|
||||
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
|
||||
ProbingETLSource::fetchLedger(
|
||||
uint32_t ledgerSequence,
|
||||
bool getObjects,
|
||||
bool getObjectNeighbors)
|
||||
{
|
||||
if (!currentSrc_)
|
||||
return {};
|
||||
return currentSrc_->fetchLedger(
|
||||
ledgerSequence, getObjects, getObjectNeighbors);
|
||||
}
|
||||
|
||||
std::optional<boost::json::object>
|
||||
ProbingETLSource::forwardToRippled(
|
||||
boost::json::object const& request,
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
if (!currentSrc_)
|
||||
return {};
|
||||
return currentSrc_->forwardToRippled(request, clientIp, yield);
|
||||
}
|
||||
|
||||
std::optional<boost::json::object>
|
||||
ProbingETLSource::requestFromRippled(
|
||||
boost::json::object const& request,
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
if (!currentSrc_)
|
||||
return {};
|
||||
return currentSrc_->requestFromRippled(request, clientIp, yield);
|
||||
}
|
||||
|
||||
ETLSourceHooks
|
||||
ProbingETLSource::make_SSLHooks() noexcept
|
||||
{
|
||||
return {// onConnected
|
||||
[this](auto ec) {
|
||||
std::lock_guard lck(mtx_);
|
||||
if (currentSrc_)
|
||||
return ETLSourceHooks::Action::STOP;
|
||||
|
||||
if (!ec)
|
||||
{
|
||||
plainSrc_->pause();
|
||||
currentSrc_ = sslSrc_;
|
||||
log_.info() << "Selected WSS as the main source: "
|
||||
<< currentSrc_->toString();
|
||||
}
|
||||
return ETLSourceHooks::Action::PROCEED;
|
||||
},
|
||||
// onDisconnected
|
||||
[this](auto ec) {
|
||||
std::lock_guard lck(mtx_);
|
||||
if (currentSrc_)
|
||||
{
|
||||
currentSrc_ = nullptr;
|
||||
plainSrc_->resume();
|
||||
}
|
||||
return ETLSourceHooks::Action::STOP;
|
||||
}};
|
||||
}
|
||||
|
||||
ETLSourceHooks
|
||||
ProbingETLSource::make_PlainHooks() noexcept
|
||||
{
|
||||
return {// onConnected
|
||||
[this](auto ec) {
|
||||
std::lock_guard lck(mtx_);
|
||||
if (currentSrc_)
|
||||
return ETLSourceHooks::Action::STOP;
|
||||
|
||||
if (!ec)
|
||||
{
|
||||
sslSrc_->pause();
|
||||
currentSrc_ = plainSrc_;
|
||||
log_.info() << "Selected Plain WS as the main source: "
|
||||
<< currentSrc_->toString();
|
||||
}
|
||||
return ETLSourceHooks::Action::PROCEED;
|
||||
},
|
||||
// onDisconnected
|
||||
[this](auto ec) {
|
||||
std::lock_guard lck(mtx_);
|
||||
if (currentSrc_)
|
||||
{
|
||||
currentSrc_ = nullptr;
|
||||
sslSrc_->resume();
|
||||
}
|
||||
return ETLSourceHooks::Action::STOP;
|
||||
}};
|
||||
}
|
||||
112
src/etl/ProbingETLSource.h
Normal file
112
src/etl/ProbingETLSource.h
Normal file
@@ -0,0 +1,112 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/beast/core.hpp>
|
||||
#include <boost/beast/core/string.hpp>
|
||||
#include <boost/beast/ssl.hpp>
|
||||
#include <boost/beast/websocket.hpp>
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include <config/Config.h>
|
||||
#include <etl/ETLSource.h>
|
||||
#include <log/Logger.h>
|
||||
|
||||
/// This ETLSource implementation attempts to connect over both secure websocket
|
||||
/// and plain websocket. First to connect pauses the other and the probing is
|
||||
/// considered done at this point. If however the connected source loses
|
||||
/// connection the probing is kickstarted again.
|
||||
class ProbingETLSource : public ETLSource
|
||||
{
|
||||
clio::Logger log_{"ETL"};
|
||||
|
||||
std::mutex mtx_;
|
||||
boost::asio::ssl::context sslCtx_;
|
||||
std::shared_ptr<ETLSource> sslSrc_;
|
||||
std::shared_ptr<ETLSource> plainSrc_;
|
||||
std::shared_ptr<ETLSource> currentSrc_;
|
||||
|
||||
public:
|
||||
ProbingETLSource(
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
std::shared_ptr<NetworkValidatedLedgers> nwvl,
|
||||
ETLLoadBalancer& balancer,
|
||||
boost::asio::ssl::context sslCtx = boost::asio::ssl::context{
|
||||
boost::asio::ssl::context::tlsv12});
|
||||
|
||||
~ProbingETLSource() = default;
|
||||
|
||||
void
|
||||
run() override;
|
||||
|
||||
void
|
||||
pause() override;
|
||||
|
||||
void
|
||||
resume() override;
|
||||
|
||||
bool
|
||||
isConnected() const override;
|
||||
|
||||
bool
|
||||
hasLedger(uint32_t sequence) const override;
|
||||
|
||||
boost::json::object
|
||||
toJson() const override;
|
||||
|
||||
std::string
|
||||
toString() const override;
|
||||
|
||||
bool
|
||||
loadInitialLedger(
|
||||
std::uint32_t ledgerSequence,
|
||||
std::uint32_t numMarkers,
|
||||
bool cacheOnly = false) override;
|
||||
|
||||
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
|
||||
fetchLedger(
|
||||
uint32_t ledgerSequence,
|
||||
bool getObjects = true,
|
||||
bool getObjectNeighbors = false) override;
|
||||
|
||||
std::optional<boost::json::object>
|
||||
forwardToRippled(
|
||||
boost::json::object const& request,
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context& yield) const override;
|
||||
|
||||
private:
|
||||
std::optional<boost::json::object>
|
||||
requestFromRippled(
|
||||
boost::json::object const& request,
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context& yield) const override;
|
||||
|
||||
ETLSourceHooks
|
||||
make_SSLHooks() noexcept;
|
||||
|
||||
ETLSourceHooks
|
||||
make_PlainHooks() noexcept;
|
||||
};
|
||||
@@ -22,7 +22,7 @@ read-only mode. In read-only mode, the server does not perform ETL and simply
|
||||
publishes new ledgers as they are written to the database.
|
||||
If the database is not updated within a certain time period
|
||||
(currently hard coded at 20 seconds), clio will begin the ETL
|
||||
process and start writing to the database. Postgres will report an error when
|
||||
process and start writing to the database. The database will report an error when
|
||||
trying to write a record with a key that already exists. ETL uses this error to
|
||||
determine that another process is writing to the database, and subsequently
|
||||
falls back to a soft read-only mode. clio can also operate in strict
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,23 @@
|
||||
#ifndef RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
|
||||
#define RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/ledger/ReadView.h>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
@@ -8,6 +26,7 @@
|
||||
#include <boost/beast/websocket.hpp>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <etl/ETLSource.h>
|
||||
#include <log/Logger.h>
|
||||
#include <subscriptions/SubscriptionManager.h>
|
||||
|
||||
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
|
||||
@@ -19,7 +38,22 @@
|
||||
|
||||
#include <chrono>
|
||||
|
||||
/**
|
||||
* Helper function for the ReportingETL, implemented in NFTHelpers.cpp, to
|
||||
* pull to-write data out of a transaction that relates to NFTs.
|
||||
*/
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
|
||||
|
||||
struct AccountTransactionsData;
|
||||
struct NFTTransactionsData;
|
||||
struct NFTsData;
|
||||
struct FormattedTransactionsData
|
||||
{
|
||||
std::vector<AccountTransactionsData> accountTxData;
|
||||
std::vector<NFTTransactionsData> nfTokenTxData;
|
||||
std::vector<NFTsData> nfTokensData;
|
||||
};
|
||||
class SubscriptionManager;
|
||||
|
||||
/**
|
||||
@@ -40,6 +74,8 @@ class SubscriptionManager;
|
||||
class ReportingETL
|
||||
{
|
||||
private:
|
||||
clio::Logger log_{"ETL"};
|
||||
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::shared_ptr<SubscriptionManager> subscriptions_;
|
||||
std::shared_ptr<ETLLoadBalancer> loadBalancer_;
|
||||
@@ -52,7 +88,23 @@ private:
|
||||
|
||||
// number of diffs to use to generate cursors to traverse the ledger in
|
||||
// parallel during initial cache download
|
||||
size_t numDiffs_ = 1;
|
||||
size_t numCacheDiffs_ = 32;
|
||||
// number of markers to use at one time to traverse the ledger in parallel
|
||||
// during initial cache download
|
||||
size_t numCacheMarkers_ = 48;
|
||||
// number of ledger objects to fetch concurrently per marker during cache
|
||||
// download
|
||||
size_t cachePageFetchSize_ = 512;
|
||||
// thread responsible for syncing the cache on startup
|
||||
std::thread cacheDownloader_;
|
||||
|
||||
struct ClioPeer
|
||||
{
|
||||
std::string ip;
|
||||
int port;
|
||||
};
|
||||
|
||||
std::vector<ClioPeer> clioPeers;
|
||||
|
||||
std::thread worker_;
|
||||
boost::asio::io_context& ioContext_;
|
||||
@@ -86,18 +138,6 @@ private:
|
||||
// deletion
|
||||
std::atomic_bool deleting_ = false;
|
||||
|
||||
/// Used to determine when to write to the database during the initial
|
||||
/// ledger download. By default, the software downloads an entire ledger and
|
||||
/// then writes to the database. If flushInterval_ is non-zero, the software
|
||||
/// will write to the database as new ledger data (SHAMap leaf nodes)
|
||||
/// arrives. It is not neccesarily more effient to write the data as it
|
||||
/// arrives, as different SHAMap leaf nodes share the same SHAMap inner
|
||||
/// nodes; flushing prematurely can result in the same SHAMap inner node
|
||||
/// being written to the database more than once. It is recommended to use
|
||||
/// the default value of 0 for this variable; however, different values can
|
||||
/// be experimented with if better performance is desired.
|
||||
size_t flushInterval_ = 0;
|
||||
|
||||
/// This variable controls the number of GetLedgerData calls that will be
|
||||
/// executed in parallel during the initial ledger download. GetLedgerData
|
||||
/// allows clients to page through a ledger over many RPC calls.
|
||||
@@ -123,29 +163,33 @@ private:
|
||||
std::optional<uint32_t> startSequence_;
|
||||
std::optional<uint32_t> finishSequence_;
|
||||
|
||||
size_t accumTxns_ = 0;
|
||||
size_t txnThreshold_ = 0;
|
||||
|
||||
/// The time that the most recently published ledger was published. Used by
|
||||
/// server_info
|
||||
std::chrono::time_point<std::chrono::system_clock> lastPublish_;
|
||||
|
||||
mutable std::mutex publishTimeMtx_;
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock>
|
||||
getLastPublish() const
|
||||
{
|
||||
std::unique_lock<std::mutex> lck(publishTimeMtx_);
|
||||
return lastPublish_;
|
||||
}
|
||||
mutable std::shared_mutex publishTimeMtx_;
|
||||
|
||||
void
|
||||
setLastPublish()
|
||||
{
|
||||
std::unique_lock<std::mutex> lck(publishTimeMtx_);
|
||||
std::unique_lock lck(publishTimeMtx_);
|
||||
lastPublish_ = std::chrono::system_clock::now();
|
||||
}
|
||||
|
||||
/// The time that the most recently published ledger was closed.
|
||||
std::chrono::time_point<ripple::NetClock> lastCloseTime_;
|
||||
|
||||
mutable std::shared_mutex closeTimeMtx_;
|
||||
|
||||
void
|
||||
setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
|
||||
{
|
||||
std::unique_lock lck(closeTimeMtx_);
|
||||
lastCloseTime_ = lastCloseTime;
|
||||
}
|
||||
|
||||
/// Download a ledger with specified sequence in full, via GetLedgerData,
|
||||
/// and write the data to the databases. This takes several minutes or
|
||||
/// longer.
|
||||
@@ -162,6 +206,16 @@ private:
|
||||
void
|
||||
loadCache(uint32_t seq);
|
||||
|
||||
void
|
||||
loadCacheFromDb(uint32_t seq);
|
||||
|
||||
bool
|
||||
loadCacheFromClioPeer(
|
||||
uint32_t ledgerSequence,
|
||||
std::string const& ip,
|
||||
std::string const& port,
|
||||
boost::asio::yield_context& yield);
|
||||
|
||||
/// Run ETL. Extracts ledgers and writes them to the database, until a
|
||||
/// write conflict occurs (or the server shuts down).
|
||||
/// @note database must already be populated when this function is
|
||||
@@ -208,14 +262,16 @@ private:
|
||||
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
|
||||
fetchLedgerDataAndDiff(uint32_t sequence);
|
||||
|
||||
/// Insert all of the extracted transactions into the ledger
|
||||
/// Insert all of the extracted transactions into the ledger, returning
|
||||
/// transactions related to accounts, transactions related to NFTs, and
|
||||
/// NFTs themselves for later processsing.
|
||||
/// @param ledger ledger to insert transactions into
|
||||
/// @param data data extracted from an ETL source
|
||||
/// @return struct that contains the neccessary info to write to the
|
||||
/// transctions and account_transactions tables in Postgres (mostly
|
||||
/// transaction hashes, corresponding nodestore hashes and affected
|
||||
/// account_transactions/account_tx and nft_token_transactions tables
|
||||
/// (mostly transaction hashes, corresponding nodestore hashes and affected
|
||||
/// accounts)
|
||||
std::vector<AccountTransactionsData>
|
||||
FormattedTransactionsData
|
||||
insertTransactions(
|
||||
ripple::LedgerInfo const& ledger,
|
||||
org::xrpl::rpc::v1::GetLedgerResponse& data);
|
||||
@@ -227,7 +283,7 @@ private:
|
||||
/// following parent
|
||||
/// @param parent the previous ledger
|
||||
/// @param rawData data extracted from an ETL source
|
||||
/// @return the newly built ledger and data to write to Postgres
|
||||
/// @return the newly built ledger and data to write to the database
|
||||
std::pair<ripple::LedgerInfo, bool>
|
||||
buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData);
|
||||
|
||||
@@ -265,7 +321,7 @@ private:
|
||||
void
|
||||
run()
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info) << "Starting reporting etl";
|
||||
log_.info() << "Starting reporting etl";
|
||||
stopping_ = false;
|
||||
|
||||
doWork();
|
||||
@@ -276,7 +332,7 @@ private:
|
||||
|
||||
public:
|
||||
ReportingETL(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
@@ -285,7 +341,7 @@ public:
|
||||
|
||||
static std::shared_ptr<ReportingETL>
|
||||
make_ReportingETL(
|
||||
boost::json::object const& config,
|
||||
clio::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
@@ -302,14 +358,16 @@ public:
|
||||
|
||||
~ReportingETL()
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(info) << "onStop called";
|
||||
BOOST_LOG_TRIVIAL(debug) << "Stopping Reporting ETL";
|
||||
log_.info() << "onStop called";
|
||||
log_.debug() << "Stopping Reporting ETL";
|
||||
stopping_ = true;
|
||||
|
||||
if (worker_.joinable())
|
||||
worker_.join();
|
||||
if (cacheDownloader_.joinable())
|
||||
cacheDownloader_.join();
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug) << "Joined ReportingETL worker thread";
|
||||
log_.debug() << "Joined ReportingETL worker thread";
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
@@ -322,13 +380,36 @@ public:
|
||||
result["read_only"] = readOnly_;
|
||||
auto last = getLastPublish();
|
||||
if (last.time_since_epoch().count() != 0)
|
||||
result["last_publish_age_seconds"] = std::to_string(
|
||||
std::chrono::duration_cast<std::chrono::seconds>(
|
||||
std::chrono::system_clock::now() - getLastPublish())
|
||||
.count());
|
||||
|
||||
result["last_publish_age_seconds"] =
|
||||
std::to_string(lastPublishAgeSeconds());
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
||||
std::chrono::time_point<std::chrono::system_clock>
|
||||
getLastPublish() const
|
||||
{
|
||||
std::shared_lock lck(publishTimeMtx_);
|
||||
return lastPublish_;
|
||||
}
|
||||
|
||||
std::uint32_t
|
||||
lastPublishAgeSeconds() const
|
||||
{
|
||||
return std::chrono::duration_cast<std::chrono::seconds>(
|
||||
std::chrono::system_clock::now() - getLastPublish())
|
||||
.count();
|
||||
}
|
||||
|
||||
std::uint32_t
|
||||
lastCloseAgeSeconds() const
|
||||
{
|
||||
std::shared_lock lck(closeTimeMtx_);
|
||||
auto now = std::chrono::duration_cast<std::chrono::seconds>(
|
||||
std::chrono::system_clock::now().time_since_epoch())
|
||||
.count();
|
||||
auto closeTime = lastCloseTime_.time_since_epoch().count();
|
||||
if (now < (rippleEpochStart + closeTime))
|
||||
return 0;
|
||||
return now - (rippleEpochStart + closeTime);
|
||||
}
|
||||
};
|
||||
|
||||
209
src/log/Logger.cpp
Normal file
209
src/log/Logger.cpp
Normal file
@@ -0,0 +1,209 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <config/Config.h>
|
||||
#include <log/Logger.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <filesystem>
|
||||
|
||||
namespace clio {
|
||||
|
||||
Logger LogService::general_log_ = Logger{"General"};
|
||||
Logger LogService::alert_log_ = Logger{"Alert"};
|
||||
|
||||
std::ostream&
|
||||
operator<<(std::ostream& stream, Severity sev)
|
||||
{
|
||||
static constexpr std::array<const char*, 6> labels = {
|
||||
"TRC",
|
||||
"DBG",
|
||||
"NFO",
|
||||
"WRN",
|
||||
"ERR",
|
||||
"FTL",
|
||||
};
|
||||
|
||||
return stream << labels.at(static_cast<int>(sev));
|
||||
}
|
||||
|
||||
Severity
|
||||
tag_invoke(boost::json::value_to_tag<Severity>, boost::json::value const& value)
|
||||
{
|
||||
if (not value.is_string())
|
||||
throw std::runtime_error("`log_level` must be a string");
|
||||
auto const& logLevel = value.as_string();
|
||||
|
||||
if (boost::iequals(logLevel, "trace"))
|
||||
return Severity::TRC;
|
||||
else if (boost::iequals(logLevel, "debug"))
|
||||
return Severity::DBG;
|
||||
else if (boost::iequals(logLevel, "info"))
|
||||
return Severity::NFO;
|
||||
else if (
|
||||
boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn"))
|
||||
return Severity::WRN;
|
||||
else if (boost::iequals(logLevel, "error"))
|
||||
return Severity::ERR;
|
||||
else if (boost::iequals(logLevel, "fatal"))
|
||||
return Severity::FTL;
|
||||
else
|
||||
throw std::runtime_error(
|
||||
"Could not parse `log_level`: expected `trace`, `debug`, `info`, "
|
||||
"`warning`, `error` or `fatal`");
|
||||
}
|
||||
|
||||
void
|
||||
LogService::init(Config const& config)
|
||||
{
|
||||
namespace src = boost::log::sources;
|
||||
namespace keywords = boost::log::keywords;
|
||||
namespace sinks = boost::log::sinks;
|
||||
|
||||
boost::log::add_common_attributes();
|
||||
boost::log::register_simple_formatter_factory<Severity, char>("Severity");
|
||||
auto const defaultFormat =
|
||||
"%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% "
|
||||
"%Message%";
|
||||
std::string format =
|
||||
config.valueOr<std::string>("log_format", defaultFormat);
|
||||
|
||||
if (config.valueOr("log_to_console", false))
|
||||
{
|
||||
boost::log::add_console_log(std::cout, keywords::format = format);
|
||||
}
|
||||
|
||||
auto logDir = config.maybeValue<std::string>("log_directory");
|
||||
if (logDir)
|
||||
{
|
||||
boost::filesystem::path dirPath{logDir.value()};
|
||||
if (!boost::filesystem::exists(dirPath))
|
||||
boost::filesystem::create_directories(dirPath);
|
||||
auto const rotationSize =
|
||||
config.valueOr<uint64_t>("log_rotation_size", 2048u) * 1024u *
|
||||
1024u;
|
||||
auto const rotationPeriod =
|
||||
config.valueOr<uint32_t>("log_rotation_hour_interval", 12u);
|
||||
auto const dirSize =
|
||||
config.valueOr<uint64_t>("log_directory_max_size", 50u * 1024u) *
|
||||
1024u * 1024u;
|
||||
auto fileSink = boost::log::add_file_log(
|
||||
keywords::file_name = dirPath / "clio.log",
|
||||
keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
|
||||
keywords::auto_flush = true,
|
||||
keywords::format = format,
|
||||
keywords::open_mode = std::ios_base::app,
|
||||
keywords::rotation_size = rotationSize,
|
||||
keywords::time_based_rotation =
|
||||
sinks::file::rotation_at_time_interval(
|
||||
boost::posix_time::hours(rotationPeriod)));
|
||||
fileSink->locked_backend()->set_file_collector(
|
||||
sinks::file::make_collector(
|
||||
keywords::target = dirPath, keywords::max_size = dirSize));
|
||||
fileSink->locked_backend()->scan_for_files();
|
||||
}
|
||||
|
||||
// get default severity, can be overridden per channel using
|
||||
// the `log_channels` array
|
||||
auto defaultSeverity = config.valueOr<Severity>("log_level", Severity::NFO);
|
||||
static constexpr std::array<const char*, 7> channels = {
|
||||
"General",
|
||||
"WebServer",
|
||||
"Backend",
|
||||
"RPC",
|
||||
"ETL",
|
||||
"Subscriptions",
|
||||
"Performance",
|
||||
};
|
||||
|
||||
auto core = boost::log::core::get();
|
||||
auto min_severity = boost::log::expressions::channel_severity_filter(
|
||||
log_channel, log_severity);
|
||||
|
||||
for (auto const& channel : channels)
|
||||
min_severity[channel] = defaultSeverity;
|
||||
min_severity["Alert"] =
|
||||
Severity::WRN; // Channel for alerts, always warning severity
|
||||
|
||||
for (auto const overrides = config.arrayOr("log_channels", {});
|
||||
auto const& cfg : overrides)
|
||||
{
|
||||
auto name = cfg.valueOrThrow<std::string>(
|
||||
"channel", "Channel name is required");
|
||||
if (not std::count(std::begin(channels), std::end(channels), name))
|
||||
throw std::runtime_error(
|
||||
"Can't override settings for log channel " + name +
|
||||
": invalid channel");
|
||||
|
||||
min_severity[name] =
|
||||
cfg.valueOr<Severity>("log_level", defaultSeverity);
|
||||
}
|
||||
|
||||
core->set_filter(min_severity);
|
||||
LogService::info() << "Default log level = " << defaultSeverity;
|
||||
}
|
||||
|
||||
Logger::Pump
|
||||
Logger::trace(source_location_t const& loc) const
|
||||
{
|
||||
return {logger_, Severity::TRC, loc};
|
||||
};
|
||||
Logger::Pump
|
||||
Logger::debug(source_location_t const& loc) const
|
||||
{
|
||||
return {logger_, Severity::DBG, loc};
|
||||
};
|
||||
Logger::Pump
|
||||
Logger::info(source_location_t const& loc) const
|
||||
{
|
||||
return {logger_, Severity::NFO, loc};
|
||||
};
|
||||
Logger::Pump
|
||||
Logger::warn(source_location_t const& loc) const
|
||||
{
|
||||
return {logger_, Severity::WRN, loc};
|
||||
};
|
||||
Logger::Pump
|
||||
Logger::error(source_location_t const& loc) const
|
||||
{
|
||||
return {logger_, Severity::ERR, loc};
|
||||
};
|
||||
Logger::Pump
|
||||
Logger::fatal(source_location_t const& loc) const
|
||||
{
|
||||
return {logger_, Severity::FTL, loc};
|
||||
};
|
||||
|
||||
std::string
|
||||
Logger::Pump::pretty_path(source_location_t const& loc, size_t max_depth) const
|
||||
{
|
||||
auto const file_path = std::string{loc.file_name()};
|
||||
auto idx = file_path.size();
|
||||
while (max_depth-- > 0)
|
||||
{
|
||||
idx = file_path.rfind('/', idx - 1);
|
||||
if (idx == std::string::npos || idx == 0)
|
||||
break;
|
||||
}
|
||||
return file_path.substr(idx == std::string::npos ? 0 : idx + 1) + ':' +
|
||||
std::to_string(loc.line());
|
||||
}
|
||||
|
||||
} // namespace clio
|
||||
314
src/log/Logger.h
Normal file
314
src/log/Logger.h
Normal file
@@ -0,0 +1,314 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/log/core/core.hpp>
|
||||
#include <boost/log/expressions/predicates/channel_severity_filter.hpp>
|
||||
#include <boost/log/sinks/unlocked_frontend.hpp>
|
||||
#include <boost/log/sources/record_ostream.hpp>
|
||||
#include <boost/log/sources/severity_channel_logger.hpp>
|
||||
#include <boost/log/sources/severity_feature.hpp>
|
||||
#include <boost/log/sources/severity_logger.hpp>
|
||||
#include <boost/log/utility/manipulators/add_value.hpp>
|
||||
#include <boost/log/utility/setup/common_attributes.hpp>
|
||||
#include <boost/log/utility/setup/console.hpp>
|
||||
#include <boost/log/utility/setup/file.hpp>
|
||||
#include <boost/log/utility/setup/formatter_parser.hpp>
|
||||
|
||||
#if defined(HAS_SOURCE_LOCATION) && __has_builtin(__builtin_source_location)
|
||||
// this is used by fully compatible compilers like gcc
|
||||
#include <source_location>
|
||||
|
||||
#elif defined(HAS_EXPERIMENTAL_SOURCE_LOCATION)
|
||||
// this is used by clang on linux where source_location is still not out of
|
||||
// experimental headers
|
||||
#include <experimental/source_location>
|
||||
#endif
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
namespace clio {
|
||||
|
||||
class Config;
|
||||
#if defined(HAS_SOURCE_LOCATION) && __has_builtin(__builtin_source_location)
|
||||
using source_location_t = std::source_location;
|
||||
#define CURRENT_SRC_LOCATION source_location_t::current()
|
||||
|
||||
#elif defined(HAS_EXPERIMENTAL_SOURCE_LOCATION)
|
||||
using source_location_t = std::experimental::source_location;
|
||||
#define CURRENT_SRC_LOCATION source_location_t::current()
|
||||
|
||||
#else
|
||||
// A workaround for AppleClang that is lacking source_location atm.
|
||||
// TODO: remove this workaround when all compilers catch up to c++20
|
||||
class SourceLocation
|
||||
{
|
||||
std::string_view file_;
|
||||
std::size_t line_;
|
||||
|
||||
public:
|
||||
SourceLocation(std::string_view file, std::size_t line)
|
||||
: file_{file}, line_{line}
|
||||
{
|
||||
}
|
||||
std::string_view
|
||||
file_name() const
|
||||
{
|
||||
return file_;
|
||||
}
|
||||
std::size_t
|
||||
line() const
|
||||
{
|
||||
return line_;
|
||||
}
|
||||
};
|
||||
using source_location_t = SourceLocation;
|
||||
#define CURRENT_SRC_LOCATION \
|
||||
source_location_t(__builtin_FILE(), __builtin_LINE())
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Custom severity levels for @ref Logger.
|
||||
*/
|
||||
enum class Severity {
|
||||
TRC,
|
||||
DBG,
|
||||
NFO,
|
||||
WRN,
|
||||
ERR,
|
||||
FTL,
|
||||
};
|
||||
|
||||
BOOST_LOG_ATTRIBUTE_KEYWORD(log_severity, "Severity", Severity);
|
||||
BOOST_LOG_ATTRIBUTE_KEYWORD(log_channel, "Channel", std::string);
|
||||
|
||||
/**
|
||||
* @brief Custom labels for @ref Severity in log output.
|
||||
*
|
||||
* @param stream std::ostream The output stream
|
||||
* @param sev Severity The severity to output to the ostream
|
||||
* @return std::ostream& The same ostream we were given
|
||||
*/
|
||||
std::ostream&
|
||||
operator<<(std::ostream& stream, Severity sev);
|
||||
|
||||
/**
|
||||
* @brief Custom JSON parser for @ref Severity.
|
||||
*
|
||||
* @param value The JSON string to parse
|
||||
* @return Severity The parsed severity
|
||||
* @throws std::runtime_error Thrown if severity is not in the right format
|
||||
*/
|
||||
Severity
|
||||
tag_invoke(
|
||||
boost::json::value_to_tag<Severity>,
|
||||
boost::json::value const& value);
|
||||
|
||||
/**
|
||||
* @brief A simple thread-safe logger for the channel specified
|
||||
* in the constructor.
|
||||
*
|
||||
* This is cheap to copy and move. Designed to be used as a member variable or
|
||||
* otherwise. See @ref LogService::init() for setup of the logging core and
|
||||
* severity levels for each channel.
|
||||
*/
|
||||
class Logger final
|
||||
{
|
||||
using logger_t =
|
||||
boost::log::sources::severity_channel_logger_mt<Severity, std::string>;
|
||||
mutable logger_t logger_;
|
||||
|
||||
friend class LogService; // to expose the Pump interface
|
||||
|
||||
/**
|
||||
* @brief Helper that pumps data into a log record via `operator<<`.
|
||||
*/
|
||||
class Pump final
|
||||
{
|
||||
using pump_opt_t =
|
||||
std::optional<boost::log::aux::record_pump<logger_t>>;
|
||||
|
||||
boost::log::record rec_;
|
||||
pump_opt_t pump_ = std::nullopt;
|
||||
|
||||
public:
|
||||
~Pump() = default;
|
||||
Pump(logger_t& logger, Severity sev, source_location_t const& loc)
|
||||
: rec_{logger.open_record(boost::log::keywords::severity = sev)}
|
||||
{
|
||||
if (rec_)
|
||||
{
|
||||
pump_.emplace(boost::log::aux::make_record_pump(logger, rec_));
|
||||
pump_->stream() << boost::log::add_value(
|
||||
"SourceLocation", pretty_path(loc));
|
||||
}
|
||||
}
|
||||
|
||||
Pump(Pump&&) = delete;
|
||||
Pump(Pump const&) = delete;
|
||||
Pump&
|
||||
operator=(Pump const&) = delete;
|
||||
Pump&
|
||||
operator=(Pump&&) = delete;
|
||||
|
||||
/**
|
||||
* @brief Perfectly forwards any incoming data into the underlying
|
||||
* boost::log pump if the pump is available. nop otherwise.
|
||||
*
|
||||
* @tparam T Type of data to pump
|
||||
* @param data The data to pump
|
||||
* @return Pump& Reference to itself for chaining
|
||||
*/
|
||||
template <typename T>
|
||||
[[maybe_unused]] Pump&
|
||||
operator<<(T&& data)
|
||||
{
|
||||
if (pump_)
|
||||
pump_->stream() << std::forward<T>(data);
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
[[nodiscard]] std::string
|
||||
pretty_path(source_location_t const& loc, size_t max_depth = 3) const;
|
||||
};
|
||||
|
||||
public:
|
||||
~Logger() = default;
|
||||
/**
|
||||
* @brief Construct a new Logger object that produces loglines for the
|
||||
* specified channel.
|
||||
*
|
||||
* See @ref LogService::init() for general setup and configuration of
|
||||
* severity levels per channel.
|
||||
*
|
||||
* @param channel The channel this logger will report into.
|
||||
*/
|
||||
Logger(std::string channel)
|
||||
: logger_{boost::log::keywords::channel = channel}
|
||||
{
|
||||
}
|
||||
Logger(Logger const&) = default;
|
||||
Logger(Logger&&) = default;
|
||||
Logger&
|
||||
operator=(Logger const&) = default;
|
||||
Logger&
|
||||
operator=(Logger&&) = default;
|
||||
|
||||
/*! Interface for logging at @ref Severity::TRC severity */
|
||||
[[nodiscard]] Pump
|
||||
trace(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
|
||||
|
||||
/*! Interface for logging at @ref Severity::DBG severity */
|
||||
[[nodiscard]] Pump
|
||||
debug(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
|
||||
|
||||
/*! Interface for logging at @ref Severity::INFO severity */
|
||||
[[nodiscard]] Pump
|
||||
info(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
|
||||
|
||||
/*! Interface for logging at @ref Severity::WRN severity */
|
||||
[[nodiscard]] Pump
|
||||
warn(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
|
||||
|
||||
/*! Interface for logging at @ref Severity::ERR severity */
|
||||
[[nodiscard]] Pump
|
||||
error(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
|
||||
|
||||
/*! Interface for logging at @ref Severity::FTL severity */
|
||||
[[nodiscard]] Pump
|
||||
fatal(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief A global logging service.
|
||||
*
|
||||
* Used to initialize and setup the logging core as well as a globally available
|
||||
* entrypoint for logging into the `General` channel as well as raising alerts.
|
||||
*/
|
||||
class LogService
|
||||
{
|
||||
static Logger general_log_; /*! Global logger for General channel */
|
||||
static Logger alert_log_; /*! Global logger for Alerts channel */
|
||||
|
||||
public:
|
||||
LogService() = delete;
|
||||
|
||||
/**
|
||||
* @brief Global log core initialization from a @ref Config
|
||||
*/
|
||||
static void
|
||||
init(Config const& config);
|
||||
|
||||
/*! Globally accesible General logger at @ref Severity::TRC severity */
|
||||
[[nodiscard]] static Logger::Pump
|
||||
trace(source_location_t const& loc = CURRENT_SRC_LOCATION)
|
||||
{
|
||||
return general_log_.trace(loc);
|
||||
}
|
||||
|
||||
/*! Globally accesible General logger at @ref Severity::DBG severity */
|
||||
[[nodiscard]] static Logger::Pump
|
||||
debug(source_location_t const& loc = CURRENT_SRC_LOCATION)
|
||||
{
|
||||
return general_log_.debug(loc);
|
||||
}
|
||||
|
||||
/*! Globally accesible General logger at @ref Severity::NFO severity */
|
||||
[[nodiscard]] static Logger::Pump
|
||||
info(source_location_t const& loc = CURRENT_SRC_LOCATION)
|
||||
{
|
||||
return general_log_.info(loc);
|
||||
}
|
||||
|
||||
/*! Globally accesible General logger at @ref Severity::WRN severity */
|
||||
[[nodiscard]] static Logger::Pump
|
||||
warn(source_location_t const& loc = CURRENT_SRC_LOCATION)
|
||||
{
|
||||
return general_log_.warn(loc);
|
||||
}
|
||||
|
||||
/*! Globally accesible General logger at @ref Severity::ERR severity */
|
||||
[[nodiscard]] static Logger::Pump
|
||||
error(source_location_t const& loc = CURRENT_SRC_LOCATION)
|
||||
{
|
||||
return general_log_.error(loc);
|
||||
}
|
||||
|
||||
/*! Globally accesible General logger at @ref Severity::FTL severity */
|
||||
[[nodiscard]] static Logger::Pump
|
||||
fatal(source_location_t const& loc = CURRENT_SRC_LOCATION)
|
||||
{
|
||||
return general_log_.fatal(loc);
|
||||
}
|
||||
|
||||
/*! Globally accesible Alert logger */
|
||||
[[nodiscard]] static Logger::Pump
|
||||
alert(source_location_t const& loc = CURRENT_SRC_LOCATION)
|
||||
{
|
||||
return alert_log_.warn(loc);
|
||||
}
|
||||
};
|
||||
|
||||
}; // namespace clio
|
||||
241
src/main.cpp
241
src/main.cpp
@@ -1,241 +0,0 @@
|
||||
#include <grpc/impl/codegen/port_platform.h>
|
||||
#ifdef GRPC_TSAN_ENABLED
|
||||
#undef GRPC_TSAN_ENABLED
|
||||
#endif
|
||||
#ifdef GRPC_ASAN_ENABLED
|
||||
#undef GRPC_ASAN_ENABLED
|
||||
#endif
|
||||
|
||||
#include <boost/asio/dispatch.hpp>
|
||||
#include <boost/asio/strand.hpp>
|
||||
#include <boost/beast/websocket.hpp>
|
||||
#include <boost/date_time/posix_time/posix_time_types.hpp>
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/log/core.hpp>
|
||||
#include <boost/log/expressions.hpp>
|
||||
#include <boost/log/sinks/text_file_backend.hpp>
|
||||
#include <boost/log/sources/record_ostream.hpp>
|
||||
#include <boost/log/sources/severity_logger.hpp>
|
||||
#include <boost/log/support/date_time.hpp>
|
||||
#include <boost/log/trivial.hpp>
|
||||
#include <boost/log/utility/setup/common_attributes.hpp>
|
||||
#include <boost/log/utility/setup/console.hpp>
|
||||
#include <boost/log/utility/setup/file.hpp>
|
||||
#include <algorithm>
|
||||
#include <backend/BackendFactory.h>
|
||||
#include <cstdlib>
|
||||
#include <etl/ReportingETL.h>
|
||||
#include <fstream>
|
||||
#include <functional>
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
#include <webserver/Listener.h>
|
||||
|
||||
std::optional<boost::json::object>
|
||||
parse_config(const char* filename)
|
||||
{
|
||||
try
|
||||
{
|
||||
std::ifstream in(filename, std::ios::in | std::ios::binary);
|
||||
if (in)
|
||||
{
|
||||
std::stringstream contents;
|
||||
contents << in.rdbuf();
|
||||
in.close();
|
||||
std::cout << contents.str() << std::endl;
|
||||
boost::json::value value = boost::json::parse(contents.str());
|
||||
return value.as_object();
|
||||
}
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
std::cout << e.what() << std::endl;
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
std::optional<ssl::context>
|
||||
parse_certs(boost::json::object const& config)
|
||||
{
|
||||
if (!config.contains("ssl_cert_file") || !config.contains("ssl_key_file"))
|
||||
return {};
|
||||
|
||||
auto certFilename = config.at("ssl_cert_file").as_string().c_str();
|
||||
auto keyFilename = config.at("ssl_key_file").as_string().c_str();
|
||||
|
||||
std::ifstream readCert(certFilename, std::ios::in | std::ios::binary);
|
||||
if (!readCert)
|
||||
return {};
|
||||
|
||||
std::stringstream contents;
|
||||
contents << readCert.rdbuf();
|
||||
readCert.close();
|
||||
std::string cert = contents.str();
|
||||
|
||||
std::ifstream readKey(keyFilename, std::ios::in | std::ios::binary);
|
||||
if (!readKey)
|
||||
return {};
|
||||
|
||||
contents.str("");
|
||||
contents << readKey.rdbuf();
|
||||
readKey.close();
|
||||
std::string key = contents.str();
|
||||
|
||||
ssl::context ctx{ssl::context::tlsv12};
|
||||
|
||||
ctx.set_options(
|
||||
boost::asio::ssl::context::default_workarounds |
|
||||
boost::asio::ssl::context::no_sslv2);
|
||||
|
||||
ctx.use_certificate_chain(boost::asio::buffer(cert.data(), cert.size()));
|
||||
|
||||
ctx.use_private_key(
|
||||
boost::asio::buffer(key.data(), key.size()),
|
||||
boost::asio::ssl::context::file_format::pem);
|
||||
|
||||
return ctx;
|
||||
}
|
||||
|
||||
void
|
||||
initLogging(boost::json::object const& config)
|
||||
{
|
||||
boost::log::add_common_attributes();
|
||||
std::string format = "[%TimeStamp%] [%ThreadID%] [%Severity%] %Message%";
|
||||
boost::log::add_console_log(
|
||||
std::cout, boost::log::keywords::format = format);
|
||||
if (config.contains("log_file"))
|
||||
{
|
||||
boost::log::add_file_log(
|
||||
config.at("log_file").as_string().c_str(),
|
||||
boost::log::keywords::format = format,
|
||||
boost::log::keywords::open_mode = std::ios_base::app);
|
||||
}
|
||||
auto const logLevel = config.contains("log_level")
|
||||
? config.at("log_level").as_string()
|
||||
: "info";
|
||||
if (boost::iequals(logLevel, "trace"))
|
||||
boost::log::core::get()->set_filter(
|
||||
boost::log::trivial::severity >= boost::log::trivial::trace);
|
||||
else if (boost::iequals(logLevel, "debug"))
|
||||
boost::log::core::get()->set_filter(
|
||||
boost::log::trivial::severity >= boost::log::trivial::debug);
|
||||
else if (boost::iequals(logLevel, "info"))
|
||||
boost::log::core::get()->set_filter(
|
||||
boost::log::trivial::severity >= boost::log::trivial::info);
|
||||
else if (
|
||||
boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn"))
|
||||
boost::log::core::get()->set_filter(
|
||||
boost::log::trivial::severity >= boost::log::trivial::warning);
|
||||
else if (boost::iequals(logLevel, "error"))
|
||||
boost::log::core::get()->set_filter(
|
||||
boost::log::trivial::severity >= boost::log::trivial::error);
|
||||
else if (boost::iequals(logLevel, "fatal"))
|
||||
boost::log::core::get()->set_filter(
|
||||
boost::log::trivial::severity >= boost::log::trivial::fatal);
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(warning) << "Unrecognized log level: " << logLevel
|
||||
<< ". Setting log level to info";
|
||||
boost::log::core::get()->set_filter(
|
||||
boost::log::trivial::severity >= boost::log::trivial::info);
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(info) << "Log level = " << logLevel;
|
||||
}
|
||||
|
||||
void
|
||||
start(boost::asio::io_context& ioc, std::uint32_t numThreads)
|
||||
{
|
||||
std::vector<std::thread> v;
|
||||
v.reserve(numThreads - 1);
|
||||
for (auto i = numThreads - 1; i > 0; --i)
|
||||
v.emplace_back([&ioc] { ioc.run(); });
|
||||
|
||||
ioc.run();
|
||||
}
|
||||
|
||||
int
|
||||
main(int argc, char* argv[])
|
||||
{
|
||||
// Check command line arguments.
|
||||
if (argc != 2)
|
||||
{
|
||||
std::cerr << "Usage: clio_server "
|
||||
"<config_file> \n"
|
||||
<< "Example:\n"
|
||||
<< " clio_server config.json \n";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
auto const config = parse_config(argv[1]);
|
||||
if (!config)
|
||||
{
|
||||
std::cerr << "Couldnt parse config. Exiting..." << std::endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
initLogging(*config);
|
||||
|
||||
auto ctx = parse_certs(*config);
|
||||
auto ctxRef = ctx
|
||||
? std::optional<std::reference_wrapper<ssl::context>>{ctx.value()}
|
||||
: std::nullopt;
|
||||
|
||||
auto const threads = config->contains("workers")
|
||||
? config->at("workers").as_int64()
|
||||
: std::thread::hardware_concurrency();
|
||||
|
||||
if (threads <= 0)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(fatal) << "Workers is less than 0";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(info) << "Number of workers = " << threads;
|
||||
|
||||
// io context to handle all incoming requests, as well as other things
|
||||
// This is not the only io context in the application
|
||||
boost::asio::io_context ioc{threads};
|
||||
|
||||
// Rate limiter, to prevent abuse
|
||||
DOSGuard dosGuard{config.value(), ioc};
|
||||
|
||||
// Interface to the database
|
||||
std::shared_ptr<BackendInterface> backend{
|
||||
Backend::make_Backend(ioc, *config)};
|
||||
|
||||
// Manages clients subscribed to streams
|
||||
std::shared_ptr<SubscriptionManager> subscriptions{
|
||||
SubscriptionManager::make_SubscriptionManager(*config, backend)};
|
||||
|
||||
// Tracks which ledgers have been validated by the
|
||||
// network
|
||||
std::shared_ptr<NetworkValidatedLedgers> ledgers{
|
||||
NetworkValidatedLedgers::make_ValidatedLedgers()};
|
||||
|
||||
// Handles the connection to one or more rippled nodes.
|
||||
// ETL uses the balancer to extract data.
|
||||
// The server uses the balancer to forward RPCs to a rippled node.
|
||||
// The balancer itself publishes to streams (transactions_proposed and
|
||||
// accounts_proposed)
|
||||
auto balancer = ETLLoadBalancer::make_ETLLoadBalancer(
|
||||
*config, ioc, ctxRef, backend, subscriptions, ledgers);
|
||||
|
||||
// ETL is responsible for writing and publishing to streams. In read-only
|
||||
// mode, ETL only publishes
|
||||
auto etl = ReportingETL::make_ReportingETL(
|
||||
*config, ioc, backend, subscriptions, balancer, ledgers);
|
||||
|
||||
// The server handles incoming RPCs
|
||||
auto httpServer = Server::make_HttpServer(
|
||||
*config, ioc, ctxRef, backend, subscriptions, balancer, etl, dosGuard);
|
||||
|
||||
// Blocks until stopped.
|
||||
// When stopped, shared_ptrs fall out of scope
|
||||
// Calls destructors on all resources, and destructs in order
|
||||
start(ioc, threads);
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
32
src/main/Build.h
Normal file
32
src/main/Build.h
Normal file
@@ -0,0 +1,32 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace Build {
|
||||
|
||||
std::string const&
|
||||
getClioVersionString();
|
||||
|
||||
std::string const&
|
||||
getClioFullVersionString();
|
||||
|
||||
} // namespace Build
|
||||
77
src/main/impl/Build.cpp
Normal file
77
src/main/impl/Build.cpp
Normal file
@@ -0,0 +1,77 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/beast/core/SemanticVersion.h>
|
||||
#include <boost/preprocessor/stringize.hpp>
|
||||
#include <algorithm>
|
||||
#include <main/Build.h>
|
||||
#include <optional>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace Build {
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
// The build version number. You must edit this for each release
|
||||
// and follow the format described at http://semver.org/
|
||||
//------------------------------------------------------------------------------
|
||||
// clang-format off
|
||||
|
||||
char const* const versionString = "1.0.4"
|
||||
// clang-format on
|
||||
"+"
|
||||
#ifdef CLIO_BUILD
|
||||
CLIO_BUILD
|
||||
#endif
|
||||
#ifdef DEBUG
|
||||
".DEBUG"
|
||||
#ifdef SANITIZER
|
||||
"."
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef SANITIZER
|
||||
BOOST_PP_STRINGIZE(SANITIZER)
|
||||
#endif
|
||||
|
||||
#ifdef PKG
|
||||
"-release"
|
||||
#endif
|
||||
;
|
||||
|
||||
std::string const&
|
||||
getClioVersionString()
|
||||
{
|
||||
static std::string const value = [] {
|
||||
std::string const s = versionString;
|
||||
beast::SemanticVersion v;
|
||||
if (!v.parse(s) || v.print() != s)
|
||||
throw std::runtime_error(s + ": Bad server version string");
|
||||
return s;
|
||||
}();
|
||||
return value;
|
||||
}
|
||||
|
||||
std::string const&
|
||||
getClioFullVersionString()
|
||||
{
|
||||
static std::string const value = "clio-" + getClioVersionString();
|
||||
return value;
|
||||
}
|
||||
|
||||
} // namespace Build
|
||||
245
src/main/main.cpp
Normal file
245
src/main/main.cpp
Normal file
@@ -0,0 +1,245 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <grpc/impl/codegen/port_platform.h>
|
||||
#ifdef GRPC_TSAN_ENABLED
|
||||
#undef GRPC_TSAN_ENABLED
|
||||
#endif
|
||||
#ifdef GRPC_ASAN_ENABLED
|
||||
#undef GRPC_ASAN_ENABLED
|
||||
#endif
|
||||
|
||||
#include <backend/BackendFactory.h>
|
||||
#include <config/Config.h>
|
||||
#include <etl/ReportingETL.h>
|
||||
#include <log/Logger.h>
|
||||
#include <webserver/Listener.h>
|
||||
|
||||
#include <boost/asio/dispatch.hpp>
|
||||
#include <boost/asio/strand.hpp>
|
||||
#include <boost/beast/websocket.hpp>
|
||||
#include <boost/date_time/posix_time/posix_time_types.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/program_options.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <fstream>
|
||||
#include <functional>
|
||||
#include <iostream>
|
||||
#include <main/Build.h>
|
||||
#include <memory>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
using namespace clio;
|
||||
namespace po = boost::program_options;
|
||||
|
||||
/**
|
||||
* @brief Parse command line and return path to configuration file
|
||||
*
|
||||
* @param argc
|
||||
* @param argv
|
||||
* @return std::string Path to configuration file
|
||||
*/
|
||||
std::string
|
||||
parseCli(int argc, char* argv[])
|
||||
{
|
||||
static constexpr char defaultConfigPath[] = "/etc/opt/clio/config.json";
|
||||
|
||||
// clang-format off
|
||||
po::options_description description("Options");
|
||||
description.add_options()
|
||||
("help,h", "print help message and exit")
|
||||
("version,v", "print version and exit")
|
||||
("conf,c", po::value<std::string>()->default_value(defaultConfigPath), "configuration file")
|
||||
;
|
||||
// clang-format on
|
||||
po::positional_options_description positional;
|
||||
positional.add("conf", 1);
|
||||
|
||||
po::variables_map parsed;
|
||||
po::store(
|
||||
po::command_line_parser(argc, argv)
|
||||
.options(description)
|
||||
.positional(positional)
|
||||
.run(),
|
||||
parsed);
|
||||
po::notify(parsed);
|
||||
|
||||
if (parsed.count("version"))
|
||||
{
|
||||
std::cout << Build::getClioFullVersionString() << '\n';
|
||||
std::exit(EXIT_SUCCESS);
|
||||
}
|
||||
|
||||
if (parsed.count("help"))
|
||||
{
|
||||
std::cout << "Clio server " << Build::getClioFullVersionString()
|
||||
<< "\n\n"
|
||||
<< description;
|
||||
std::exit(EXIT_SUCCESS);
|
||||
}
|
||||
|
||||
return parsed["conf"].as<std::string>();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Parse certificates from configuration file
|
||||
*
|
||||
* @param config The configuration
|
||||
* @return std::optional<ssl::context> SSL context if certificates were parsed
|
||||
*/
|
||||
std::optional<ssl::context>
|
||||
parseCerts(Config const& config)
|
||||
{
|
||||
if (!config.contains("ssl_cert_file") || !config.contains("ssl_key_file"))
|
||||
return {};
|
||||
|
||||
auto certFilename = config.value<std::string>("ssl_cert_file");
|
||||
auto keyFilename = config.value<std::string>("ssl_key_file");
|
||||
|
||||
std::ifstream readCert(certFilename, std::ios::in | std::ios::binary);
|
||||
if (!readCert)
|
||||
return {};
|
||||
|
||||
std::stringstream contents;
|
||||
contents << readCert.rdbuf();
|
||||
std::string cert = contents.str();
|
||||
|
||||
std::ifstream readKey(keyFilename, std::ios::in | std::ios::binary);
|
||||
if (!readKey)
|
||||
return {};
|
||||
|
||||
contents.str("");
|
||||
contents << readKey.rdbuf();
|
||||
readKey.close();
|
||||
std::string key = contents.str();
|
||||
|
||||
ssl::context ctx{ssl::context::tlsv12};
|
||||
|
||||
ctx.set_options(
|
||||
boost::asio::ssl::context::default_workarounds |
|
||||
boost::asio::ssl::context::no_sslv2);
|
||||
|
||||
ctx.use_certificate_chain(boost::asio::buffer(cert.data(), cert.size()));
|
||||
|
||||
ctx.use_private_key(
|
||||
boost::asio::buffer(key.data(), key.size()),
|
||||
boost::asio::ssl::context::file_format::pem);
|
||||
|
||||
return ctx;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Start context threads
|
||||
*
|
||||
* @param ioc Context
|
||||
* @param numThreads Number of worker threads to start
|
||||
*/
|
||||
void
|
||||
start(boost::asio::io_context& ioc, std::uint32_t numThreads)
|
||||
{
|
||||
std::vector<std::thread> v;
|
||||
v.reserve(numThreads - 1);
|
||||
for (auto i = numThreads - 1; i > 0; --i)
|
||||
v.emplace_back([&ioc] { ioc.run(); });
|
||||
|
||||
ioc.run();
|
||||
}
|
||||
|
||||
int
|
||||
main(int argc, char* argv[])
|
||||
try
|
||||
{
|
||||
auto const configPath = parseCli(argc, argv);
|
||||
auto const config = ConfigReader::open(configPath);
|
||||
if (!config)
|
||||
{
|
||||
std::cerr << "Couldnt parse config '" << configPath << "'."
|
||||
<< std::endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
LogService::init(config);
|
||||
LogService::info() << "Clio version: " << Build::getClioFullVersionString();
|
||||
|
||||
auto ctx = parseCerts(config);
|
||||
auto ctxRef = ctx
|
||||
? std::optional<std::reference_wrapper<ssl::context>>{ctx.value()}
|
||||
: std::nullopt;
|
||||
|
||||
auto const threads = config.valueOr("io_threads", 2);
|
||||
if (threads <= 0)
|
||||
{
|
||||
LogService::fatal() << "io_threads is less than 0";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
LogService::info() << "Number of io threads = " << threads;
|
||||
|
||||
// IO context to handle all incoming requests, as well as other things
|
||||
// This is not the only io context in the application
|
||||
boost::asio::io_context ioc{threads};
|
||||
|
||||
// Rate limiter, to prevent abuse
|
||||
auto sweepHandler = IntervalSweepHandler{config, ioc};
|
||||
auto dosGuard = DOSGuard{config, sweepHandler};
|
||||
|
||||
// Interface to the database
|
||||
auto backend = Backend::make_Backend(ioc, config);
|
||||
|
||||
// Manages clients subscribed to streams
|
||||
auto subscriptions =
|
||||
SubscriptionManager::make_SubscriptionManager(config, backend);
|
||||
|
||||
// Tracks which ledgers have been validated by the
|
||||
// network
|
||||
auto ledgers = NetworkValidatedLedgers::make_ValidatedLedgers();
|
||||
|
||||
// Handles the connection to one or more rippled nodes.
|
||||
// ETL uses the balancer to extract data.
|
||||
// The server uses the balancer to forward RPCs to a rippled node.
|
||||
// The balancer itself publishes to streams (transactions_proposed and
|
||||
// accounts_proposed)
|
||||
auto balancer = ETLLoadBalancer::make_ETLLoadBalancer(
|
||||
config, ioc, backend, subscriptions, ledgers);
|
||||
|
||||
// ETL is responsible for writing and publishing to streams. In read-only
|
||||
// mode, ETL only publishes
|
||||
auto etl = ReportingETL::make_ReportingETL(
|
||||
config, ioc, backend, subscriptions, balancer, ledgers);
|
||||
|
||||
// The server handles incoming RPCs
|
||||
auto httpServer = Server::make_HttpServer(
|
||||
config, ioc, ctxRef, backend, subscriptions, balancer, etl, dosGuard);
|
||||
|
||||
// Blocks until stopped.
|
||||
// When stopped, shared_ptrs fall out of scope
|
||||
// Calls destructors on all resources, and destructs in order
|
||||
start(ioc, threads);
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
LogService::fatal() << "Exit on exception: " << e.what();
|
||||
}
|
||||
@@ -1,5 +1,25 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <rpc/Counters.h>
|
||||
#include <rpc/RPC.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
namespace RPC {
|
||||
|
||||
@@ -66,20 +86,23 @@ Counters::report()
|
||||
{
|
||||
std::shared_lock lk(mutex_);
|
||||
boost::json::object obj = {};
|
||||
obj[JS(rpc)] = boost::json::object{};
|
||||
auto& rpc = obj[JS(rpc)].as_object();
|
||||
|
||||
for (auto const& [method, info] : methodInfo_)
|
||||
{
|
||||
boost::json::object counters = {};
|
||||
counters["started"] = std::to_string(info.started);
|
||||
counters["finished"] = std::to_string(info.finished);
|
||||
counters["errored"] = std::to_string(info.errored);
|
||||
counters[JS(started)] = std::to_string(info.started);
|
||||
counters[JS(finished)] = std::to_string(info.finished);
|
||||
counters[JS(errored)] = std::to_string(info.errored);
|
||||
counters["forwarded"] = std::to_string(info.forwarded);
|
||||
counters["duration_us"] = std::to_string(info.duration);
|
||||
counters[JS(duration_us)] = std::to_string(info.duration);
|
||||
|
||||
obj[method] = std::move(counters);
|
||||
rpc[method] = std::move(counters);
|
||||
}
|
||||
obj["work_queue"] = workQueue_.get().report();
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
} // namespace RPC
|
||||
|
||||
@@ -1,11 +1,32 @@
|
||||
#ifndef RPC_COUNTERS_H
|
||||
#define RPC_COUNTERS_H
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <boost/json.hpp>
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <rpc/WorkQueue.h>
|
||||
#include <shared_mutex>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
|
||||
namespace RPC {
|
||||
|
||||
@@ -29,8 +50,10 @@ private:
|
||||
std::shared_mutex mutex_;
|
||||
std::unordered_map<std::string, MethodInfo> methodInfo_;
|
||||
|
||||
std::reference_wrapper<const WorkQueue> workQueue_;
|
||||
|
||||
public:
|
||||
Counters() = default;
|
||||
Counters(WorkQueue const& wq) : workQueue_(std::cref(wq)){};
|
||||
|
||||
void
|
||||
rpcErrored(std::string const& method);
|
||||
@@ -48,5 +71,3 @@ public:
|
||||
};
|
||||
|
||||
} // namespace RPC
|
||||
|
||||
#endif // RPC_COUNTERS_H
|
||||
169
src/rpc/Errors.cpp
Normal file
169
src/rpc/Errors.cpp
Normal file
@@ -0,0 +1,169 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <rpc/Errors.h>
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace {
|
||||
template <class... Ts>
|
||||
struct overloadSet : Ts...
|
||||
{
|
||||
using Ts::operator()...;
|
||||
};
|
||||
|
||||
// explicit deduction guide (not needed as of C++20, but clang be clang)
|
||||
template <class... Ts>
|
||||
overloadSet(Ts...) -> overloadSet<Ts...>;
|
||||
} // namespace
|
||||
|
||||
namespace RPC {
|
||||
|
||||
WarningInfo const&
|
||||
getWarningInfo(WarningCode code)
|
||||
{
|
||||
constexpr static WarningInfo infos[]{
|
||||
{warnUNKNOWN, "Unknown warning"},
|
||||
{warnRPC_CLIO,
|
||||
"This is a clio server. clio only serves validated data. If you "
|
||||
"want to talk to rippled, include 'ledger_index':'current' in your "
|
||||
"request"},
|
||||
{warnRPC_OUTDATED, "This server may be out of date"},
|
||||
{warnRPC_RATE_LIMIT, "You are about to be rate limited"}};
|
||||
|
||||
auto matchByCode = [code](auto const& info) { return info.code == code; };
|
||||
if (auto it = find_if(begin(infos), end(infos), matchByCode);
|
||||
it != end(infos))
|
||||
return *it;
|
||||
|
||||
throw(out_of_range("Invalid WarningCode"));
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
makeWarning(WarningCode code)
|
||||
{
|
||||
boost::json::object json;
|
||||
auto const& info = getWarningInfo(code);
|
||||
json["id"] = code;
|
||||
json["message"] = static_cast<string>(info.message);
|
||||
return json;
|
||||
}
|
||||
|
||||
ClioErrorInfo const&
|
||||
getErrorInfo(ClioError code)
|
||||
{
|
||||
constexpr static ClioErrorInfo infos[]{
|
||||
{ClioError::rpcMALFORMED_CURRENCY,
|
||||
"malformedCurrency",
|
||||
"Malformed currency."},
|
||||
{ClioError::rpcMALFORMED_REQUEST,
|
||||
"malformedRequest",
|
||||
"Malformed request."},
|
||||
{ClioError::rpcMALFORMED_OWNER, "malformedOwner", "Malformed owner."},
|
||||
{ClioError::rpcMALFORMED_ADDRESS,
|
||||
"malformedAddress",
|
||||
"Malformed address."},
|
||||
};
|
||||
|
||||
auto matchByCode = [code](auto const& info) { return info.code == code; };
|
||||
if (auto it = find_if(begin(infos), end(infos), matchByCode);
|
||||
it != end(infos))
|
||||
return *it;
|
||||
|
||||
throw(out_of_range("Invalid error code"));
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
makeError(
|
||||
RippledError err,
|
||||
optional<string_view> customError,
|
||||
optional<string_view> customMessage)
|
||||
{
|
||||
boost::json::object json;
|
||||
auto const& info = ripple::RPC::get_error_info(err);
|
||||
|
||||
json["error"] = customError.value_or(info.token.c_str()).data();
|
||||
json["error_code"] = static_cast<uint32_t>(err);
|
||||
json["error_message"] = customMessage.value_or(info.message.c_str()).data();
|
||||
json["status"] = "error";
|
||||
json["type"] = "response";
|
||||
return json;
|
||||
}
|
||||
|
||||
/**
 * @brief Generate a JSON error response from a clio-specific error code.
 *
 * @param err The clio custom @ref ClioError
 * @param customError Optional override for the "error" token
 * @param customMessage Optional override for the "error_message" text
 * @return boost::json::object The JSON output
 */
boost::json::object
makeError(
    ClioError err,
    optional<string_view> customError,
    optional<string_view> customMessage)
{
    boost::json::object json;
    auto const& info = getErrorInfo(err);

    // Construct strings from the full view rather than calling .data():
    // a string_view is not guaranteed to be null-terminated, so .data()
    // could read past the end of a caller-provided custom value.
    json["error"] = string{customError.value_or(info.error)};
    json["error_code"] = static_cast<uint32_t>(info.code);
    json["error_message"] = string{customMessage.value_or(info.message)};
    json["status"] = "error";
    json["type"] = "response";
    return json;
}
|
||||
|
||||
/**
 * @brief Generate a JSON error response from a @ref Status.
 *
 * Dispatches on the variant held in status.code: RippledError and
 * ClioError are routed to the corresponding makeError overload; any
 * extraInfo fields are merged into the resulting object.
 *
 * @param status The @ref Status to encode
 * @return boost::json::object The JSON output
 */
boost::json::object
makeError(Status const& status)
{
    // Map an empty string to nullopt so the per-code makeError overloads
    // only apply custom error/message overrides when actually set.
    auto wrapOptional = [](string_view const& str) {
        return str.empty() ? nullopt : make_optional(str);
    };

    auto res = visit(
        overloadSet{
            [&status, &wrapOptional](RippledError err) {
                // rpcUNKNOWN marks a hand-written, message-only error
                // (see the Status(std::string) constructor); emit the
                // message verbatim instead of a coded error object.
                if (err == ripple::rpcUNKNOWN)
                {
                    return boost::json::object{
                        {"error", status.message},
                        {"type", "response"},
                        {"status", "error"}};
                }

                return makeError(
                    err,
                    wrapOptional(status.error),
                    wrapOptional(status.message));
            },
            [&status, &wrapOptional](ClioError err) {
                return makeError(
                    err,
                    wrapOptional(status.error),
                    wrapOptional(status.message));
            },
        },
        status.code);
    // Merge any extra fields attached to the status into the response.
    if (status.extraInfo)
    {
        for (auto& [key, value] : status.extraInfo.value())
        {
            res[key] = value;
        }
    }
    return res;
}
|
||||
|
||||
} // namespace RPC
|
||||
256
src/rpc/Errors.h
Normal file
256
src/rpc/Errors.h
Normal file
@@ -0,0 +1,256 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
|
||||
#include <boost/json.hpp>
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <variant>
|
||||
|
||||
namespace RPC {
|
||||
|
||||
/**
|
||||
* @brief Custom clio RPC Errors.
|
||||
*/
|
||||
enum class ClioError {
    // Clio-specific codes start at 5000, well above rippled's
    // error_code_i range, so the two code spaces never collide.
    rpcMALFORMED_CURRENCY = 5000,
    rpcMALFORMED_REQUEST = 5001,
    rpcMALFORMED_OWNER = 5002,
    rpcMALFORMED_ADDRESS = 5003,
};
|
||||
|
||||
/**
|
||||
* @brief Holds info about a particular @ref ClioError.
|
||||
*/
|
||||
struct ClioErrorInfo
{
    ClioError const code;            // numeric clio error code
    std::string_view const error;    // short error token (e.g. "malformedOwner")
    std::string_view const message;  // human-readable error description
};
|
||||
|
||||
/**
|
||||
* @brief Clio uses compatible Rippled error codes for most RPC errors.
|
||||
*/
|
||||
using RippledError = ripple::error_code_i;
|
||||
|
||||
/**
|
||||
* @brief Clio operates on a combination of Rippled and Custom Clio error codes.
|
||||
*
|
||||
* @see RippledError For rippled error codes
|
||||
* @see ClioError For custom clio error codes
|
||||
*/
|
||||
using CombinedError = std::variant<RippledError, ClioError>;
|
||||
|
||||
/**
|
||||
* @brief A status returned from any RPC handler.
|
||||
*/
|
||||
struct Status
{
    CombinedError code = RippledError::rpcSUCCESS;
    std::string error = "";    // optional override for the error token
    std::string message = "";  // optional override for the error message
    std::optional<boost::json::object> extraInfo;  // extra fields merged into the JSON response

    Status() = default;
    /* implicit */ Status(CombinedError code) : code(code){};
    Status(CombinedError code, boost::json::object&& extraInfo)
        : code(code), extraInfo(std::move(extraInfo)){};

    // HACK. Some rippled handlers explicitly specify errors.
    // This means that we have to be able to duplicate this
    // functionality.
    explicit Status(std::string const& message)
        : code(ripple::rpcUNKNOWN), message(message)
    {
    }

    Status(CombinedError code, std::string message)
        : code(code), message(message)
    {
    }

    Status(CombinedError code, std::string error, std::string message)
        : code(code), error(error), message(message)
    {
    }

    /**
     * @brief Returns true if the Status is *not* OK.
     */
    operator bool() const
    {
        // Only a RippledError can represent success (rpcSUCCESS); a
        // ClioError in the variant is always an error condition.
        if (auto err = std::get_if<RippledError>(&code))
            return *err != RippledError::rpcSUCCESS;
        return true;
    }

    /**
     * @brief Returns true if the Status contains the desired @ref RippledError
     *
     * @param other The RippledError to match
     * @return bool true if status matches given error; false otherwise
     */
    bool
    operator==(RippledError other) const
    {
        if (auto err = std::get_if<RippledError>(&code))
            return *err == other;
        return false;
    }

    /**
     * @brief Returns true if the Status contains the desired @ref ClioError
     *
     * @param other The ClioError to match
     * @return bool true if status matches given error; false otherwise
     */
    bool
    operator==(ClioError other) const
    {
        if (auto err = std::get_if<ClioError>(&code))
            return *err == other;
        return false;
    }
};
|
||||
|
||||
/**
|
||||
* @brief Warning codes that can be returned by clio.
|
||||
*/
|
||||
enum WarningCode {
    warnUNKNOWN = -1,  // sentinel used by the default-constructed WarningInfo
    warnRPC_CLIO = 2001,
    warnRPC_OUTDATED = 2002,
    warnRPC_RATE_LIMIT = 2003
};
|
||||
|
||||
/**
|
||||
* @brief Holds information about a clio warning.
|
||||
*/
|
||||
struct WarningInfo
{
    // Default entry represents an unrecognized warning.
    constexpr WarningInfo() = default;
    constexpr WarningInfo(WarningCode code, char const* message)
        : code(code), message(message)
    {
    }

    WarningCode code = warnUNKNOWN;  // warning code; warnUNKNOWN when not set
    std::string_view const message = "unknown warning";  // human-readable text
};
|
||||
|
||||
/**
|
||||
* @brief Invalid parameters error.
|
||||
*/
|
||||
/**
 * @brief Invalid parameters error.
 *
 * Thrown by RPC handlers when request parameters fail validation; the
 * stored message is returned verbatim by what().
 */
class InvalidParamsError : public std::exception
{
    std::string msg;  // full error text returned by what()

public:
    /**
     * @brief Construct the error from a description of the bad parameter.
     *
     * @param msg The error message
     */
    explicit InvalidParamsError(std::string const& msg) : msg(msg)
    {
    }

    /**
     * @brief Returns the stored error message.
     */
    const char*
    what() const noexcept override  // noexcept: throw() is removed in C++20
    {
        return msg.c_str();
    }
};
|
||||
|
||||
/**
|
||||
* @brief Account not found error.
|
||||
*/
|
||||
/**
 * @brief Account not found error.
 *
 * Thrown by RPC handlers when a requested account does not exist; what()
 * returns the account identifier that was not found.
 */
class AccountNotFoundError : public std::exception
{
    std::string account;  // the account identifier that could not be found

public:
    /**
     * @brief Construct the error from the missing account's identifier.
     *
     * @param acct The account identifier
     */
    explicit AccountNotFoundError(std::string const& acct) : account(acct)
    {
    }

    /**
     * @brief Returns the account identifier that was not found.
     */
    const char*
    what() const noexcept override  // noexcept: throw() is removed in C++20
    {
        return account.c_str();
    }
};
|
||||
|
||||
/**
|
||||
* @brief A globally available @ref Status that represents a successful state
|
||||
*/
|
||||
static Status OK;
|
||||
|
||||
/**
|
||||
* @brief Get the warning info object from a warning code.
|
||||
*
|
||||
* @param code The warning code
|
||||
* @return WarningInfo const& A reference to the static warning info
|
||||
*/
|
||||
WarningInfo const&
|
||||
getWarningInfo(WarningCode code);
|
||||
|
||||
/**
|
||||
* @brief Generate JSON from a warning code.
|
||||
*
|
||||
* @param code The @ref WarningCode
|
||||
* @return boost::json::object The JSON output
|
||||
*/
|
||||
boost::json::object
|
||||
makeWarning(WarningCode code);
|
||||
|
||||
/**
|
||||
* @brief Generate JSON from a @ref Status.
|
||||
*
|
||||
* @param status The @ref Status
|
||||
* @return boost::json::object The JSON output
|
||||
*/
|
||||
boost::json::object
|
||||
makeError(Status const& status);
|
||||
|
||||
/**
|
||||
* @brief Generate JSON from a @ref RippledError.
|
||||
*
|
||||
* @param status The rippled @ref RippledError
|
||||
* @return boost::json::object The JSON output
|
||||
*/
|
||||
boost::json::object
|
||||
makeError(
|
||||
RippledError err,
|
||||
std::optional<std::string_view> customError = std::nullopt,
|
||||
std::optional<std::string_view> customMessage = std::nullopt);
|
||||
|
||||
/**
|
||||
* @brief Generate JSON from a @ref ClioError.
|
||||
*
|
||||
* @param status The clio's custom @ref ClioError
|
||||
* @return boost::json::object The JSON output
|
||||
*/
|
||||
boost::json::object
|
||||
makeError(
|
||||
ClioError err,
|
||||
std::optional<std::string_view> customError = std::nullopt,
|
||||
std::optional<std::string_view> customMessage = std::nullopt);
|
||||
|
||||
} // namespace RPC
|
||||
@@ -1,5 +1,23 @@
|
||||
#ifndef REPORTING_HANDLERS_H_INCLUDED
|
||||
#define REPORTING_HANDLERS_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <rpc/RPC.h>
|
||||
|
||||
@@ -21,6 +39,9 @@ doAccountCurrencies(Context const& context);
|
||||
Result
|
||||
doAccountLines(Context const& context);
|
||||
|
||||
Result
|
||||
doAccountNFTs(Context const& context);
|
||||
|
||||
Result
|
||||
doAccountObjects(Context const& context);
|
||||
|
||||
@@ -41,10 +62,26 @@ doChannelAuthorize(Context const& context);
|
||||
Result
|
||||
doChannelVerify(Context const& context);
|
||||
|
||||
// offers methods
|
||||
// book methods
|
||||
[[nodiscard]] Result
|
||||
doBookChanges(Context const& context);
|
||||
|
||||
Result
|
||||
doBookOffers(Context const& context);
|
||||
|
||||
// NFT methods
|
||||
Result
|
||||
doNFTBuyOffers(Context const& context);
|
||||
|
||||
Result
|
||||
doNFTSellOffers(Context const& context);
|
||||
|
||||
Result
|
||||
doNFTInfo(Context const& context);
|
||||
|
||||
Result
|
||||
doNFTHistory(Context const& context);
|
||||
|
||||
// ledger methods
|
||||
Result
|
||||
doLedger(Context const& context);
|
||||
@@ -83,4 +120,3 @@ doServerInfo(Context const& context);
|
||||
Result
|
||||
doRandom(Context const& context);
|
||||
} // namespace RPC
|
||||
#endif
|
||||
|
||||
352
src/rpc/RPC.cpp
352
src/rpc/RPC.cpp
@@ -1,22 +1,87 @@
|
||||
#include <boost/asio/spawn.hpp>
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <etl/ETLSource.h>
|
||||
#include <log/Logger.h>
|
||||
#include <rpc/Handlers.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
#include <webserver/HttpBase.h>
|
||||
#include <webserver/WsBase.h>
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <unordered_map>
|
||||
|
||||
namespace RPC {
|
||||
using namespace std;
|
||||
using namespace clio;
|
||||
|
||||
std::optional<Context>
|
||||
// local to compilation unit loggers
|
||||
namespace {
|
||||
clio::Logger gPerfLog{"Performance"};
|
||||
clio::Logger gLog{"RPC"};
|
||||
} // namespace
|
||||
|
||||
namespace RPC {
|
||||
Context::Context(
|
||||
boost::asio::yield_context& yield_,
|
||||
string const& command_,
|
||||
uint32_t version_,
|
||||
boost::json::object const& params_,
|
||||
shared_ptr<BackendInterface const> const& backend_,
|
||||
shared_ptr<SubscriptionManager> const& subscriptions_,
|
||||
shared_ptr<ETLLoadBalancer> const& balancer_,
|
||||
shared_ptr<ReportingETL const> const& etl_,
|
||||
shared_ptr<WsBase> const& session_,
|
||||
util::TagDecoratorFactory const& tagFactory_,
|
||||
Backend::LedgerRange const& range_,
|
||||
Counters& counters_,
|
||||
string const& clientIp_)
|
||||
: Taggable(tagFactory_)
|
||||
, yield(yield_)
|
||||
, method(command_)
|
||||
, version(version_)
|
||||
, params(params_)
|
||||
, backend(backend_)
|
||||
, subscriptions(subscriptions_)
|
||||
, balancer(balancer_)
|
||||
, etl(etl_)
|
||||
, session(session_)
|
||||
, range(range_)
|
||||
, counters(counters_)
|
||||
, clientIp(clientIp_)
|
||||
{
|
||||
gPerfLog.debug() << tag() << "new Context created";
|
||||
}
|
||||
|
||||
optional<Context>
|
||||
make_WsContext(
|
||||
boost::asio::yield_context& yc,
|
||||
boost::json::object const& request,
|
||||
std::shared_ptr<BackendInterface const> const& backend,
|
||||
std::shared_ptr<SubscriptionManager> const& subscriptions,
|
||||
std::shared_ptr<ETLLoadBalancer> const& balancer,
|
||||
std::shared_ptr<ReportingETL const> const& etl,
|
||||
std::shared_ptr<WsBase> const& session,
|
||||
shared_ptr<BackendInterface const> const& backend,
|
||||
shared_ptr<SubscriptionManager> const& subscriptions,
|
||||
shared_ptr<ETLLoadBalancer> const& balancer,
|
||||
shared_ptr<ReportingETL const> const& etl,
|
||||
shared_ptr<WsBase> const& session,
|
||||
util::TagDecoratorFactory const& tagFactory,
|
||||
Backend::LedgerRange const& range,
|
||||
Counters& counters,
|
||||
std::string const& clientIp)
|
||||
string const& clientIp)
|
||||
{
|
||||
boost::json::value commandValue = nullptr;
|
||||
if (!request.contains("command") && request.contains("method"))
|
||||
@@ -27,9 +92,9 @@ make_WsContext(
|
||||
if (!commandValue.is_string())
|
||||
return {};
|
||||
|
||||
std::string command = commandValue.as_string().c_str();
|
||||
string command = commandValue.as_string().c_str();
|
||||
|
||||
return Context{
|
||||
return make_optional<Context>(
|
||||
yc,
|
||||
command,
|
||||
1,
|
||||
@@ -39,27 +104,29 @@ make_WsContext(
|
||||
balancer,
|
||||
etl,
|
||||
session,
|
||||
tagFactory,
|
||||
range,
|
||||
counters,
|
||||
clientIp};
|
||||
clientIp);
|
||||
}
|
||||
|
||||
std::optional<Context>
|
||||
optional<Context>
|
||||
make_HttpContext(
|
||||
boost::asio::yield_context& yc,
|
||||
boost::json::object const& request,
|
||||
std::shared_ptr<BackendInterface const> const& backend,
|
||||
std::shared_ptr<SubscriptionManager> const& subscriptions,
|
||||
std::shared_ptr<ETLLoadBalancer> const& balancer,
|
||||
std::shared_ptr<ReportingETL const> const& etl,
|
||||
shared_ptr<BackendInterface const> const& backend,
|
||||
shared_ptr<SubscriptionManager> const& subscriptions,
|
||||
shared_ptr<ETLLoadBalancer> const& balancer,
|
||||
shared_ptr<ReportingETL const> const& etl,
|
||||
util::TagDecoratorFactory const& tagFactory,
|
||||
Backend::LedgerRange const& range,
|
||||
RPC::Counters& counters,
|
||||
std::string const& clientIp)
|
||||
string const& clientIp)
|
||||
{
|
||||
if (!request.contains("method") || !request.at("method").is_string())
|
||||
return {};
|
||||
|
||||
std::string const& command = request.at("method").as_string().c_str();
|
||||
string const& command = request.at("method").as_string().c_str();
|
||||
|
||||
if (command == "subscribe" || command == "unsubscribe")
|
||||
return {};
|
||||
@@ -75,7 +142,7 @@ make_HttpContext(
|
||||
if (!array.at(0).is_object())
|
||||
return {};
|
||||
|
||||
return Context{
|
||||
return make_optional<Context>(
|
||||
yc,
|
||||
command,
|
||||
1,
|
||||
@@ -85,97 +152,170 @@ make_HttpContext(
|
||||
balancer,
|
||||
etl,
|
||||
nullptr,
|
||||
tagFactory,
|
||||
range,
|
||||
counters,
|
||||
clientIp};
|
||||
clientIp);
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
make_error(Error err)
|
||||
using LimitRange = tuple<uint32_t, uint32_t, uint32_t>;
|
||||
using HandlerFunction = function<Result(Context const&)>;
|
||||
|
||||
struct Handler
|
||||
{
|
||||
boost::json::object json;
|
||||
ripple::RPC::ErrorInfo const& info(ripple::RPC::get_error_info(err));
|
||||
json["error"] = info.token;
|
||||
json["error_code"] = static_cast<std::uint32_t>(err);
|
||||
json["error_message"] = info.message;
|
||||
json["status"] = "error";
|
||||
json["type"] = "response";
|
||||
return json;
|
||||
}
|
||||
string method;
|
||||
function<Result(Context const&)> handler;
|
||||
optional<LimitRange> limit;
|
||||
bool isClioOnly = false;
|
||||
};
|
||||
|
||||
boost::json::object
|
||||
make_error(Status const& status)
|
||||
class HandlerTable
|
||||
{
|
||||
boost::json::object json;
|
||||
ripple::RPC::ErrorInfo const& info(
|
||||
ripple::RPC::get_error_info(status.error));
|
||||
json["error"] =
|
||||
status.strCode.size() ? status.strCode.c_str() : info.token.c_str();
|
||||
json["error_code"] = static_cast<std::uint32_t>(status.error);
|
||||
json["error_message"] =
|
||||
status.message.size() ? status.message.c_str() : info.message.c_str();
|
||||
json["status"] = "error";
|
||||
json["type"] = "response";
|
||||
return json;
|
||||
}
|
||||
static std::unordered_map<std::string, std::function<Result(Context const&)>>
|
||||
handlerTable{
|
||||
{"account_channels", &doAccountChannels},
|
||||
{"account_currencies", &doAccountCurrencies},
|
||||
{"account_info", &doAccountInfo},
|
||||
{"account_lines", &doAccountLines},
|
||||
{"account_objects", &doAccountObjects},
|
||||
{"account_offers", &doAccountOffers},
|
||||
{"account_tx", &doAccountTx},
|
||||
{"gateway_balances", &doGatewayBalances},
|
||||
{"noripple_check", &doNoRippleCheck},
|
||||
{"book_offers", &doBookOffers},
|
||||
{"channel_authorize", &doChannelAuthorize},
|
||||
{"channel_verify", &doChannelVerify},
|
||||
{"ledger", &doLedger},
|
||||
{"ledger_data", &doLedgerData},
|
||||
{"ledger_entry", &doLedgerEntry},
|
||||
{"ledger_range", &doLedgerRange},
|
||||
{"ledger_data", &doLedgerData},
|
||||
{"subscribe", &doSubscribe},
|
||||
{"server_info", &doServerInfo},
|
||||
{"unsubscribe", &doUnsubscribe},
|
||||
{"tx", &doTx},
|
||||
{"transaction_entry", &doTransactionEntry},
|
||||
{"random", &doRandom}};
|
||||
unordered_map<string, Handler> handlerMap_;
|
||||
|
||||
static std::unordered_set<std::string> forwardCommands{
|
||||
public:
|
||||
HandlerTable(initializer_list<Handler> handlers)
|
||||
{
|
||||
for (auto const& handler : handlers)
|
||||
{
|
||||
handlerMap_[handler.method] = move(handler);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
contains(string const& method)
|
||||
{
|
||||
return handlerMap_.contains(method);
|
||||
}
|
||||
|
||||
optional<LimitRange>
|
||||
getLimitRange(string const& command)
|
||||
{
|
||||
if (!handlerMap_.contains(command))
|
||||
return {};
|
||||
|
||||
return handlerMap_[command].limit;
|
||||
}
|
||||
|
||||
optional<HandlerFunction>
|
||||
getHandler(string const& command)
|
||||
{
|
||||
if (!handlerMap_.contains(command))
|
||||
return {};
|
||||
|
||||
return handlerMap_[command].handler;
|
||||
}
|
||||
|
||||
bool
|
||||
isClioOnly(string const& command)
|
||||
{
|
||||
return handlerMap_.contains(command) && handlerMap_[command].isClioOnly;
|
||||
}
|
||||
};
|
||||
|
||||
static HandlerTable handlerTable{
|
||||
{"account_channels", &doAccountChannels, LimitRange{10, 50, 256}},
|
||||
{"account_currencies", &doAccountCurrencies, {}},
|
||||
{"account_info", &doAccountInfo, {}},
|
||||
{"account_lines", &doAccountLines, LimitRange{10, 50, 256}},
|
||||
{"account_nfts", &doAccountNFTs, LimitRange{1, 5, 10}},
|
||||
{"account_objects", &doAccountObjects, LimitRange{10, 50, 256}},
|
||||
{"account_offers", &doAccountOffers, LimitRange{10, 50, 256}},
|
||||
{"account_tx", &doAccountTx, LimitRange{1, 50, 100}},
|
||||
{"gateway_balances", &doGatewayBalances, {}},
|
||||
{"noripple_check", &doNoRippleCheck, LimitRange{1, 300, 500}},
|
||||
{"book_changes", &doBookChanges, {}},
|
||||
{"book_offers", &doBookOffers, LimitRange{1, 50, 100}},
|
||||
{"ledger", &doLedger, {}},
|
||||
{"ledger_data", &doLedgerData, LimitRange{1, 100, 2048}},
|
||||
{"nft_buy_offers", &doNFTBuyOffers, LimitRange{1, 50, 100}},
|
||||
{"nft_history", &doNFTHistory, LimitRange{1, 50, 100}, true},
|
||||
{"nft_info", &doNFTInfo, {}, true},
|
||||
{"nft_sell_offers", &doNFTSellOffers, LimitRange{1, 50, 100}},
|
||||
{"ledger_entry", &doLedgerEntry, {}},
|
||||
{"ledger_range", &doLedgerRange, {}},
|
||||
{"subscribe", &doSubscribe, {}},
|
||||
{"server_info", &doServerInfo, {}},
|
||||
{"unsubscribe", &doUnsubscribe, {}},
|
||||
{"tx", &doTx, {}},
|
||||
{"transaction_entry", &doTransactionEntry, {}},
|
||||
{"random", &doRandom, {}}};
|
||||
|
||||
static unordered_set<string> forwardCommands{
|
||||
"submit",
|
||||
"submit_multisigned",
|
||||
"fee",
|
||||
"ledger_closed",
|
||||
"ledger_current",
|
||||
"ripple_path_find",
|
||||
"manifest"};
|
||||
"manifest",
|
||||
"channel_authorize",
|
||||
"channel_verify"};
|
||||
|
||||
bool
|
||||
validHandler(std::string const& method)
|
||||
validHandler(string const& method)
|
||||
{
|
||||
return handlerTable.contains(method) || forwardCommands.contains(method);
|
||||
}
|
||||
|
||||
/**
 * @brief Whether a method is implemented only by clio.
 *
 * Clio-only methods are never forwarded to rippled (see
 * shouldForwardToRippled).
 *
 * @param method The RPC method name
 * @return true if the method is clio-only; false otherwise
 */
bool
isClioOnly(string const& method)
{
    return handlerTable.isClioOnly(method);
}
|
||||
|
||||
/**
 * @brief Whether the "validated" flag should be omitted from the response.
 *
 * buildResponse stamps `validated: true` on successful object results;
 * subscribe/unsubscribe responses are exempt from that stamping.
 *
 * @param context The request context
 * @return true for subscribe/unsubscribe (case-insensitive); false otherwise
 */
bool
shouldSuppressValidatedFlag(RPC::Context const& context)
{
    return boost::iequals(context.method, "subscribe") ||
        boost::iequals(context.method, "unsubscribe");
}
|
||||
|
||||
/**
 * @brief Extract and clamp the "limit" field of an RPC request.
 *
 * @param context The request context (provides method name and params)
 * @param limit Out-parameter; receives the clamped limit, or the method's
 *        default when the request does not specify one
 * @return Status rpcUNKNOWN_COMMAND if the method has no handler;
 *         rpcINVALID_PARAMS if the method takes no limit or the provided
 *         limit is not a positive integer; OK (default Status) otherwise
 */
Status
getLimit(RPC::Context const& context, uint32_t& limit)
{
    // Unknown method: there is no limit range to consult.
    if (!handlerTable.getHandler(context.method))
        return Status{RippledError::rpcUNKNOWN_COMMAND};

    // Methods without a configured LimitRange do not accept a limit.
    if (!handlerTable.getLimitRange(context.method))
        return Status{
            RippledError::rpcINVALID_PARAMS, "rpcDoesNotRequireLimit"};

    auto [lo, def, hi] = *handlerTable.getLimitRange(context.method);

    if (context.params.contains(JS(limit)))
    {
        string errMsg = "Invalid field 'limit', not unsigned integer.";
        if (!context.params.at(JS(limit)).is_int64())
            return Status{RippledError::rpcINVALID_PARAMS, errMsg};

        int input = context.params.at(JS(limit)).as_int64();
        if (input <= 0)
            return Status{RippledError::rpcINVALID_PARAMS, errMsg};

        // Silently clamp the requested limit into [lo, hi].
        limit = clamp(static_cast<uint32_t>(input), lo, hi);
    }
    else
    {
        // No limit specified: use the method's default.
        limit = def;
    }

    return {};
}
|
||||
|
||||
bool
|
||||
shouldForwardToRippled(Context const& ctx)
|
||||
{
|
||||
auto request = ctx.params;
|
||||
|
||||
if (isClioOnly(ctx.method))
|
||||
return false;
|
||||
|
||||
if (forwardCommands.find(ctx.method) != forwardCommands.end())
|
||||
return true;
|
||||
|
||||
if (request.contains("ledger_index"))
|
||||
{
|
||||
auto indexValue = request.at("ledger_index");
|
||||
if (indexValue.is_string())
|
||||
{
|
||||
std::string index = indexValue.as_string().c_str();
|
||||
return index == "current" || index == "closed";
|
||||
}
|
||||
}
|
||||
if (specifiesCurrentOrClosedLedger(request))
|
||||
return true;
|
||||
|
||||
if (ctx.method == "account_info" && request.contains("queue") &&
|
||||
request.at("queue").as_bool())
|
||||
@@ -198,10 +338,7 @@ buildResponse(Context const& ctx)
|
||||
ctx.counters.rpcForwarded(ctx.method);
|
||||
|
||||
if (!res)
|
||||
return Status{Error::rpcFAILED_TO_FORWARD};
|
||||
|
||||
if (res->contains("result") && res->at("result").is_object())
|
||||
return res->at("result").as_object();
|
||||
return Status{RippledError::rpcFAILED_TO_FORWARD};
|
||||
|
||||
return *res;
|
||||
}
|
||||
@@ -209,33 +346,50 @@ buildResponse(Context const& ctx)
|
||||
if (ctx.method == "ping")
|
||||
return boost::json::object{};
|
||||
|
||||
if (handlerTable.find(ctx.method) == handlerTable.end())
|
||||
return Status{Error::rpcUNKNOWN_COMMAND};
|
||||
if (ctx.backend->isTooBusy())
|
||||
{
|
||||
gLog.error() << "Database is too busy. Rejecting request";
|
||||
return Status{RippledError::rpcTOO_BUSY};
|
||||
}
|
||||
|
||||
auto method = handlerTable[ctx.method];
|
||||
auto method = handlerTable.getHandler(ctx.method);
|
||||
|
||||
if (!method)
|
||||
return Status{RippledError::rpcUNKNOWN_COMMAND};
|
||||
|
||||
try
|
||||
{
|
||||
auto v = method(ctx);
|
||||
gPerfLog.debug() << ctx.tag() << " start executing rpc `" << ctx.method
|
||||
<< '`';
|
||||
auto v = (*method)(ctx);
|
||||
gPerfLog.debug() << ctx.tag() << " finish executing rpc `" << ctx.method
|
||||
<< '`';
|
||||
|
||||
if (auto object = std::get_if<boost::json::object>(&v))
|
||||
(*object)["validated"] = true;
|
||||
if (auto object = get_if<boost::json::object>(&v);
|
||||
object && not shouldSuppressValidatedFlag(ctx))
|
||||
{
|
||||
(*object)[JS(validated)] = true;
|
||||
}
|
||||
|
||||
return v;
|
||||
}
|
||||
catch (InvalidParamsError const& err)
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, err.what()};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, err.what()};
|
||||
}
|
||||
catch (AccountNotFoundError const& err)
|
||||
{
|
||||
return Status{Error::rpcACT_NOT_FOUND, err.what()};
|
||||
return Status{RippledError::rpcACT_NOT_FOUND, err.what()};
|
||||
}
|
||||
catch (std::exception const& err)
|
||||
catch (Backend::DatabaseTimeout const& t)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error)
|
||||
<< __func__ << " caught exception : " << err.what();
|
||||
return Status{Error::rpcINTERNAL, err.what()};
|
||||
gLog.error() << "Database timeout";
|
||||
return Status{RippledError::rpcTOO_BUSY};
|
||||
}
|
||||
catch (exception const& err)
|
||||
{
|
||||
gLog.error() << ctx.tag() << " caught exception: " << err.what();
|
||||
return Status{RippledError::rpcINTERNAL};
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
141
src/rpc/RPC.h
141
src/rpc/RPC.h
@@ -1,14 +1,37 @@
|
||||
#ifndef REPORTING_RPC_H_INCLUDED
|
||||
#define REPORTING_RPC_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <log/Logger.h>
|
||||
#include <rpc/Counters.h>
|
||||
#include <rpc/Errors.h>
|
||||
#include <util/Taggable.h>
|
||||
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json.hpp>
|
||||
#include <backend/BackendInterface.h>
|
||||
|
||||
#include <optional>
|
||||
#include <rpc/Counters.h>
|
||||
#include <string>
|
||||
#include <variant>
|
||||
|
||||
/*
|
||||
* This file contains various classes necessary for executing RPC handlers.
|
||||
* Context gives the handlers access to various other parts of the application
|
||||
@@ -27,8 +50,9 @@ class ReportingETL;
|
||||
|
||||
namespace RPC {
|
||||
|
||||
struct Context
|
||||
struct Context : public util::Taggable
|
||||
{
|
||||
clio::Logger perfLog_{"Performance"};
|
||||
boost::asio::yield_context& yield;
|
||||
std::string method;
|
||||
std::uint32_t version;
|
||||
@@ -55,25 +79,11 @@ struct Context
|
||||
std::shared_ptr<ETLLoadBalancer> const& balancer_,
|
||||
std::shared_ptr<ReportingETL const> const& etl_,
|
||||
std::shared_ptr<WsBase> const& session_,
|
||||
util::TagDecoratorFactory const& tagFactory_,
|
||||
Backend::LedgerRange const& range_,
|
||||
Counters& counters_,
|
||||
std::string const& clientIp_)
|
||||
: yield(yield_)
|
||||
, method(command_)
|
||||
, version(version_)
|
||||
, params(params_)
|
||||
, backend(backend_)
|
||||
, subscriptions(subscriptions_)
|
||||
, balancer(balancer_)
|
||||
, etl(etl_)
|
||||
, session(session_)
|
||||
, range(range_)
|
||||
, counters(counters_)
|
||||
, clientIp(clientIp_)
|
||||
{
|
||||
}
|
||||
std::string const& clientIp_);
|
||||
};
|
||||
using Error = ripple::error_code_i;
|
||||
|
||||
struct AccountCursor
|
||||
{
|
||||
@@ -81,84 +91,20 @@ struct AccountCursor
|
||||
std::uint32_t hint;
|
||||
|
||||
std::string
|
||||
toString()
|
||||
toString() const
|
||||
{
|
||||
return ripple::strHex(index) + "," + std::to_string(hint);
|
||||
}
|
||||
|
||||
bool
|
||||
isNonZero()
|
||||
isNonZero() const
|
||||
{
|
||||
return index.isNonZero() || hint != 0;
|
||||
}
|
||||
};
|
||||
|
||||
struct Status
|
||||
{
|
||||
Error error = Error::rpcSUCCESS;
|
||||
std::string strCode = "";
|
||||
std::string message = "";
|
||||
|
||||
Status(){};
|
||||
|
||||
Status(Error error_) : error(error_){};
|
||||
|
||||
Status(Error error_, std::string message_)
|
||||
: error(error_), message(message_)
|
||||
{
|
||||
}
|
||||
Status(Error error_, std::string strCode_, std::string message_)
|
||||
: error(error_), strCode(strCode_), message(message_)
|
||||
{
|
||||
}
|
||||
|
||||
/** Returns true if the Status is *not* OK. */
|
||||
operator bool() const
|
||||
{
|
||||
return error != Error::rpcSUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
static Status OK;
|
||||
|
||||
using Result = std::variant<Status, boost::json::object>;
|
||||
|
||||
class InvalidParamsError : public std::exception
|
||||
{
|
||||
std::string msg;
|
||||
|
||||
public:
|
||||
InvalidParamsError(std::string const& msg) : msg(msg)
|
||||
{
|
||||
}
|
||||
|
||||
const char*
|
||||
what() const throw() override
|
||||
{
|
||||
return msg.c_str();
|
||||
}
|
||||
};
|
||||
class AccountNotFoundError : public std::exception
|
||||
{
|
||||
std::string account;
|
||||
|
||||
public:
|
||||
AccountNotFoundError(std::string const& acct) : account(acct)
|
||||
{
|
||||
}
|
||||
const char*
|
||||
what() const throw() override
|
||||
{
|
||||
return account.c_str();
|
||||
}
|
||||
};
|
||||
|
||||
boost::json::object
|
||||
make_error(Status const& status);
|
||||
|
||||
boost::json::object
|
||||
make_error(Error err);
|
||||
|
||||
std::optional<Context>
|
||||
make_WsContext(
|
||||
boost::asio::yield_context& yc,
|
||||
@@ -168,6 +114,7 @@ make_WsContext(
|
||||
std::shared_ptr<ETLLoadBalancer> const& balancer,
|
||||
std::shared_ptr<ReportingETL const> const& etl,
|
||||
std::shared_ptr<WsBase> const& session,
|
||||
util::TagDecoratorFactory const& tagFactory,
|
||||
Backend::LedgerRange const& range,
|
||||
Counters& counters,
|
||||
std::string const& clientIp);
|
||||
@@ -180,6 +127,7 @@ make_HttpContext(
|
||||
std::shared_ptr<SubscriptionManager> const& subscriptions,
|
||||
std::shared_ptr<ETLLoadBalancer> const& balancer,
|
||||
std::shared_ptr<ReportingETL const> const& etl,
|
||||
util::TagDecoratorFactory const& tagFactory,
|
||||
Backend::LedgerRange const& range,
|
||||
Counters& counters,
|
||||
std::string const& clientIp);
|
||||
@@ -190,24 +138,29 @@ buildResponse(Context const& ctx);
|
||||
bool
|
||||
validHandler(std::string const& method);
|
||||
|
||||
bool
|
||||
isClioOnly(std::string const& method);
|
||||
|
||||
Status
|
||||
getLimit(RPC::Context const& context, std::uint32_t& limit);
|
||||
|
||||
template <class T>
|
||||
void
|
||||
logDuration(Context const& ctx, T const& dur)
|
||||
{
|
||||
static clio::Logger log{"RPC"};
|
||||
std::stringstream ss;
|
||||
ss << "Request processing duration = "
|
||||
ss << ctx.tag() << "Request processing duration = "
|
||||
<< std::chrono::duration_cast<std::chrono::milliseconds>(dur).count()
|
||||
<< " milliseconds. request = " << ctx.params;
|
||||
auto seconds =
|
||||
std::chrono::duration_cast<std::chrono::seconds>(dur).count();
|
||||
if (seconds > 10)
|
||||
BOOST_LOG_TRIVIAL(error) << ss.str();
|
||||
log.error() << ss.str();
|
||||
else if (seconds > 1)
|
||||
BOOST_LOG_TRIVIAL(warning) << ss.str();
|
||||
log.warn() << ss.str();
|
||||
else
|
||||
BOOST_LOG_TRIVIAL(debug) << ss.str();
|
||||
log.info() << ss.str();
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
|
||||
#endif // REPORTING_RPC_H_INCLUDED
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,24 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef XRPL_REPORTING_RPCHELPERS_H_INCLUDED
|
||||
#define XRPL_REPORTING_RPCHELPERS_H_INCLUDED
|
||||
/*
|
||||
* This file contains a variety of utility functions used when executing
|
||||
* the handlers
|
||||
@@ -14,11 +32,16 @@
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <rpc/RPC.h>
|
||||
|
||||
// Useful macro for borrowing from ripple::jss
|
||||
// static strings. (J)son (S)trings
|
||||
#define JS(x) ripple::jss::x.c_str()
|
||||
|
||||
// Access (SF)ield name (S)trings
|
||||
#define SFS(x) ripple::x.jsonName.c_str()
|
||||
|
||||
namespace RPC {
|
||||
std::optional<ripple::AccountID>
|
||||
accountFromStringStrict(std::string const& account);
|
||||
std::optional<ripple::AccountID>
|
||||
accountFromSeed(std::string const& account);
|
||||
|
||||
bool
|
||||
isOwnedByAccount(ripple::SLE const& sle, ripple::AccountID const& accountID);
|
||||
@@ -31,7 +54,6 @@ parseAccountCursor(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t seq,
|
||||
std::optional<std::string> jsonCursor,
|
||||
ripple::AccountID const& accountID,
|
||||
boost::asio::yield_context& yield);
|
||||
|
||||
// TODO this function should probably be in a different file and namespace
|
||||
@@ -55,7 +77,8 @@ bool
|
||||
insertDeliveredAmount(
|
||||
boost::json::object& metaJson,
|
||||
std::shared_ptr<ripple::STTx const> const& txn,
|
||||
std::shared_ptr<ripple::TxMeta const> const& meta);
|
||||
std::shared_ptr<ripple::TxMeta const> const& meta,
|
||||
uint32_t date);
|
||||
|
||||
boost::json::object
|
||||
toJson(ripple::STBase const& obj);
|
||||
@@ -91,7 +114,25 @@ traverseOwnedNodes(
|
||||
std::uint32_t limit,
|
||||
std::optional<std::string> jsonCursor,
|
||||
boost::asio::yield_context& yield,
|
||||
std::function<void(ripple::SLE)> atOwnedNode);
|
||||
std::function<void(ripple::SLE&&)> atOwnedNode);
|
||||
|
||||
std::variant<Status, AccountCursor>
|
||||
traverseOwnedNodes(
|
||||
BackendInterface const& backend,
|
||||
ripple::Keylet const& owner,
|
||||
ripple::uint256 const& hexMarker,
|
||||
std::uint32_t const startHint,
|
||||
std::uint32_t sequence,
|
||||
std::uint32_t limit,
|
||||
std::optional<std::string> jsonCursor,
|
||||
boost::asio::yield_context& yield,
|
||||
std::function<void(ripple::SLE&&)> atOwnedNode);
|
||||
|
||||
std::shared_ptr<ripple::SLE const>
|
||||
read(
|
||||
ripple::Keylet const& keylet,
|
||||
ripple::LedgerInfo const& lgrInfo,
|
||||
Context const& context);
|
||||
|
||||
std::variant<Status, std::pair<ripple::PublicKey, ripple::SecretKey>>
|
||||
keypairFromRequst(boost::json::object const& request);
|
||||
@@ -200,5 +241,52 @@ getString(
|
||||
boost::json::object const& request,
|
||||
std::string const& field,
|
||||
std::string dfault);
|
||||
|
||||
Status
|
||||
getHexMarker(boost::json::object const& request, ripple::uint256& marker);
|
||||
|
||||
Status
|
||||
getAccount(boost::json::object const& request, ripple::AccountID& accountId);
|
||||
|
||||
Status
|
||||
getAccount(
|
||||
boost::json::object const& request,
|
||||
ripple::AccountID& destAccount,
|
||||
boost::string_view const& field);
|
||||
|
||||
Status
|
||||
getOptionalAccount(
|
||||
boost::json::object const& request,
|
||||
std::optional<ripple::AccountID>& account,
|
||||
boost::string_view const& field);
|
||||
|
||||
Status
|
||||
getTaker(boost::json::object const& request, ripple::AccountID& takerID);
|
||||
|
||||
Status
|
||||
getChannelId(boost::json::object const& request, ripple::uint256& channelId);
|
||||
|
||||
bool
|
||||
specifiesCurrentOrClosedLedger(boost::json::object const& request);
|
||||
|
||||
std::variant<ripple::uint256, Status>
|
||||
getNFTID(boost::json::object const& request);
|
||||
|
||||
// This function is the driver for both `account_tx` and `nft_tx` and should
|
||||
// be used for any future transaction enumeration APIs.
|
||||
std::variant<Status, boost::json::object>
|
||||
traverseTransactions(
|
||||
Context const& context,
|
||||
std::function<Backend::TransactionsAndCursor(
|
||||
std::shared_ptr<Backend::BackendInterface const> const& backend,
|
||||
std::uint32_t const,
|
||||
bool const,
|
||||
std::optional<Backend::TransactionsCursor> const&,
|
||||
boost::asio::yield_context& yield)> transactionFetcher);
|
||||
|
||||
[[nodiscard]] boost::json::object const
|
||||
computeBookChanges(
|
||||
ripple::LedgerInfo const& lgrInfo,
|
||||
std::vector<Backend::TransactionAndMetadata> const& transactions);
|
||||
|
||||
} // namespace RPC
|
||||
#endif
|
||||
|
||||
30
src/rpc/WorkQueue.cpp
Normal file
30
src/rpc/WorkQueue.cpp
Normal file
@@ -0,0 +1,30 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <rpc/WorkQueue.h>
|
||||
|
||||
WorkQueue::WorkQueue(std::uint32_t numWorkers, uint32_t maxSize)
|
||||
{
|
||||
if (maxSize != 0)
|
||||
maxSize_ = maxSize;
|
||||
while (--numWorkers)
|
||||
{
|
||||
threads_.emplace_back([this] { ioc_.run(); });
|
||||
}
|
||||
}
|
||||
97
src/rpc/WorkQueue.h
Normal file
97
src/rpc/WorkQueue.h
Normal file
@@ -0,0 +1,97 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <log/Logger.h>
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json.hpp>
|
||||
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <queue>
|
||||
#include <shared_mutex>
|
||||
#include <thread>
|
||||
|
||||
class WorkQueue
|
||||
{
|
||||
// these are cumulative for the lifetime of the process
|
||||
std::atomic_uint64_t queued_ = 0;
|
||||
std::atomic_uint64_t durationUs_ = 0;
|
||||
|
||||
std::atomic_uint64_t curSize_ = 0;
|
||||
uint32_t maxSize_ = std::numeric_limits<uint32_t>::max();
|
||||
clio::Logger log_{"RPC"};
|
||||
|
||||
public:
|
||||
WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0);
|
||||
|
||||
template <typename F>
|
||||
bool
|
||||
postCoro(F&& f, bool isWhiteListed)
|
||||
{
|
||||
if (curSize_ >= maxSize_ && !isWhiteListed)
|
||||
{
|
||||
log_.warn() << "Queue is full. rejecting job. current size = "
|
||||
<< curSize_ << " max size = " << maxSize_;
|
||||
return false;
|
||||
}
|
||||
++curSize_;
|
||||
auto start = std::chrono::system_clock::now();
|
||||
// Each time we enqueue a job, we want to post a symmetrical job that
|
||||
// will dequeue and run the job at the front of the job queue.
|
||||
boost::asio::spawn(
|
||||
ioc_,
|
||||
[this, f = std::move(f), start](boost::asio::yield_context yield) {
|
||||
auto run = std::chrono::system_clock::now();
|
||||
auto wait =
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(
|
||||
run - start)
|
||||
.count();
|
||||
// increment queued_ here, in the same place we implement
|
||||
// durationUs_
|
||||
++queued_;
|
||||
durationUs_ += wait;
|
||||
log_.info() << "WorkQueue wait time = " << wait
|
||||
<< " queue size = " << curSize_;
|
||||
f(yield);
|
||||
--curSize_;
|
||||
});
|
||||
return true;
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
report() const
|
||||
{
|
||||
boost::json::object obj;
|
||||
obj["queued"] = queued_;
|
||||
obj["queued_duration_us"] = durationUs_;
|
||||
obj["current_queue_size"] = curSize_;
|
||||
obj["max_queue_size"] = maxSize_;
|
||||
return obj;
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<std::thread> threads_ = {};
|
||||
|
||||
boost::asio::io_context ioc_ = {};
|
||||
std::optional<boost::asio::io_context::work> work_{ioc_};
|
||||
};
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
@@ -8,7 +27,7 @@
|
||||
#include <algorithm>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <backend/DBHelpers.h>
|
||||
#include <backend/Pg.h>
|
||||
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
namespace RPC {
|
||||
@@ -17,27 +36,27 @@ void
|
||||
addChannel(boost::json::array& jsonLines, ripple::SLE const& line)
|
||||
{
|
||||
boost::json::object jDst;
|
||||
jDst["channel_id"] = ripple::to_string(line.key());
|
||||
jDst["account"] = ripple::to_string(line.getAccountID(ripple::sfAccount));
|
||||
jDst["destination_account"] =
|
||||
jDst[JS(channel_id)] = ripple::to_string(line.key());
|
||||
jDst[JS(account)] = ripple::to_string(line.getAccountID(ripple::sfAccount));
|
||||
jDst[JS(destination_account)] =
|
||||
ripple::to_string(line.getAccountID(ripple::sfDestination));
|
||||
jDst["amount"] = line[ripple::sfAmount].getText();
|
||||
jDst["balance"] = line[ripple::sfBalance].getText();
|
||||
jDst[JS(amount)] = line[ripple::sfAmount].getText();
|
||||
jDst[JS(balance)] = line[ripple::sfBalance].getText();
|
||||
if (publicKeyType(line[ripple::sfPublicKey]))
|
||||
{
|
||||
ripple::PublicKey const pk(line[ripple::sfPublicKey]);
|
||||
jDst["public_key"] = toBase58(ripple::TokenType::AccountPublic, pk);
|
||||
jDst["public_key_hex"] = strHex(pk);
|
||||
jDst[JS(public_key)] = toBase58(ripple::TokenType::AccountPublic, pk);
|
||||
jDst[JS(public_key_hex)] = strHex(pk);
|
||||
}
|
||||
jDst["settle_delay"] = line[ripple::sfSettleDelay];
|
||||
jDst[JS(settle_delay)] = line[ripple::sfSettleDelay];
|
||||
if (auto const& v = line[~ripple::sfExpiration])
|
||||
jDst["expiration"] = *v;
|
||||
jDst[JS(expiration)] = *v;
|
||||
if (auto const& v = line[~ripple::sfCancelAfter])
|
||||
jDst["cancel_after"] = *v;
|
||||
jDst[JS(cancel_after)] = *v;
|
||||
if (auto const& v = line[~ripple::sfSourceTag])
|
||||
jDst["source_tag"] = *v;
|
||||
jDst[JS(source_tag)] = *v;
|
||||
if (auto const& v = line[~ripple::sfDestinationTag])
|
||||
jDst["destination_tag"] = *v;
|
||||
jDst[JS(destination_tag)] = *v;
|
||||
|
||||
jsonLines.push_back(jDst);
|
||||
}
|
||||
@@ -54,66 +73,46 @@ doAccountChannels(Context const& context)
|
||||
|
||||
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
|
||||
|
||||
if (!request.contains("account"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
|
||||
ripple::AccountID accountID;
|
||||
if (auto const status = getAccount(request, accountID); status)
|
||||
return status;
|
||||
|
||||
if (!request.at("account").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
|
||||
auto rawAcct = context.backend->fetchLedgerObject(
|
||||
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
|
||||
|
||||
auto accountID =
|
||||
accountFromStringStrict(request.at("account").as_string().c_str());
|
||||
if (!rawAcct)
|
||||
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
|
||||
|
||||
if (!accountID)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
ripple::AccountID destAccount;
|
||||
if (auto const status =
|
||||
getAccount(request, destAccount, JS(destination_account));
|
||||
status)
|
||||
return status;
|
||||
|
||||
std::optional<ripple::AccountID> destAccount = {};
|
||||
if (request.contains("destination_account"))
|
||||
{
|
||||
if (!request.at("destination_account").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "destinationNotString"};
|
||||
|
||||
destAccount = accountFromStringStrict(
|
||||
request.at("destination_account").as_string().c_str());
|
||||
|
||||
if (!destAccount)
|
||||
return Status{Error::rpcINVALID_PARAMS, "destinationMalformed"};
|
||||
}
|
||||
|
||||
std::uint32_t limit = 200;
|
||||
if (request.contains("limit"))
|
||||
{
|
||||
if (!request.at("limit").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
|
||||
|
||||
limit = request.at("limit").as_int64();
|
||||
if (limit <= 0)
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
|
||||
}
|
||||
std::uint32_t limit;
|
||||
if (auto const status = getLimit(context, limit); status)
|
||||
return status;
|
||||
|
||||
std::optional<std::string> marker = {};
|
||||
if (request.contains("marker"))
|
||||
if (request.contains(JS(marker)))
|
||||
{
|
||||
if (!request.at("marker").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
|
||||
if (!request.at(JS(marker)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"};
|
||||
|
||||
marker = request.at("marker").as_string().c_str();
|
||||
marker = request.at(JS(marker)).as_string().c_str();
|
||||
}
|
||||
|
||||
response["account"] = ripple::to_string(*accountID);
|
||||
response["channels"] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonChannels = response.at("channels").as_array();
|
||||
response[JS(account)] = ripple::to_string(accountID);
|
||||
response[JS(channels)] = boost::json::value(boost::json::array_kind);
|
||||
response[JS(limit)] = limit;
|
||||
boost::json::array& jsonChannels = response.at(JS(channels)).as_array();
|
||||
|
||||
auto const addToResponse = [&](ripple::SLE const& sle) {
|
||||
auto const addToResponse = [&](ripple::SLE&& sle) {
|
||||
if (sle.getType() == ripple::ltPAYCHAN &&
|
||||
sle.getAccountID(ripple::sfAccount) == *accountID &&
|
||||
sle.getAccountID(ripple::sfAccount) == accountID &&
|
||||
(!destAccount ||
|
||||
*destAccount == sle.getAccountID(ripple::sfDestination)))
|
||||
destAccount == sle.getAccountID(ripple::sfDestination)))
|
||||
{
|
||||
if (limit-- == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
addChannel(jsonChannels, sle);
|
||||
}
|
||||
|
||||
@@ -122,23 +121,23 @@ doAccountChannels(Context const& context)
|
||||
|
||||
auto next = traverseOwnedNodes(
|
||||
*context.backend,
|
||||
*accountID,
|
||||
accountID,
|
||||
lgrInfo.seq,
|
||||
limit,
|
||||
marker,
|
||||
context.yield,
|
||||
addToResponse);
|
||||
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
|
||||
if (auto status = std::get_if<RPC::Status>(&next))
|
||||
return *status;
|
||||
|
||||
auto nextCursor = std::get<RPC::AccountCursor>(next);
|
||||
auto nextMarker = std::get<RPC::AccountCursor>(next);
|
||||
|
||||
if (nextCursor.isNonZero())
|
||||
response["marker"] = nextCursor.toString();
|
||||
if (nextMarker.isNonZero())
|
||||
response[JS(marker)] = nextMarker.toString();
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
@@ -24,20 +43,18 @@ doAccountCurrencies(Context const& context)
|
||||
|
||||
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
|
||||
|
||||
if (!request.contains("account"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
|
||||
ripple::AccountID accountID;
|
||||
if (auto const status = getAccount(request, accountID); status)
|
||||
return status;
|
||||
|
||||
if (!request.at("account").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
|
||||
auto rawAcct = context.backend->fetchLedgerObject(
|
||||
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
|
||||
|
||||
auto accountID =
|
||||
accountFromStringStrict(request.at("account").as_string().c_str());
|
||||
|
||||
if (!accountID)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
if (!rawAcct)
|
||||
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
|
||||
|
||||
std::set<std::string> send, receive;
|
||||
auto const addToResponse = [&](ripple::SLE const& sle) {
|
||||
auto const addToResponse = [&](ripple::SLE&& sle) {
|
||||
if (sle.getType() == ripple::ltRIPPLE_STATE)
|
||||
{
|
||||
ripple::STAmount balance = sle.getFieldAmount(ripple::sfBalance);
|
||||
@@ -61,26 +78,26 @@ doAccountCurrencies(Context const& context)
|
||||
|
||||
traverseOwnedNodes(
|
||||
*context.backend,
|
||||
*accountID,
|
||||
accountID,
|
||||
lgrInfo.seq,
|
||||
std::numeric_limits<std::uint32_t>::max(),
|
||||
{},
|
||||
context.yield,
|
||||
addToResponse);
|
||||
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
|
||||
response["receive_currencies"] =
|
||||
response[JS(receive_currencies)] =
|
||||
boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonReceive =
|
||||
response.at("receive_currencies").as_array();
|
||||
response.at(JS(receive_currencies)).as_array();
|
||||
|
||||
for (auto const& currency : receive)
|
||||
jsonReceive.push_back(currency.c_str());
|
||||
|
||||
response["send_currencies"] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonSend = response.at("send_currencies").as_array();
|
||||
response[JS(send_currencies)] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonSend = response.at(JS(send_currencies)).as_array();
|
||||
|
||||
for (auto const& currency : send)
|
||||
jsonSend.push_back(currency.c_str());
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/protocol/Indexes.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <boost/json.hpp>
|
||||
@@ -29,12 +48,12 @@ doAccountInfo(Context const& context)
|
||||
boost::json::object response = {};
|
||||
|
||||
std::string strIdent;
|
||||
if (request.contains("account"))
|
||||
strIdent = request.at("account").as_string().c_str();
|
||||
else if (request.contains("ident"))
|
||||
strIdent = request.at("ident").as_string().c_str();
|
||||
if (request.contains(JS(account)))
|
||||
strIdent = request.at(JS(account)).as_string().c_str();
|
||||
else if (request.contains(JS(ident)))
|
||||
strIdent = request.at(JS(ident)).as_string().c_str();
|
||||
else
|
||||
return Status{Error::rpcACT_MALFORMED};
|
||||
return Status{RippledError::rpcACT_MALFORMED};
|
||||
|
||||
// We only need to fetch the ledger header because the ledger hash is
|
||||
// supposed to be included in the response. The ledger sequence is specified
|
||||
@@ -48,41 +67,28 @@ doAccountInfo(Context const& context)
|
||||
// Get info on account.
|
||||
auto accountID = accountFromStringStrict(strIdent);
|
||||
if (!accountID)
|
||||
return Status{Error::rpcACT_MALFORMED};
|
||||
|
||||
assert(accountID.has_value());
|
||||
return Status{RippledError::rpcACT_MALFORMED};
|
||||
|
||||
auto key = ripple::keylet::account(accountID.value());
|
||||
|
||||
auto start = std::chrono::system_clock::now();
|
||||
std::optional<std::vector<unsigned char>> dbResponse =
|
||||
context.backend->fetchLedgerObject(key.key, lgrInfo.seq, context.yield);
|
||||
auto end = std::chrono::system_clock::now();
|
||||
|
||||
if (!dbResponse)
|
||||
{
|
||||
return Status{Error::rpcACT_NOT_FOUND};
|
||||
}
|
||||
return Status{RippledError::rpcACT_NOT_FOUND};
|
||||
|
||||
ripple::STLedgerEntry sle{
|
||||
ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key.key};
|
||||
|
||||
if (!key.check(sle))
|
||||
return Status{Error::rpcDB_DESERIALIZATION};
|
||||
return Status{RippledError::rpcDB_DESERIALIZATION};
|
||||
|
||||
// if (!binary)
|
||||
// response["account_data"] = getJson(sle);
|
||||
// else
|
||||
// response["account_data"] = ripple::strHex(*dbResponse);
|
||||
// response["db_time"] = time;
|
||||
|
||||
response["account_data"] = toJson(sle);
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response[JS(account_data)] = toJson(sle);
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
|
||||
// Return SignerList(s) if that is requested.
|
||||
if (request.contains("signer_lists") &&
|
||||
request.at("signer_lists").as_bool())
|
||||
if (request.contains(JS(signer_lists)) &&
|
||||
request.at(JS(signer_lists)).as_bool())
|
||||
{
|
||||
// We put the SignerList in an array because of an anticipated
|
||||
// future when we support multiple signer lists on one account.
|
||||
@@ -99,12 +105,12 @@ doAccountInfo(Context const& context)
|
||||
ripple::SerialIter{signers->data(), signers->size()},
|
||||
signersKey.key};
|
||||
if (!signersKey.check(sleSigners))
|
||||
return Status{Error::rpcDB_DESERIALIZATION};
|
||||
return Status{RippledError::rpcDB_DESERIALIZATION};
|
||||
|
||||
signerList.push_back(toJson(sleSigners));
|
||||
}
|
||||
|
||||
response["account_data"].as_object()["signer_lists"] =
|
||||
response[JS(account_data)].as_object()[JS(signer_lists)] =
|
||||
std::move(signerList);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/app/paths/TrustLine.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
@@ -39,7 +58,7 @@ addLine(
|
||||
auto lineQualityIn = viewLowest ? lowQualityIn : highQualityIn;
|
||||
auto lineQualityOut = viewLowest ? lowQualityOut : highQualityOut;
|
||||
|
||||
if (peerAccount and peerAccount != lineAccountIDPeer)
|
||||
if (peerAccount && peerAccount != lineAccountIDPeer)
|
||||
return;
|
||||
|
||||
if (!viewLowest)
|
||||
@@ -64,25 +83,25 @@ addLine(
|
||||
ripple::STAmount const& saLimitPeer(lineLimitPeer);
|
||||
|
||||
boost::json::object jPeer;
|
||||
jPeer["account"] = ripple::to_string(lineAccountIDPeer);
|
||||
jPeer["balance"] = saBalance.getText();
|
||||
jPeer["currency"] = ripple::to_string(saBalance.issue().currency);
|
||||
jPeer["limit"] = saLimit.getText();
|
||||
jPeer["limit_peer"] = saLimitPeer.getText();
|
||||
jPeer["quality_in"] = lineQualityIn;
|
||||
jPeer["quality_out"] = lineQualityOut;
|
||||
jPeer[JS(account)] = ripple::to_string(lineAccountIDPeer);
|
||||
jPeer[JS(balance)] = saBalance.getText();
|
||||
jPeer[JS(currency)] = ripple::to_string(saBalance.issue().currency);
|
||||
jPeer[JS(limit)] = saLimit.getText();
|
||||
jPeer[JS(limit_peer)] = saLimitPeer.getText();
|
||||
jPeer[JS(quality_in)] = lineQualityIn;
|
||||
jPeer[JS(quality_out)] = lineQualityOut;
|
||||
if (lineAuth)
|
||||
jPeer["authorized"] = true;
|
||||
jPeer[JS(authorized)] = true;
|
||||
if (lineAuthPeer)
|
||||
jPeer["peer_authorized"] = true;
|
||||
jPeer[JS(peer_authorized)] = true;
|
||||
if (lineNoRipple || !lineDefaultRipple)
|
||||
jPeer["no_ripple"] = lineNoRipple;
|
||||
jPeer[JS(no_ripple)] = lineNoRipple;
|
||||
if (lineNoRipple || !lineDefaultRipple)
|
||||
jPeer["no_ripple_peer"] = lineNoRipplePeer;
|
||||
jPeer[JS(no_ripple_peer)] = lineNoRipplePeer;
|
||||
if (lineFreeze)
|
||||
jPeer["freeze"] = true;
|
||||
jPeer[JS(freeze)] = true;
|
||||
if (lineFreezePeer)
|
||||
jPeer["freeze_peer"] = true;
|
||||
jPeer[JS(freeze_peer)] = true;
|
||||
|
||||
jsonLines.push_back(jPeer);
|
||||
}
|
||||
@@ -99,82 +118,91 @@ doAccountLines(Context const& context)
|
||||
|
||||
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
|
||||
|
||||
if (!request.contains("account"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
|
||||
ripple::AccountID accountID;
|
||||
if (auto const status = getAccount(request, accountID); status)
|
||||
return status;
|
||||
|
||||
if (!request.at("account").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
|
||||
auto rawAcct = context.backend->fetchLedgerObject(
|
||||
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
|
||||
|
||||
auto accountID =
|
||||
accountFromStringStrict(request.at("account").as_string().c_str());
|
||||
|
||||
if (!accountID)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
if (!rawAcct)
|
||||
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
|
||||
|
||||
std::optional<ripple::AccountID> peerAccount;
|
||||
if (request.contains("peer"))
|
||||
if (auto const status = getOptionalAccount(request, peerAccount, JS(peer));
|
||||
status)
|
||||
return status;
|
||||
|
||||
std::uint32_t limit;
|
||||
if (auto const status = getLimit(context, limit); status)
|
||||
return status;
|
||||
|
||||
std::optional<std::string> marker = {};
|
||||
if (request.contains(JS(marker)))
|
||||
{
|
||||
if (!request.at("peer").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "peerNotString"};
|
||||
if (not request.at(JS(marker)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"};
|
||||
|
||||
peerAccount =
|
||||
accountFromStringStrict(request.at("peer").as_string().c_str());
|
||||
|
||||
if (!peerAccount)
|
||||
return Status{Error::rpcINVALID_PARAMS, "peerMalformed"};
|
||||
marker = request.at(JS(marker)).as_string().c_str();
|
||||
}
|
||||
|
||||
std::uint32_t limit = 200;
|
||||
if (request.contains("limit"))
|
||||
auto ignoreDefault = false;
|
||||
if (request.contains(JS(ignore_default)))
|
||||
{
|
||||
if (!request.at("limit").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
|
||||
if (not request.at(JS(ignore_default)).is_bool())
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "ignoreDefaultNotBool"};
|
||||
|
||||
limit = request.at("limit").as_int64();
|
||||
if (limit <= 0)
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
|
||||
ignoreDefault = request.at(JS(ignore_default)).as_bool();
|
||||
}
|
||||
|
||||
std::optional<std::string> cursor = {};
|
||||
if (request.contains("marker"))
|
||||
{
|
||||
if (!request.at("marker").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
|
||||
response[JS(account)] = ripple::to_string(accountID);
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
response[JS(limit)] = limit;
|
||||
response[JS(lines)] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonLines = response.at(JS(lines)).as_array();
|
||||
|
||||
cursor = request.at("marker").as_string().c_str();
|
||||
}
|
||||
|
||||
response["account"] = ripple::to_string(*accountID);
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response["lines"] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonLines = response.at("lines").as_array();
|
||||
|
||||
auto const addToResponse = [&](ripple::SLE const& sle) -> void {
|
||||
auto const addToResponse = [&](ripple::SLE&& sle) -> void {
|
||||
if (sle.getType() == ripple::ltRIPPLE_STATE)
|
||||
{
|
||||
addLine(jsonLines, sle, *accountID, peerAccount);
|
||||
auto ignore = false;
|
||||
if (ignoreDefault)
|
||||
{
|
||||
if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() ==
|
||||
accountID)
|
||||
ignore =
|
||||
!(sle.getFieldU32(ripple::sfFlags) &
|
||||
ripple::lsfLowReserve);
|
||||
else
|
||||
ignore =
|
||||
!(sle.getFieldU32(ripple::sfFlags) &
|
||||
ripple::lsfHighReserve);
|
||||
}
|
||||
|
||||
if (!ignore)
|
||||
addLine(jsonLines, sle, accountID, peerAccount);
|
||||
}
|
||||
};
|
||||
|
||||
auto next = traverseOwnedNodes(
|
||||
*context.backend,
|
||||
*accountID,
|
||||
accountID,
|
||||
lgrInfo.seq,
|
||||
limit,
|
||||
cursor,
|
||||
marker,
|
||||
context.yield,
|
||||
addToResponse);
|
||||
|
||||
if (auto status = std::get_if<RPC::Status>(&next))
|
||||
return *status;
|
||||
|
||||
auto nextCursor = std::get<RPC::AccountCursor>(next);
|
||||
auto nextMarker = std::get<RPC::AccountCursor>(next);
|
||||
|
||||
if (nextCursor.isNonZero())
|
||||
response["marker"] = nextCursor.toString();
|
||||
if (nextMarker.isNonZero())
|
||||
response[JS(marker)] = nextMarker.toString();
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
} // namespace RPC
|
||||
|
||||
@@ -1,10 +1,31 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/app/paths/TrustLine.h>
|
||||
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
#include <ripple/protocol/Indexes.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <ripple/protocol/jss.h>
|
||||
#include <ripple/protocol/nftPageMask.h>
|
||||
#include <boost/json.hpp>
|
||||
#include <algorithm>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
@@ -23,7 +44,113 @@ std::unordered_map<std::string, ripple::LedgerEntryType> types{
|
||||
{"escrow", ripple::ltESCROW},
|
||||
{"deposit_preauth", ripple::ltDEPOSIT_PREAUTH},
|
||||
{"check", ripple::ltCHECK},
|
||||
};
|
||||
{"nft_page", ripple::ltNFTOKEN_PAGE},
|
||||
{"nft_offer", ripple::ltNFTOKEN_OFFER}};
|
||||
|
||||
Result
|
||||
doAccountNFTs(Context const& context)
|
||||
{
|
||||
auto request = context.params;
|
||||
boost::json::object response = {};
|
||||
|
||||
auto v = ledgerInfoFromRequest(context);
|
||||
if (auto status = std::get_if<Status>(&v))
|
||||
return *status;
|
||||
|
||||
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
|
||||
|
||||
ripple::AccountID accountID;
|
||||
if (auto const status = getAccount(request, accountID); status)
|
||||
return status;
|
||||
|
||||
if (!accountID)
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
|
||||
auto rawAcct = context.backend->fetchLedgerObject(
|
||||
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
|
||||
|
||||
if (!rawAcct)
|
||||
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
|
||||
|
||||
std::uint32_t limit;
|
||||
if (auto const status = getLimit(context, limit); status)
|
||||
return status;
|
||||
|
||||
ripple::uint256 marker;
|
||||
if (auto const status = getHexMarker(request, marker); status)
|
||||
return status;
|
||||
|
||||
response[JS(account)] = ripple::toBase58(accountID);
|
||||
response[JS(validated)] = true;
|
||||
response[JS(limit)] = limit;
|
||||
|
||||
std::uint32_t numPages = 0;
|
||||
response[JS(account_nfts)] = boost::json::value(boost::json::array_kind);
|
||||
auto& nfts = response.at(JS(account_nfts)).as_array();
|
||||
|
||||
// if a marker was passed, start at the page specified in marker. Else,
|
||||
// start at the max page
|
||||
auto const pageKey =
|
||||
marker.isZero() ? ripple::keylet::nftpage_max(accountID).key : marker;
|
||||
|
||||
auto const blob =
|
||||
context.backend->fetchLedgerObject(pageKey, lgrInfo.seq, context.yield);
|
||||
if (!blob)
|
||||
return response;
|
||||
std::optional<ripple::SLE const> page{
|
||||
ripple::SLE{ripple::SerialIter{blob->data(), blob->size()}, pageKey}};
|
||||
|
||||
// Continue iteration from the current page
|
||||
while (page)
|
||||
{
|
||||
auto arr = page->getFieldArray(ripple::sfNFTokens);
|
||||
|
||||
for (auto const& o : arr)
|
||||
{
|
||||
ripple::uint256 const nftokenID = o[ripple::sfNFTokenID];
|
||||
|
||||
{
|
||||
nfts.push_back(
|
||||
toBoostJson(o.getJson(ripple::JsonOptions::none)));
|
||||
auto& obj = nfts.back().as_object();
|
||||
|
||||
// Pull out the components of the nft ID.
|
||||
obj[SFS(sfFlags)] = ripple::nft::getFlags(nftokenID);
|
||||
obj[SFS(sfIssuer)] =
|
||||
to_string(ripple::nft::getIssuer(nftokenID));
|
||||
obj[SFS(sfNFTokenTaxon)] =
|
||||
ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID));
|
||||
obj[JS(nft_serial)] = ripple::nft::getSerial(nftokenID);
|
||||
|
||||
if (std::uint16_t xferFee = {
|
||||
ripple::nft::getTransferFee(nftokenID)})
|
||||
obj[SFS(sfTransferFee)] = xferFee;
|
||||
}
|
||||
}
|
||||
|
||||
++numPages;
|
||||
if (auto npm = (*page)[~ripple::sfPreviousPageMin])
|
||||
{
|
||||
auto const nextKey = ripple::Keylet(ripple::ltNFTOKEN_PAGE, *npm);
|
||||
if (numPages == limit)
|
||||
{
|
||||
response[JS(marker)] = to_string(nextKey.key);
|
||||
response[JS(limit)] = numPages;
|
||||
return response;
|
||||
}
|
||||
auto const nextBlob = context.backend->fetchLedgerObject(
|
||||
nextKey.key, lgrInfo.seq, context.yield);
|
||||
|
||||
page.emplace(ripple::SLE{
|
||||
ripple::SerialIter{nextBlob->data(), nextBlob->size()},
|
||||
nextKey.key});
|
||||
}
|
||||
else
|
||||
page.reset();
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
Result
|
||||
doAccountObjects(Context const& context)
|
||||
@@ -37,56 +164,42 @@ doAccountObjects(Context const& context)
|
||||
|
||||
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
|
||||
|
||||
if (!request.contains("account"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
|
||||
ripple::AccountID accountID;
|
||||
if (auto const status = getAccount(request, accountID); status)
|
||||
return status;
|
||||
|
||||
if (!request.at("account").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
|
||||
std::uint32_t limit;
|
||||
if (auto const status = getLimit(context, limit); status)
|
||||
return status;
|
||||
|
||||
auto accountID =
|
||||
accountFromStringStrict(request.at("account").as_string().c_str());
|
||||
|
||||
if (!accountID)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
|
||||
std::uint32_t limit = 200;
|
||||
if (request.contains("limit"))
|
||||
{
|
||||
if (!request.at("limit").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
|
||||
|
||||
limit = request.at("limit").as_int64();
|
||||
if (limit <= 0)
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
|
||||
}
|
||||
|
||||
std::optional<std::string> cursor = {};
|
||||
std::optional<std::string> marker = {};
|
||||
if (request.contains("marker"))
|
||||
{
|
||||
if (!request.at("marker").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"};
|
||||
|
||||
cursor = request.at("marker").as_string().c_str();
|
||||
marker = request.at("marker").as_string().c_str();
|
||||
}
|
||||
|
||||
std::optional<ripple::LedgerEntryType> objectType = {};
|
||||
if (request.contains("type"))
|
||||
if (request.contains(JS(type)))
|
||||
{
|
||||
if (!request.at("type").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "typeNotString"};
|
||||
if (!request.at(JS(type)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "typeNotString"};
|
||||
|
||||
std::string typeAsString = request.at("type").as_string().c_str();
|
||||
std::string typeAsString = request.at(JS(type)).as_string().c_str();
|
||||
if (types.find(typeAsString) == types.end())
|
||||
return Status{Error::rpcINVALID_PARAMS, "typeInvalid"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "typeInvalid"};
|
||||
|
||||
objectType = types[typeAsString];
|
||||
}
|
||||
|
||||
response["account"] = ripple::to_string(*accountID);
|
||||
response["account_objects"] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonObjects = response.at("account_objects").as_array();
|
||||
response[JS(account)] = ripple::to_string(accountID);
|
||||
response[JS(account_objects)] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonObjects =
|
||||
response.at(JS(account_objects)).as_array();
|
||||
|
||||
auto const addToResponse = [&](ripple::SLE const& sle) {
|
||||
auto const addToResponse = [&](ripple::SLE&& sle) {
|
||||
if (!objectType || objectType == sle.getType())
|
||||
{
|
||||
jsonObjects.push_back(toJson(sle));
|
||||
@@ -95,23 +208,22 @@ doAccountObjects(Context const& context)
|
||||
|
||||
auto next = traverseOwnedNodes(
|
||||
*context.backend,
|
||||
*accountID,
|
||||
accountID,
|
||||
lgrInfo.seq,
|
||||
limit,
|
||||
cursor,
|
||||
marker,
|
||||
context.yield,
|
||||
addToResponse);
|
||||
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
|
||||
if (auto status = std::get_if<RPC::Status>(&next))
|
||||
return *status;
|
||||
|
||||
auto nextCursor = std::get<RPC::AccountCursor>(next);
|
||||
|
||||
if (nextCursor.isNonZero())
|
||||
response["marker"] = nextCursor.toString();
|
||||
auto const& nextMarker = std::get<RPC::AccountCursor>(next);
|
||||
if (nextMarker.isNonZero())
|
||||
response[JS(marker)] = nextMarker.toString();
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/app/paths/TrustLine.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
@@ -27,37 +46,39 @@ addOffer(boost::json::array& offersJson, ripple::SLE const& offer)
|
||||
|
||||
if (!takerPays.native())
|
||||
{
|
||||
obj["taker_pays"] = boost::json::value(boost::json::object_kind);
|
||||
boost::json::object& takerPaysJson = obj.at("taker_pays").as_object();
|
||||
obj[JS(taker_pays)] = boost::json::value(boost::json::object_kind);
|
||||
boost::json::object& takerPaysJson = obj.at(JS(taker_pays)).as_object();
|
||||
|
||||
takerPaysJson["value"] = takerPays.getText();
|
||||
takerPaysJson["currency"] = ripple::to_string(takerPays.getCurrency());
|
||||
takerPaysJson["issuer"] = ripple::to_string(takerPays.getIssuer());
|
||||
takerPaysJson[JS(value)] = takerPays.getText();
|
||||
takerPaysJson[JS(currency)] =
|
||||
ripple::to_string(takerPays.getCurrency());
|
||||
takerPaysJson[JS(issuer)] = ripple::to_string(takerPays.getIssuer());
|
||||
}
|
||||
else
|
||||
{
|
||||
obj["taker_pays"] = takerPays.getText();
|
||||
obj[JS(taker_pays)] = takerPays.getText();
|
||||
}
|
||||
|
||||
if (!takerGets.native())
|
||||
{
|
||||
obj["taker_gets"] = boost::json::value(boost::json::object_kind);
|
||||
boost::json::object& takerGetsJson = obj.at("taker_gets").as_object();
|
||||
obj[JS(taker_gets)] = boost::json::value(boost::json::object_kind);
|
||||
boost::json::object& takerGetsJson = obj.at(JS(taker_gets)).as_object();
|
||||
|
||||
takerGetsJson["value"] = takerGets.getText();
|
||||
takerGetsJson["currency"] = ripple::to_string(takerGets.getCurrency());
|
||||
takerGetsJson["issuer"] = ripple::to_string(takerGets.getIssuer());
|
||||
takerGetsJson[JS(value)] = takerGets.getText();
|
||||
takerGetsJson[JS(currency)] =
|
||||
ripple::to_string(takerGets.getCurrency());
|
||||
takerGetsJson[JS(issuer)] = ripple::to_string(takerGets.getIssuer());
|
||||
}
|
||||
else
|
||||
{
|
||||
obj["taker_gets"] = takerGets.getText();
|
||||
obj[JS(taker_gets)] = takerGets.getText();
|
||||
}
|
||||
|
||||
obj["seq"] = offer.getFieldU32(ripple::sfSequence);
|
||||
obj["flags"] = offer.getFieldU32(ripple::sfFlags);
|
||||
obj["quality"] = rate.getText();
|
||||
obj[JS(seq)] = offer.getFieldU32(ripple::sfSequence);
|
||||
obj[JS(flags)] = offer.getFieldU32(ripple::sfFlags);
|
||||
obj[JS(quality)] = rate.getText();
|
||||
if (offer.isFieldPresent(ripple::sfExpiration))
|
||||
obj["expiration"] = offer.getFieldU32(ripple::sfExpiration);
|
||||
obj[JS(expiration)] = offer.getFieldU32(ripple::sfExpiration);
|
||||
|
||||
offersJson.push_back(obj);
|
||||
};
|
||||
@@ -74,52 +95,39 @@ doAccountOffers(Context const& context)
|
||||
|
||||
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
|
||||
|
||||
if (!request.contains("account"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
|
||||
ripple::AccountID accountID;
|
||||
if (auto const status = getAccount(request, accountID); status)
|
||||
return status;
|
||||
|
||||
if (!request.at("account").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
|
||||
auto rawAcct = context.backend->fetchLedgerObject(
|
||||
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
|
||||
|
||||
auto accountID =
|
||||
accountFromStringStrict(request.at("account").as_string().c_str());
|
||||
if (!rawAcct)
|
||||
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
|
||||
|
||||
if (!accountID)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
std::uint32_t limit;
|
||||
if (auto const status = getLimit(context, limit); status)
|
||||
return status;
|
||||
|
||||
std::uint32_t limit = 200;
|
||||
if (request.contains("limit"))
|
||||
std::optional<std::string> marker = {};
|
||||
if (request.contains(JS(marker)))
|
||||
{
|
||||
if (!request.at("limit").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
|
||||
if (!request.at(JS(marker)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"};
|
||||
|
||||
limit = request.at("limit").as_int64();
|
||||
if (limit <= 0)
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
|
||||
marker = request.at(JS(marker)).as_string().c_str();
|
||||
}
|
||||
|
||||
std::optional<std::string> cursor = {};
|
||||
if (request.contains("marker"))
|
||||
{
|
||||
if (!request.at("marker").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
|
||||
response[JS(account)] = ripple::to_string(accountID);
|
||||
response[JS(limit)] = limit;
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
response[JS(offers)] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonLines = response.at(JS(offers)).as_array();
|
||||
|
||||
cursor = request.at("marker").as_string().c_str();
|
||||
}
|
||||
|
||||
response["account"] = ripple::to_string(*accountID);
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response["offers"] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonLines = response.at("offers").as_array();
|
||||
|
||||
auto const addToResponse = [&](ripple::SLE const& sle) {
|
||||
auto const addToResponse = [&](ripple::SLE&& sle) {
|
||||
if (sle.getType() == ripple::ltOFFER)
|
||||
{
|
||||
if (limit-- == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
addOffer(jsonLines, sle);
|
||||
}
|
||||
|
||||
@@ -128,22 +136,22 @@ doAccountOffers(Context const& context)
|
||||
|
||||
auto next = traverseOwnedNodes(
|
||||
*context.backend,
|
||||
*accountID,
|
||||
accountID,
|
||||
lgrInfo.seq,
|
||||
limit,
|
||||
cursor,
|
||||
marker,
|
||||
context.yield,
|
||||
addToResponse);
|
||||
|
||||
if (auto status = std::get_if<RPC::Status>(&next))
|
||||
return *status;
|
||||
|
||||
auto nextCursor = std::get<RPC::AccountCursor>(next);
|
||||
auto nextMarker = std::get<RPC::AccountCursor>(next);
|
||||
|
||||
if (nextCursor.isNonZero())
|
||||
response["marker"] = nextCursor.toString();
|
||||
if (nextMarker.isNonZero())
|
||||
response[JS(marker)] = nextMarker.toString();
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
} // namespace RPC
|
||||
|
||||
@@ -1,256 +1,67 @@
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <backend/Pg.h>
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <log/Logger.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
#include <util/Profiler.h>
|
||||
|
||||
using namespace clio;
|
||||
|
||||
// local to compilation unit loggers
|
||||
namespace {
|
||||
clio::Logger gLog{"RPC"};
|
||||
} // namespace
|
||||
|
||||
namespace RPC {
|
||||
|
||||
using boost::json::value_to;
|
||||
|
||||
Result
|
||||
doAccountTx(Context const& context)
|
||||
{
|
||||
auto request = context.params;
|
||||
boost::json::object response = {};
|
||||
ripple::AccountID accountID;
|
||||
if (auto const status = getAccount(context.params, accountID); status)
|
||||
return status;
|
||||
|
||||
if (!request.contains("account"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
|
||||
constexpr std::string_view outerFuncName = __func__;
|
||||
auto const maybeResponse = traverseTransactions(
|
||||
context,
|
||||
[&accountID, &outerFuncName](
|
||||
std::shared_ptr<Backend::BackendInterface const> const& backend,
|
||||
std::uint32_t const limit,
|
||||
bool const forward,
|
||||
std::optional<Backend::TransactionsCursor> const& cursorIn,
|
||||
boost::asio::yield_context& yield) {
|
||||
auto [txnsAndCursor, timeDiff] = util::timed([&]() {
|
||||
return backend->fetchAccountTransactions(
|
||||
accountID, limit, forward, cursorIn, yield);
|
||||
});
|
||||
gLog.info() << outerFuncName << " db fetch took " << timeDiff
|
||||
<< " milliseconds - num blobs = "
|
||||
<< txnsAndCursor.txns.size();
|
||||
return txnsAndCursor;
|
||||
});
|
||||
|
||||
if (!request.at("account").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
|
||||
|
||||
auto accountID =
|
||||
accountFromStringStrict(request.at("account").as_string().c_str());
|
||||
|
||||
if (!accountID)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
|
||||
bool binary = false;
|
||||
if (request.contains("binary"))
|
||||
{
|
||||
if (!request.at("binary").is_bool())
|
||||
return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
|
||||
|
||||
binary = request.at("binary").as_bool();
|
||||
}
|
||||
bool forward = false;
|
||||
if (request.contains("forward"))
|
||||
{
|
||||
if (!request.at("forward").is_bool())
|
||||
return Status{Error::rpcINVALID_PARAMS, "forwardNotBool"};
|
||||
|
||||
forward = request.at("forward").as_bool();
|
||||
}
|
||||
|
||||
std::optional<Backend::AccountTransactionsCursor> cursor;
|
||||
|
||||
if (request.contains("marker"))
|
||||
{
|
||||
auto const& obj = request.at("marker").as_object();
|
||||
|
||||
std::optional<std::uint32_t> transactionIndex = {};
|
||||
if (obj.contains("seq"))
|
||||
{
|
||||
if (!obj.at("seq").is_int64())
|
||||
return Status{
|
||||
Error::rpcINVALID_PARAMS, "transactionIndexNotInt"};
|
||||
|
||||
transactionIndex =
|
||||
boost::json::value_to<std::uint32_t>(obj.at("seq"));
|
||||
}
|
||||
|
||||
std::optional<std::uint32_t> ledgerIndex = {};
|
||||
if (obj.contains("ledger"))
|
||||
{
|
||||
if (!obj.at("ledger").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "ledgerIndexNotInt"};
|
||||
|
||||
ledgerIndex =
|
||||
boost::json::value_to<std::uint32_t>(obj.at("ledger"));
|
||||
}
|
||||
|
||||
if (!transactionIndex || !ledgerIndex)
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingLedgerOrSeq"};
|
||||
|
||||
cursor = {*ledgerIndex, *transactionIndex};
|
||||
}
|
||||
|
||||
auto minIndex = context.range.minSequence;
|
||||
if (request.contains("ledger_index_min"))
|
||||
{
|
||||
auto& min = request.at("ledger_index_min");
|
||||
|
||||
if (!min.is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "ledgerSeqMinNotNumber"};
|
||||
|
||||
if (min.as_int64() != -1)
|
||||
{
|
||||
if (context.range.maxSequence < min.as_int64() ||
|
||||
context.range.minSequence > min.as_int64())
|
||||
return Status{
|
||||
Error::rpcINVALID_PARAMS, "ledgerSeqMaxOutOfRange"};
|
||||
else
|
||||
minIndex = value_to<std::uint32_t>(min);
|
||||
}
|
||||
|
||||
if (forward && !cursor)
|
||||
cursor = {minIndex, 0};
|
||||
}
|
||||
|
||||
auto maxIndex = context.range.maxSequence;
|
||||
if (request.contains("ledger_index_max"))
|
||||
{
|
||||
auto& max = request.at("ledger_index_max");
|
||||
|
||||
if (!max.is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "ledgerSeqMaxNotNumber"};
|
||||
|
||||
if (max.as_int64() != -1)
|
||||
{
|
||||
if (context.range.maxSequence < max.as_int64() ||
|
||||
context.range.minSequence > max.as_int64())
|
||||
return Status{
|
||||
Error::rpcINVALID_PARAMS, "ledgerSeqMaxOutOfRange"};
|
||||
else
|
||||
maxIndex = value_to<std::uint32_t>(max);
|
||||
}
|
||||
|
||||
if (minIndex > maxIndex)
|
||||
return Status{Error::rpcINVALID_PARAMS, "invalidIndex"};
|
||||
|
||||
if (!forward && !cursor)
|
||||
cursor = {maxIndex, INT32_MAX};
|
||||
}
|
||||
|
||||
if (request.contains("ledger_index"))
|
||||
{
|
||||
if (!request.at("ledger_index").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "ledgerIndexNotNumber"};
|
||||
|
||||
auto ledgerIndex =
|
||||
boost::json::value_to<std::uint32_t>(request.at("ledger_index"));
|
||||
maxIndex = minIndex = ledgerIndex;
|
||||
}
|
||||
|
||||
if (request.contains("ledger_hash"))
|
||||
{
|
||||
if (!request.at("ledger_hash").is_string())
|
||||
return RPC::Status{
|
||||
RPC::Error::rpcINVALID_PARAMS, "ledgerHashNotString"};
|
||||
|
||||
ripple::uint256 ledgerHash;
|
||||
if (!ledgerHash.parseHex(request.at("ledger_hash").as_string().c_str()))
|
||||
return RPC::Status{
|
||||
RPC::Error::rpcINVALID_PARAMS, "ledgerHashMalformed"};
|
||||
|
||||
auto lgrInfo =
|
||||
context.backend->fetchLedgerByHash(ledgerHash, context.yield);
|
||||
maxIndex = minIndex = lgrInfo->seq;
|
||||
}
|
||||
|
||||
if (!cursor)
|
||||
{
|
||||
if (forward)
|
||||
cursor = {minIndex, 0};
|
||||
else
|
||||
cursor = {maxIndex, INT32_MAX};
|
||||
}
|
||||
|
||||
std::uint32_t limit = 200;
|
||||
if (request.contains("limit"))
|
||||
{
|
||||
if (!request.at("limit").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
|
||||
|
||||
limit = request.at("limit").as_int64();
|
||||
if (limit <= 0)
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
|
||||
|
||||
response["limit"] = limit;
|
||||
}
|
||||
|
||||
boost::json::array txns;
|
||||
auto start = std::chrono::system_clock::now();
|
||||
auto [blobs, retCursor] = context.backend->fetchAccountTransactions(
|
||||
*accountID, limit, forward, cursor, context.yield);
|
||||
|
||||
auto end = std::chrono::system_clock::now();
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " db fetch took "
|
||||
<< ((end - start).count() / 1000000000.0)
|
||||
<< " num blobs = " << blobs.size();
|
||||
|
||||
response["account"] = ripple::to_string(*accountID);
|
||||
|
||||
if (retCursor)
|
||||
{
|
||||
boost::json::object cursorJson;
|
||||
cursorJson["ledger"] = retCursor->ledgerSequence;
|
||||
cursorJson["seq"] = retCursor->transactionIndex;
|
||||
response["marker"] = cursorJson;
|
||||
}
|
||||
|
||||
std::optional<size_t> maxReturnedIndex;
|
||||
std::optional<size_t> minReturnedIndex;
|
||||
for (auto const& txnPlusMeta : blobs)
|
||||
{
|
||||
if (txnPlusMeta.ledgerSequence < minIndex ||
|
||||
txnPlusMeta.ledgerSequence > maxIndex)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__
|
||||
<< " skipping over transactions from incomplete ledger";
|
||||
continue;
|
||||
}
|
||||
|
||||
boost::json::object obj;
|
||||
|
||||
if (!binary)
|
||||
{
|
||||
auto [txn, meta] = toExpandedJson(txnPlusMeta);
|
||||
obj["meta"] = meta;
|
||||
obj["tx"] = txn;
|
||||
obj["tx"].as_object()["ledger_index"] = txnPlusMeta.ledgerSequence;
|
||||
obj["tx"].as_object()["date"] = txnPlusMeta.date;
|
||||
}
|
||||
else
|
||||
{
|
||||
obj["meta"] = ripple::strHex(txnPlusMeta.metadata);
|
||||
obj["tx_blob"] = ripple::strHex(txnPlusMeta.transaction);
|
||||
obj["ledger_index"] = txnPlusMeta.ledgerSequence;
|
||||
obj["date"] = txnPlusMeta.date;
|
||||
}
|
||||
|
||||
txns.push_back(obj);
|
||||
if (!minReturnedIndex || txnPlusMeta.ledgerSequence < *minReturnedIndex)
|
||||
minReturnedIndex = txnPlusMeta.ledgerSequence;
|
||||
if (!maxReturnedIndex || txnPlusMeta.ledgerSequence > *maxReturnedIndex)
|
||||
maxReturnedIndex = txnPlusMeta.ledgerSequence;
|
||||
}
|
||||
|
||||
assert(cursor);
|
||||
if (forward)
|
||||
{
|
||||
response["ledger_index_min"] = cursor->ledgerSequence;
|
||||
if (blobs.size() >= limit)
|
||||
response["ledger_index_max"] = *maxReturnedIndex;
|
||||
else
|
||||
response["ledger_index_max"] = maxIndex;
|
||||
}
|
||||
else
|
||||
{
|
||||
response["ledger_index_max"] = cursor->ledgerSequence;
|
||||
if (blobs.size() >= limit)
|
||||
response["ledger_index_min"] = *minReturnedIndex;
|
||||
else
|
||||
response["ledger_index_min"] = minIndex;
|
||||
}
|
||||
|
||||
response["transactions"] = txns;
|
||||
|
||||
auto end2 = std::chrono::system_clock::now();
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " serialization took "
|
||||
<< ((end2 - end).count() / 1000000000.0);
|
||||
if (auto const status = std::get_if<Status>(&maybeResponse); status)
|
||||
return *status;
|
||||
auto response = std::get<boost::json::object>(maybeResponse);
|
||||
|
||||
response[JS(account)] = ripple::to_string(accountID);
|
||||
return response;
|
||||
} // namespace RPC
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
|
||||
275
src/rpc/handlers/BookChanges.cpp
Normal file
275
src/rpc/handlers/BookChanges.cpp
Normal file
@@ -0,0 +1,275 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/basics/ToString.h>
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
#include <boost/json.hpp>
|
||||
#include <algorithm>
|
||||
|
||||
namespace json = boost::json;
|
||||
using namespace ripple;
|
||||
|
||||
namespace RPC {
|
||||
|
||||
/**
|
||||
* @brief Represents an entry in the book_changes' changes array.
|
||||
*/
|
||||
struct BookChange
|
||||
{
|
||||
STAmount sideAVolume;
|
||||
STAmount sideBVolume;
|
||||
STAmount highRate;
|
||||
STAmount lowRate;
|
||||
STAmount openRate;
|
||||
STAmount closeRate;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Encapsulates the book_changes computations and transformations.
|
||||
*/
|
||||
class BookChanges final
|
||||
{
|
||||
public:
|
||||
BookChanges() = delete; // only accessed via static handle function
|
||||
|
||||
/**
|
||||
* @brief Computes all book_changes for the given transactions.
|
||||
*
|
||||
* @param transactions The transactions to compute book changes for
|
||||
* @return std::vector<BookChange> Book changes
|
||||
*/
|
||||
[[nodiscard]] static std::vector<BookChange>
|
||||
compute(std::vector<Backend::TransactionAndMetadata> const& transactions)
|
||||
{
|
||||
return HandlerImpl{}(transactions);
|
||||
}
|
||||
|
||||
private:
|
||||
class HandlerImpl final
|
||||
{
|
||||
std::map<std::string, BookChange> tally_ = {};
|
||||
std::optional<uint32_t> offerCancel_ = {};
|
||||
|
||||
public:
|
||||
[[nodiscard]] std::vector<BookChange>
|
||||
operator()(
|
||||
std::vector<Backend::TransactionAndMetadata> const& transactions)
|
||||
{
|
||||
for (auto const& tx : transactions)
|
||||
handleBookChange(tx);
|
||||
|
||||
// TODO: rewrite this with std::ranges when compilers catch up
|
||||
std::vector<BookChange> changes;
|
||||
std::transform(
|
||||
std::make_move_iterator(std::begin(tally_)),
|
||||
std::make_move_iterator(std::end(tally_)),
|
||||
std::back_inserter(changes),
|
||||
[](auto obj) { return obj.second; });
|
||||
return changes;
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
handleAffectedNode(STObject const& node)
|
||||
{
|
||||
auto const& metaType = node.getFName();
|
||||
auto const nodeType = node.getFieldU16(sfLedgerEntryType);
|
||||
|
||||
// we only care about ltOFFER objects being modified or
|
||||
// deleted
|
||||
if (nodeType != ltOFFER || metaType == sfCreatedNode)
|
||||
return;
|
||||
|
||||
// if either FF or PF are missing we can't compute
|
||||
// but generally these are cancelled rather than crossed
|
||||
// so skipping them is consistent
|
||||
if (!node.isFieldPresent(sfFinalFields) ||
|
||||
!node.isFieldPresent(sfPreviousFields))
|
||||
return;
|
||||
|
||||
auto const& finalFields =
|
||||
node.peekAtField(sfFinalFields).downcast<STObject>();
|
||||
auto const& previousFields =
|
||||
node.peekAtField(sfPreviousFields).downcast<STObject>();
|
||||
|
||||
// defensive case that should never be hit
|
||||
if (!finalFields.isFieldPresent(sfTakerGets) ||
|
||||
!finalFields.isFieldPresent(sfTakerPays) ||
|
||||
!previousFields.isFieldPresent(sfTakerGets) ||
|
||||
!previousFields.isFieldPresent(sfTakerPays))
|
||||
return;
|
||||
|
||||
// filter out any offers deleted by explicit offer cancels
|
||||
if (metaType == sfDeletedNode && offerCancel_ &&
|
||||
finalFields.getFieldU32(sfSequence) == *offerCancel_)
|
||||
return;
|
||||
|
||||
// compute the difference in gets and pays actually
|
||||
// affected onto the offer
|
||||
auto const deltaGets = finalFields.getFieldAmount(sfTakerGets) -
|
||||
previousFields.getFieldAmount(sfTakerGets);
|
||||
auto const deltaPays = finalFields.getFieldAmount(sfTakerPays) -
|
||||
previousFields.getFieldAmount(sfTakerPays);
|
||||
|
||||
transformAndStore(deltaGets, deltaPays);
|
||||
}
|
||||
|
||||
void
|
||||
transformAndStore(
|
||||
ripple::STAmount const& deltaGets,
|
||||
ripple::STAmount const& deltaPays)
|
||||
{
|
||||
auto const g = to_string(deltaGets.issue());
|
||||
auto const p = to_string(deltaPays.issue());
|
||||
|
||||
auto const noswap =
|
||||
isXRP(deltaGets) ? true : (isXRP(deltaPays) ? false : (g < p));
|
||||
|
||||
auto first = noswap ? deltaGets : deltaPays;
|
||||
auto second = noswap ? deltaPays : deltaGets;
|
||||
|
||||
// defensively programmed, should (probably) never happen
|
||||
if (second == beast::zero)
|
||||
return;
|
||||
|
||||
auto const rate = divide(first, second, noIssue());
|
||||
|
||||
if (first < beast::zero)
|
||||
first = -first;
|
||||
|
||||
if (second < beast::zero)
|
||||
second = -second;
|
||||
|
||||
auto const key = noswap ? (g + '|' + p) : (p + '|' + g);
|
||||
if (tally_.contains(key))
|
||||
{
|
||||
auto& entry = tally_.at(key);
|
||||
|
||||
entry.sideAVolume += first;
|
||||
entry.sideBVolume += second;
|
||||
|
||||
if (entry.highRate < rate)
|
||||
entry.highRate = rate;
|
||||
|
||||
if (entry.lowRate > rate)
|
||||
entry.lowRate = rate;
|
||||
|
||||
entry.closeRate = rate;
|
||||
}
|
||||
else
|
||||
{
|
||||
// TODO: use paranthesized initialization when clang catches up
|
||||
tally_[key] = {
|
||||
first, // sideAVolume
|
||||
second, // sideBVolume
|
||||
rate, // highRate
|
||||
rate, // lowRate
|
||||
rate, // openRate
|
||||
rate, // closeRate
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
handleBookChange(Backend::TransactionAndMetadata const& blob)
|
||||
{
|
||||
auto const [tx, meta] = deserializeTxPlusMeta(blob);
|
||||
if (!tx || !meta || !tx->isFieldPresent(sfTransactionType))
|
||||
return;
|
||||
|
||||
offerCancel_ = shouldCancelOffer(tx);
|
||||
for (auto const& node : meta->getFieldArray(sfAffectedNodes))
|
||||
handleAffectedNode(node);
|
||||
}
|
||||
|
||||
std::optional<uint32_t>
|
||||
shouldCancelOffer(std::shared_ptr<ripple::STTx const> const& tx) const
|
||||
{
|
||||
switch (tx->getFieldU16(sfTransactionType))
|
||||
{
|
||||
// in future if any other ways emerge to cancel an offer
|
||||
// this switch makes them easy to add
|
||||
case ttOFFER_CANCEL:
|
||||
case ttOFFER_CREATE:
|
||||
if (tx->isFieldPresent(sfOfferSequence))
|
||||
return tx->getFieldU32(sfOfferSequence);
|
||||
default:
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
void
|
||||
tag_invoke(json::value_from_tag, json::value& jv, BookChange const& change)
|
||||
{
|
||||
auto amountStr = [](STAmount const& amount) -> std::string {
|
||||
return isXRP(amount) ? to_string(amount.xrp())
|
||||
: to_string(amount.iou());
|
||||
};
|
||||
|
||||
auto currencyStr = [](STAmount const& amount) -> std::string {
|
||||
return isXRP(amount) ? "XRP_drops" : to_string(amount.issue());
|
||||
};
|
||||
|
||||
jv = {
|
||||
{JS(currency_a), currencyStr(change.sideAVolume)},
|
||||
{JS(currency_b), currencyStr(change.sideBVolume)},
|
||||
{JS(volume_a), amountStr(change.sideAVolume)},
|
||||
{JS(volume_b), amountStr(change.sideBVolume)},
|
||||
{JS(high), to_string(change.highRate.iou())},
|
||||
{JS(low), to_string(change.lowRate.iou())},
|
||||
{JS(open), to_string(change.openRate.iou())},
|
||||
{JS(close), to_string(change.closeRate.iou())},
|
||||
};
|
||||
}
|
||||
|
||||
json::object const
|
||||
computeBookChanges(
|
||||
ripple::LedgerInfo const& lgrInfo,
|
||||
std::vector<Backend::TransactionAndMetadata> const& transactions)
|
||||
{
|
||||
return {
|
||||
{JS(type), "bookChanges"},
|
||||
{JS(ledger_index), lgrInfo.seq},
|
||||
{JS(ledger_hash), to_string(lgrInfo.hash)},
|
||||
{JS(ledger_time), lgrInfo.closeTime.time_since_epoch().count()},
|
||||
{JS(changes), json::value_from(BookChanges::compute(transactions))},
|
||||
};
|
||||
}
|
||||
|
||||
Result
|
||||
doBookChanges(Context const& context)
|
||||
{
|
||||
auto const request = context.params;
|
||||
auto const info = ledgerInfoFromRequest(context);
|
||||
if (auto const status = std::get_if<Status>(&info))
|
||||
return *status;
|
||||
|
||||
auto const lgrInfo = std::get<ripple::LedgerInfo>(info);
|
||||
auto const transactions = context.backend->fetchAllTransactionsInLedger(
|
||||
lgrInfo.seq, context.yield);
|
||||
return computeBookChanges(lgrInfo, transactions);
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
@@ -1,16 +1,43 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
#include <ripple/protocol/Indexes.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <ripple/protocol/jss.h>
|
||||
#include <boost/json.hpp>
|
||||
#include <algorithm>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <backend/DBHelpers.h>
|
||||
#include <backend/Pg.h>
|
||||
#include <log/Logger.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
#include <boost/json.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
using namespace clio;
|
||||
|
||||
// local to compilation unit loggers
|
||||
namespace {
|
||||
clio::Logger gLog{"RPC"};
|
||||
} // namespace
|
||||
|
||||
namespace RPC {
|
||||
|
||||
@@ -31,10 +58,10 @@ doBookOffers(Context const& context)
|
||||
if (request.contains("book"))
|
||||
{
|
||||
if (!request.at("book").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "bookNotString"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "bookNotString"};
|
||||
|
||||
if (!bookBase.parseHex(request.at("book").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "invalidBook"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "invalidBook"};
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -48,66 +75,45 @@ doBookOffers(Context const& context)
|
||||
}
|
||||
}
|
||||
|
||||
std::uint32_t limit = 200;
|
||||
if (request.contains("limit"))
|
||||
{
|
||||
if (!request.at("limit").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
|
||||
|
||||
limit = request.at("limit").as_int64();
|
||||
if (limit <= 0)
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
|
||||
}
|
||||
std::uint32_t limit;
|
||||
if (auto const status = getLimit(context, limit); status)
|
||||
return status;
|
||||
|
||||
ripple::AccountID takerID = beast::zero;
|
||||
if (request.contains("taker"))
|
||||
{
|
||||
auto parsed = parseTaker(request["taker"]);
|
||||
if (auto status = std::get_if<Status>(&parsed))
|
||||
return *status;
|
||||
else
|
||||
{
|
||||
takerID = std::get<ripple::AccountID>(parsed);
|
||||
}
|
||||
}
|
||||
if (auto const status = getTaker(request, takerID); status)
|
||||
return status;
|
||||
|
||||
ripple::uint256 cursor = beast::zero;
|
||||
if (request.contains("cursor"))
|
||||
{
|
||||
if (!request.at("cursor").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "cursorNotString"};
|
||||
|
||||
if (!cursor.parseHex(request.at("cursor").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedCursor"};
|
||||
}
|
||||
ripple::uint256 marker = beast::zero;
|
||||
if (auto const status = getHexMarker(request, marker); status)
|
||||
return status;
|
||||
|
||||
auto start = std::chrono::system_clock::now();
|
||||
auto [offers, retCursor] = context.backend->fetchBookOffers(
|
||||
bookBase, lgrInfo.seq, limit, cursor, context.yield);
|
||||
auto [offers, retMarker] = context.backend->fetchBookOffers(
|
||||
bookBase, lgrInfo.seq, limit, marker, context.yield);
|
||||
auto end = std::chrono::system_clock::now();
|
||||
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< "Time loading books: "
|
||||
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
|
||||
.count()
|
||||
<< " milliseconds - request = " << request;
|
||||
gLog.warn() << "Time loading books: "
|
||||
<< std::chrono::duration_cast<std::chrono::milliseconds>(
|
||||
end - start)
|
||||
.count()
|
||||
<< " milliseconds - request = " << request;
|
||||
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
|
||||
response["offers"] = postProcessOrderBook(
|
||||
response[JS(offers)] = postProcessOrderBook(
|
||||
offers, book, takerID, *context.backend, lgrInfo.seq, context.yield);
|
||||
|
||||
auto end2 = std::chrono::system_clock::now();
|
||||
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< "Time transforming to json: "
|
||||
<< std::chrono::duration_cast<std::chrono::milliseconds>(end2 - end)
|
||||
.count()
|
||||
<< " milliseconds - request = " << request;
|
||||
gLog.warn() << "Time transforming to json: "
|
||||
<< std::chrono::duration_cast<std::chrono::milliseconds>(
|
||||
end2 - end)
|
||||
.count()
|
||||
<< " milliseconds - request = " << request;
|
||||
|
||||
if (retCursor)
|
||||
response["marker"] = ripple::strHex(*retCursor);
|
||||
if (retMarker)
|
||||
response["marker"] = ripple::strHex(*retMarker);
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
#include <ripple/protocol/PayChan.h>
|
||||
@@ -27,20 +46,15 @@ doChannelAuthorize(Context const& context)
|
||||
auto request = context.params;
|
||||
boost::json::object response = {};
|
||||
|
||||
if (!request.contains("channel_id"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingChannelID"};
|
||||
if (!request.contains(JS(amount)))
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "missingAmount"};
|
||||
|
||||
if (!request.at("channel_id").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "channelIDNotString"};
|
||||
if (!request.at(JS(amount)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "amountNotString"};
|
||||
|
||||
if (!request.contains("amount"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAmount"};
|
||||
|
||||
if (!request.at("amount").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "amountNotString"};
|
||||
|
||||
if (!request.contains("key_type") && !request.contains("secret"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingKeyTypeOrSecret"};
|
||||
if (!request.contains(JS(key_type)) && !request.contains(JS(secret)))
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "missingKeyTypeOrSecret"};
|
||||
|
||||
auto v = keypairFromRequst(request);
|
||||
if (auto status = std::get_if<Status>(&v))
|
||||
@@ -50,13 +64,15 @@ doChannelAuthorize(Context const& context)
|
||||
std::get<std::pair<ripple::PublicKey, ripple::SecretKey>>(v);
|
||||
|
||||
ripple::uint256 channelId;
|
||||
if (!channelId.parseHex(request.at("channel_id").as_string().c_str()))
|
||||
return Status{Error::rpcCHANNEL_MALFORMED, "malformedChannelID"};
|
||||
if (auto const status = getChannelId(request, channelId); status)
|
||||
return status;
|
||||
|
||||
auto optDrops = ripple::to_uint64(request.at("amount").as_string().c_str());
|
||||
auto optDrops =
|
||||
ripple::to_uint64(request.at(JS(amount)).as_string().c_str());
|
||||
|
||||
if (!optDrops)
|
||||
return Status{Error::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"};
|
||||
return Status{
|
||||
RippledError::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"};
|
||||
|
||||
std::uint64_t drops = *optDrops;
|
||||
|
||||
@@ -67,11 +83,11 @@ doChannelAuthorize(Context const& context)
|
||||
try
|
||||
{
|
||||
auto const buf = ripple::sign(pk, sk, msg.slice());
|
||||
response["signature"] = ripple::strHex(buf);
|
||||
response[JS(signature)] = ripple::strHex(buf);
|
||||
}
|
||||
catch (std::exception&)
|
||||
{
|
||||
return Status{Error::rpcINTERNAL};
|
||||
return Status{RippledError::rpcINTERNAL};
|
||||
}
|
||||
|
||||
return response;
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/protocol/ErrorCodes.h>
|
||||
#include <ripple/protocol/PayChan.h>
|
||||
@@ -16,33 +35,28 @@ doChannelVerify(Context const& context)
|
||||
auto request = context.params;
|
||||
boost::json::object response = {};
|
||||
|
||||
if (!request.contains("channel_id"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingChannelID"};
|
||||
if (!request.contains(JS(amount)))
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "missingAmount"};
|
||||
|
||||
if (!request.at("channel_id").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "channelIDNotString"};
|
||||
if (!request.at(JS(amount)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "amountNotString"};
|
||||
|
||||
if (!request.contains("amount"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAmount"};
|
||||
if (!request.contains(JS(signature)))
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "missingSignature"};
|
||||
|
||||
if (!request.at("amount").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "amountNotString"};
|
||||
if (!request.at(JS(signature)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "signatureNotString"};
|
||||
|
||||
if (!request.contains("signature"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingSignature"};
|
||||
if (!request.contains(JS(public_key)))
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "missingPublicKey"};
|
||||
|
||||
if (!request.at("signature").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "signatureNotString"};
|
||||
|
||||
if (!request.contains("public_key"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingPublicKey"};
|
||||
|
||||
if (!request.at("public_key").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "publicKeyNotString"};
|
||||
if (!request.at(JS(public_key)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "publicKeyNotString"};
|
||||
|
||||
std::optional<ripple::PublicKey> pk;
|
||||
{
|
||||
std::string const strPk = request.at("public_key").as_string().c_str();
|
||||
std::string const strPk =
|
||||
request.at(JS(public_key)).as_string().c_str();
|
||||
pk = ripple::parseBase58<ripple::PublicKey>(
|
||||
ripple::TokenType::AccountPublic, strPk);
|
||||
|
||||
@@ -50,38 +64,42 @@ doChannelVerify(Context const& context)
|
||||
{
|
||||
auto pkHex = ripple::strUnHex(strPk);
|
||||
if (!pkHex)
|
||||
return Status{Error::rpcPUBLIC_MALFORMED, "malformedPublicKey"};
|
||||
return Status{
|
||||
RippledError::rpcPUBLIC_MALFORMED, "malformedPublicKey"};
|
||||
|
||||
auto const pkType =
|
||||
ripple::publicKeyType(ripple::makeSlice(*pkHex));
|
||||
if (!pkType)
|
||||
return Status{Error::rpcPUBLIC_MALFORMED, "invalidKeyType"};
|
||||
return Status{
|
||||
RippledError::rpcPUBLIC_MALFORMED, "invalidKeyType"};
|
||||
|
||||
pk.emplace(ripple::makeSlice(*pkHex));
|
||||
}
|
||||
}
|
||||
|
||||
ripple::uint256 channelId;
|
||||
if (!channelId.parseHex(request.at("channel_id").as_string().c_str()))
|
||||
return Status{Error::rpcCHANNEL_MALFORMED, "malformedChannelID"};
|
||||
if (auto const status = getChannelId(request, channelId); status)
|
||||
return status;
|
||||
|
||||
auto optDrops = ripple::to_uint64(request.at("amount").as_string().c_str());
|
||||
auto optDrops =
|
||||
ripple::to_uint64(request.at(JS(amount)).as_string().c_str());
|
||||
|
||||
if (!optDrops)
|
||||
return Status{Error::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"};
|
||||
return Status{
|
||||
RippledError::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"};
|
||||
|
||||
std::uint64_t drops = *optDrops;
|
||||
|
||||
auto sig = ripple::strUnHex(request.at("signature").as_string().c_str());
|
||||
auto sig = ripple::strUnHex(request.at(JS(signature)).as_string().c_str());
|
||||
|
||||
if (!sig || !sig->size())
|
||||
return Status{Error::rpcINVALID_PARAMS, "invalidSignature"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "invalidSignature"};
|
||||
|
||||
ripple::Serializer msg;
|
||||
ripple::serializePayChanAuthorization(
|
||||
msg, channelId, ripple::XRPAmount(drops));
|
||||
|
||||
response["signature_verified"] =
|
||||
response[JS(signature_verified)] =
|
||||
ripple::verify(*pk, msg.slice(), ripple::makeSlice(*sig), true);
|
||||
|
||||
return response;
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
@@ -9,17 +28,9 @@ doGatewayBalances(Context const& context)
|
||||
auto request = context.params;
|
||||
boost::json::object response = {};
|
||||
|
||||
if (!request.contains("account"))
|
||||
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
|
||||
|
||||
if (!request.at("account").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
|
||||
|
||||
auto accountID =
|
||||
accountFromStringStrict(request.at("account").as_string().c_str());
|
||||
|
||||
if (!accountID)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
ripple::AccountID accountID;
|
||||
if (auto const status = getAccount(request, accountID); status)
|
||||
return status;
|
||||
|
||||
auto v = ledgerInfoFromRequest(context);
|
||||
if (auto status = std::get_if<Status>(&v))
|
||||
@@ -33,7 +44,7 @@ doGatewayBalances(Context const& context)
|
||||
std::map<ripple::AccountID, std::vector<ripple::STAmount>> frozenBalances;
|
||||
std::set<ripple::AccountID> hotWallets;
|
||||
|
||||
if (request.contains("hot_wallet"))
|
||||
if (request.contains(JS(hotwallet)))
|
||||
{
|
||||
auto getAccountID =
|
||||
[](auto const& j) -> std::optional<ripple::AccountID> {
|
||||
@@ -52,7 +63,7 @@ doGatewayBalances(Context const& context)
|
||||
return {};
|
||||
};
|
||||
|
||||
auto const& hw = request.at("hot_wallet");
|
||||
auto const& hw = request.at(JS(hotwallet));
|
||||
bool valid = true;
|
||||
|
||||
// null is treated as a valid 0-sized array of hotwallet
|
||||
@@ -81,13 +92,13 @@ doGatewayBalances(Context const& context)
|
||||
|
||||
if (!valid)
|
||||
{
|
||||
response["error"] = "invalidHotWallet";
|
||||
response[JS(error)] = "invalidHotWallet";
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
// Traverse the cold wallet's trust lines
|
||||
auto const addToResponse = [&](ripple::SLE const& sle) {
|
||||
auto const addToResponse = [&](ripple::SLE&& sle) {
|
||||
if (sle.getType() == ripple::ltRIPPLE_STATE)
|
||||
{
|
||||
ripple::STAmount balance = sle.getFieldAmount(ripple::sfBalance);
|
||||
@@ -118,7 +129,7 @@ doGatewayBalances(Context const& context)
|
||||
if (hotWallets.count(peer) > 0)
|
||||
{
|
||||
// This is a specified hot wallet
|
||||
hotBalances[peer].push_back(balance);
|
||||
hotBalances[peer].push_back(-balance);
|
||||
}
|
||||
else if (balSign > 0)
|
||||
{
|
||||
@@ -128,7 +139,7 @@ doGatewayBalances(Context const& context)
|
||||
else if (freeze)
|
||||
{
|
||||
// An obligation the gateway has frozen
|
||||
frozenBalances[peer].push_back(balance);
|
||||
frozenBalances[peer].push_back(-balance);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -146,14 +157,16 @@ doGatewayBalances(Context const& context)
|
||||
return true;
|
||||
};
|
||||
|
||||
traverseOwnedNodes(
|
||||
auto result = traverseOwnedNodes(
|
||||
*context.backend,
|
||||
*accountID,
|
||||
accountID,
|
||||
lgrInfo.seq,
|
||||
std::numeric_limits<std::uint32_t>::max(),
|
||||
{},
|
||||
context.yield,
|
||||
addToResponse);
|
||||
if (auto status = std::get_if<RPC::Status>(&result))
|
||||
return *status;
|
||||
|
||||
if (!sums.empty())
|
||||
{
|
||||
@@ -162,7 +175,7 @@ doGatewayBalances(Context const& context)
|
||||
{
|
||||
obj[ripple::to_string(k)] = v.getText();
|
||||
}
|
||||
response["obligations"] = std::move(obj);
|
||||
response[JS(obligations)] = std::move(obj);
|
||||
}
|
||||
|
||||
auto toJson =
|
||||
@@ -177,9 +190,9 @@ doGatewayBalances(Context const& context)
|
||||
for (auto const& balance : accBalances)
|
||||
{
|
||||
boost::json::object entry;
|
||||
entry["currency"] =
|
||||
entry[JS(currency)] =
|
||||
ripple::to_string(balance.issue().currency);
|
||||
entry["value"] = balance.getText();
|
||||
entry[JS(value)] = balance.getText();
|
||||
arr.push_back(std::move(entry));
|
||||
}
|
||||
obj[ripple::to_string(accId)] = std::move(arr);
|
||||
@@ -188,15 +201,22 @@ doGatewayBalances(Context const& context)
|
||||
return obj;
|
||||
};
|
||||
|
||||
auto containsHotWallet = [&](auto const& hw) {
|
||||
return hotBalances.contains(hw);
|
||||
};
|
||||
if (not std::all_of(
|
||||
hotWallets.begin(), hotWallets.end(), containsHotWallet))
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "invalidHotWallet"};
|
||||
|
||||
if (auto balances = toJson(hotBalances); balances.size())
|
||||
response["balances"] = balances;
|
||||
response[JS(balances)] = balances;
|
||||
if (auto balances = toJson(frozenBalances); balances.size())
|
||||
response["frozen_balances"] = balances;
|
||||
response[JS(frozen_balances)] = balances;
|
||||
if (auto balances = toJson(assets); assets.size())
|
||||
response["assets"] = toJson(assets);
|
||||
response["account"] = request.at("account");
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(assets)] = toJson(assets);
|
||||
response[JS(account)] = request.at(JS(account));
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
return response;
|
||||
}
|
||||
} // namespace RPC
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
@@ -10,41 +29,48 @@ doLedger(Context const& context)
|
||||
boost::json::object response = {};
|
||||
|
||||
bool binary = false;
|
||||
if (params.contains("binary"))
|
||||
if (params.contains(JS(binary)))
|
||||
{
|
||||
if (!params.at("binary").is_bool())
|
||||
return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
|
||||
if (!params.at(JS(binary)).is_bool())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "binaryFlagNotBool"};
|
||||
|
||||
binary = params.at("binary").as_bool();
|
||||
binary = params.at(JS(binary)).as_bool();
|
||||
}
|
||||
|
||||
bool transactions = false;
|
||||
if (params.contains("transactions"))
|
||||
if (params.contains(JS(transactions)))
|
||||
{
|
||||
if (!params.at("transactions").is_bool())
|
||||
return Status{Error::rpcINVALID_PARAMS, "transactionsFlagNotBool"};
|
||||
if (!params.at(JS(transactions)).is_bool())
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "transactionsFlagNotBool"};
|
||||
|
||||
transactions = params.at("transactions").as_bool();
|
||||
transactions = params.at(JS(transactions)).as_bool();
|
||||
}
|
||||
|
||||
bool expand = false;
|
||||
if (params.contains("expand"))
|
||||
if (params.contains(JS(expand)))
|
||||
{
|
||||
if (!params.at("expand").is_bool())
|
||||
return Status{Error::rpcINVALID_PARAMS, "expandFlagNotBool"};
|
||||
if (!params.at(JS(expand)).is_bool())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "expandFlagNotBool"};
|
||||
|
||||
expand = params.at("expand").as_bool();
|
||||
expand = params.at(JS(expand)).as_bool();
|
||||
}
|
||||
|
||||
bool diff = false;
|
||||
if (params.contains("diff"))
|
||||
{
|
||||
if (!params.at("diff").is_bool())
|
||||
return Status{Error::rpcINVALID_PARAMS, "diffFlagNotBool"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "diffFlagNotBool"};
|
||||
|
||||
diff = params.at("diff").as_bool();
|
||||
}
|
||||
|
||||
if (params.contains(JS(full)))
|
||||
return Status{RippledError::rpcNOT_SUPPORTED};
|
||||
|
||||
if (params.contains(JS(accounts)))
|
||||
return Status{RippledError::rpcNOT_SUPPORTED};
|
||||
|
||||
auto v = ledgerInfoFromRequest(context);
|
||||
if (auto status = std::get_if<Status>(&v))
|
||||
return *status;
|
||||
@@ -54,35 +80,34 @@ doLedger(Context const& context)
|
||||
boost::json::object header;
|
||||
if (binary)
|
||||
{
|
||||
header["ledger_data"] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
|
||||
header[JS(ledger_data)] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
|
||||
}
|
||||
else
|
||||
{
|
||||
header["accepted"] = true;
|
||||
header["account_hash"] = ripple::strHex(lgrInfo.accountHash);
|
||||
header["close_flags"] = lgrInfo.closeFlags;
|
||||
header["close_time"] = lgrInfo.closeTime.time_since_epoch().count();
|
||||
header["close_time_human"] = ripple::to_string(lgrInfo.closeTime);
|
||||
;
|
||||
header["close_time_resolution"] = lgrInfo.closeTimeResolution.count();
|
||||
header["closed"] = true;
|
||||
header["hash"] = ripple::strHex(lgrInfo.hash);
|
||||
header["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
header["ledger_index"] = std::to_string(lgrInfo.seq);
|
||||
header["parent_close_time"] =
|
||||
header[JS(accepted)] = true;
|
||||
header[JS(account_hash)] = ripple::strHex(lgrInfo.accountHash);
|
||||
header[JS(close_flags)] = lgrInfo.closeFlags;
|
||||
header[JS(close_time)] = lgrInfo.closeTime.time_since_epoch().count();
|
||||
header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime);
|
||||
header[JS(close_time_resolution)] = lgrInfo.closeTimeResolution.count();
|
||||
header[JS(closed)] = true;
|
||||
header[JS(hash)] = ripple::strHex(lgrInfo.hash);
|
||||
header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
header[JS(ledger_index)] = std::to_string(lgrInfo.seq);
|
||||
header[JS(parent_close_time)] =
|
||||
lgrInfo.parentCloseTime.time_since_epoch().count();
|
||||
header["parent_hash"] = ripple::strHex(lgrInfo.parentHash);
|
||||
header["seqNum"] = std::to_string(lgrInfo.seq);
|
||||
header["totalCoins"] = ripple::to_string(lgrInfo.drops);
|
||||
header["total_coins"] = ripple::to_string(lgrInfo.drops);
|
||||
header["transaction_hash"] = ripple::strHex(lgrInfo.txHash);
|
||||
header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash);
|
||||
header[JS(seqNum)] = std::to_string(lgrInfo.seq);
|
||||
header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops);
|
||||
header[JS(total_coins)] = ripple::to_string(lgrInfo.drops);
|
||||
header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash);
|
||||
}
|
||||
header["closed"] = true;
|
||||
header[JS(closed)] = true;
|
||||
|
||||
if (transactions)
|
||||
{
|
||||
header["transactions"] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonTxs = header.at("transactions").as_array();
|
||||
header[JS(transactions)] = boost::json::value(boost::json::array_kind);
|
||||
boost::json::array& jsonTxs = header.at(JS(transactions)).as_array();
|
||||
if (expand)
|
||||
{
|
||||
auto txns = context.backend->fetchAllTransactionsInLedger(
|
||||
@@ -98,14 +123,14 @@ doLedger(Context const& context)
|
||||
{
|
||||
auto [txn, meta] = toExpandedJson(obj);
|
||||
entry = txn;
|
||||
entry["metaData"] = meta;
|
||||
entry[JS(metaData)] = meta;
|
||||
}
|
||||
else
|
||||
{
|
||||
entry["tx_blob"] = ripple::strHex(obj.transaction);
|
||||
entry["meta"] = ripple::strHex(obj.metadata);
|
||||
entry[JS(tx_blob)] = ripple::strHex(obj.transaction);
|
||||
entry[JS(meta)] = ripple::strHex(obj.metadata);
|
||||
}
|
||||
// entry["ledger_index"] = obj.ledgerSequence;
|
||||
// entry[JS(ledger_index)] = obj.ledgerSequence;
|
||||
return entry;
|
||||
});
|
||||
}
|
||||
@@ -133,7 +158,7 @@ doLedger(Context const& context)
|
||||
for (auto const& obj : diff)
|
||||
{
|
||||
boost::json::object entry;
|
||||
entry["id"] = ripple::strHex(obj.key);
|
||||
entry["object_id"] = ripple::strHex(obj.key);
|
||||
if (binary)
|
||||
entry["object"] = ripple::strHex(obj.blob);
|
||||
else if (obj.blob.size())
|
||||
@@ -149,9 +174,9 @@ doLedger(Context const& context)
|
||||
}
|
||||
}
|
||||
|
||||
response["ledger"] = header;
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response[JS(ledger)] = header;
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
return response;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,9 +1,30 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/LedgerToJson.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <log/Logger.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
#include <boost/json.hpp>
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
// Get state nodes from a ledger
|
||||
// Inputs:
|
||||
// limit: integer, maximum number of entries
|
||||
@@ -18,6 +39,13 @@
|
||||
//
|
||||
//
|
||||
|
||||
using namespace clio;
|
||||
|
||||
// local to compilation unit loggers
|
||||
namespace {
|
||||
clio::Logger gLog{"RPC"};
|
||||
} // namespace
|
||||
|
||||
namespace RPC {
|
||||
|
||||
using boost::json::value_to;
|
||||
@@ -28,54 +56,49 @@ doLedgerData(Context const& context)
|
||||
auto request = context.params;
|
||||
boost::json::object response = {};
|
||||
|
||||
bool binary = false;
|
||||
if (request.contains("binary"))
|
||||
{
|
||||
if (!request.at("binary").is_bool())
|
||||
return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
|
||||
bool const binary = getBool(request, "binary", false);
|
||||
|
||||
binary = request.at("binary").as_bool();
|
||||
}
|
||||
std::uint32_t limit;
|
||||
if (auto const status = getLimit(context, limit); status)
|
||||
return status;
|
||||
|
||||
std::size_t limit = binary ? 2048 : 256;
|
||||
if (request.contains("limit"))
|
||||
{
|
||||
if (!request.at("limit").is_int64())
|
||||
return Status{Error::rpcINVALID_PARAMS, "limitNotInteger"};
|
||||
if (!binary)
|
||||
limit = std::clamp(limit, {1}, {256});
|
||||
|
||||
limit = boost::json::value_to<int>(request.at("limit"));
|
||||
}
|
||||
bool outOfOrder = false;
|
||||
if (request.contains("out_of_order"))
|
||||
{
|
||||
if (!request.at("out_of_order").is_bool())
|
||||
return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "binaryFlagNotBool"};
|
||||
outOfOrder = request.at("out_of_order").as_bool();
|
||||
}
|
||||
|
||||
std::optional<ripple::uint256> cursor;
|
||||
std::optional<uint32_t> diffCursor;
|
||||
if (request.contains("marker"))
|
||||
std::optional<ripple::uint256> marker;
|
||||
std::optional<uint32_t> diffMarker;
|
||||
if (request.contains(JS(marker)))
|
||||
{
|
||||
if (!request.at("marker").is_string())
|
||||
if (!request.at(JS(marker)).is_string())
|
||||
{
|
||||
if (outOfOrder)
|
||||
{
|
||||
if (!request.at("marker").is_int64())
|
||||
if (!request.at(JS(marker)).is_int64())
|
||||
return Status{
|
||||
Error::rpcINVALID_PARAMS, "markerNotStringOrInt"};
|
||||
diffCursor = value_to<uint32_t>(request.at("marker"));
|
||||
RippledError::rpcINVALID_PARAMS,
|
||||
"markerNotStringOrInt"};
|
||||
diffMarker = value_to<uint32_t>(request.at(JS(marker)));
|
||||
}
|
||||
else
|
||||
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "markerNotString"};
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug) << __func__ << " : parsing marker";
|
||||
gLog.debug() << "Parsing marker";
|
||||
|
||||
cursor = ripple::uint256{};
|
||||
if (!cursor->parseHex(request.at("marker").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "markerMalformed"};
|
||||
marker = ripple::uint256{};
|
||||
if (!marker->parseHex(request.at(JS(marker)).as_string().c_str()))
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "markerMalformed"};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,49 +107,59 @@ doLedgerData(Context const& context)
|
||||
return *status;
|
||||
|
||||
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
|
||||
|
||||
boost::json::object header;
|
||||
// no cursor means this is the first call, so we return header info
|
||||
if (!cursor)
|
||||
// no marker means this is the first call, so we return header info
|
||||
if (!request.contains(JS(marker)))
|
||||
{
|
||||
if (binary)
|
||||
{
|
||||
header["ledger_data"] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
|
||||
header[JS(ledger_data)] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
|
||||
}
|
||||
else
|
||||
{
|
||||
header["accepted"] = true;
|
||||
header["account_hash"] = ripple::strHex(lgrInfo.accountHash);
|
||||
header["close_flags"] = lgrInfo.closeFlags;
|
||||
header["close_time"] = lgrInfo.closeTime.time_since_epoch().count();
|
||||
header["close_time_human"] = ripple::to_string(lgrInfo.closeTime);
|
||||
;
|
||||
header["close_time_resolution"] =
|
||||
header[JS(accepted)] = true;
|
||||
header[JS(account_hash)] = ripple::strHex(lgrInfo.accountHash);
|
||||
header[JS(close_flags)] = lgrInfo.closeFlags;
|
||||
header[JS(close_time)] =
|
||||
lgrInfo.closeTime.time_since_epoch().count();
|
||||
header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime);
|
||||
header[JS(close_time_resolution)] =
|
||||
lgrInfo.closeTimeResolution.count();
|
||||
header["closed"] = true;
|
||||
header["hash"] = ripple::strHex(lgrInfo.hash);
|
||||
header["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
header["ledger_index"] = std::to_string(lgrInfo.seq);
|
||||
header["parent_close_time"] =
|
||||
header[JS(hash)] = ripple::strHex(lgrInfo.hash);
|
||||
header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
header[JS(ledger_index)] = std::to_string(lgrInfo.seq);
|
||||
header[JS(parent_close_time)] =
|
||||
lgrInfo.parentCloseTime.time_since_epoch().count();
|
||||
header["parent_hash"] = ripple::strHex(lgrInfo.parentHash);
|
||||
header["seqNum"] = std::to_string(lgrInfo.seq);
|
||||
header["totalCoins"] = ripple::to_string(lgrInfo.drops);
|
||||
header["total_coins"] = ripple::to_string(lgrInfo.drops);
|
||||
header["transaction_hash"] = ripple::strHex(lgrInfo.txHash);
|
||||
|
||||
response["ledger"] = header;
|
||||
header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash);
|
||||
header[JS(seqNum)] = std::to_string(lgrInfo.seq);
|
||||
header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops);
|
||||
header[JS(total_coins)] = ripple::to_string(lgrInfo.drops);
|
||||
header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash);
|
||||
}
|
||||
|
||||
header[JS(closed)] = true;
|
||||
response[JS(ledger)] = header;
|
||||
}
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
else
|
||||
{
|
||||
if (!outOfOrder &&
|
||||
!context.backend->fetchLedgerObject(
|
||||
*marker, lgrInfo.seq, context.yield))
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "markerDoesNotExist"};
|
||||
}
|
||||
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
|
||||
auto start = std::chrono::system_clock::now();
|
||||
std::vector<Backend::LedgerObject> results;
|
||||
if (diffCursor)
|
||||
if (diffMarker)
|
||||
{
|
||||
assert(outOfOrder);
|
||||
auto diff =
|
||||
context.backend->fetchLedgerDiff(*diffCursor, context.yield);
|
||||
context.backend->fetchLedgerDiff(*diffMarker, context.yield);
|
||||
std::vector<ripple::uint256> keys;
|
||||
for (auto&& [key, object] : diff)
|
||||
{
|
||||
@@ -143,13 +176,13 @@ doLedgerData(Context const& context)
|
||||
if (obj.size())
|
||||
results.push_back({std::move(keys[i]), std::move(obj)});
|
||||
}
|
||||
if (*diffCursor > lgrInfo.seq)
|
||||
response["marker"] = *diffCursor - 1;
|
||||
if (*diffMarker > lgrInfo.seq)
|
||||
response["marker"] = *diffMarker - 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto page = context.backend->fetchLedgerPage(
|
||||
cursor, lgrInfo.seq, limit, outOfOrder, context.yield);
|
||||
marker, lgrInfo.seq, limit, outOfOrder, context.yield);
|
||||
results = std::move(page.objects);
|
||||
if (page.cursor)
|
||||
response["marker"] = ripple::strHex(*(page.cursor));
|
||||
@@ -163,9 +196,8 @@ doLedgerData(Context const& context)
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(end - start)
|
||||
.count();
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " number of results = " << results.size()
|
||||
<< " fetched in " << time << " microseconds";
|
||||
gLog.debug() << "Number of results = " << results.size() << " fetched in "
|
||||
<< time << " microseconds";
|
||||
boost::json::array objects;
|
||||
objects.reserve(results.size());
|
||||
for (auto const& [key, object] : results)
|
||||
@@ -175,21 +207,22 @@ doLedgerData(Context const& context)
|
||||
if (binary)
|
||||
{
|
||||
boost::json::object entry;
|
||||
entry["data"] = ripple::serializeHex(sle);
|
||||
entry["index"] = ripple::to_string(sle.key());
|
||||
entry[JS(data)] = ripple::serializeHex(sle);
|
||||
entry[JS(index)] = ripple::to_string(sle.key());
|
||||
objects.push_back(std::move(entry));
|
||||
}
|
||||
else
|
||||
objects.push_back(toJson(sle));
|
||||
}
|
||||
response["state"] = std::move(objects);
|
||||
response[JS(state)] = std::move(objects);
|
||||
if (outOfOrder)
|
||||
response["cache_full"] = context.backend->cache().isFull();
|
||||
auto end2 = std::chrono::system_clock::now();
|
||||
|
||||
time = std::chrono::duration_cast<std::chrono::microseconds>(end2 - end)
|
||||
.count();
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " number of results = " << results.size()
|
||||
<< " serialized in " << time << " microseconds";
|
||||
gLog.debug() << "Number of results = " << results.size()
|
||||
<< " serialized in " << time << " microseconds";
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/protocol/Indexes.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <boost/json.hpp>
|
||||
@@ -20,8 +39,7 @@ doLedgerEntry(Context const& context)
|
||||
auto request = context.params;
|
||||
boost::json::object response = {};
|
||||
|
||||
bool binary =
|
||||
request.contains("binary") ? request.at("binary").as_bool() : false;
|
||||
bool const binary = getBool(request, "binary", false);
|
||||
|
||||
auto v = ledgerInfoFromRequest(context);
|
||||
if (auto status = std::get_if<Status>(&v))
|
||||
@@ -30,137 +48,153 @@ doLedgerEntry(Context const& context)
|
||||
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
|
||||
|
||||
ripple::uint256 key;
|
||||
if (request.contains("index"))
|
||||
{
|
||||
if (!request.at("index").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "indexNotString"};
|
||||
|
||||
if (!key.parseHex(request.at("index").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedIndex"};
|
||||
}
|
||||
else if (request.contains("account_root"))
|
||||
// Note: according to docs, only 1 of the below should be specified at any
|
||||
// time. see https://xrpl.org/ledger_entry.html#ledger_entry
|
||||
if (request.contains(JS(index)))
|
||||
{
|
||||
if (!request.at("account_root").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "account_rootNotString"};
|
||||
if (!request.at(JS(index)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "indexNotString"};
|
||||
|
||||
if (!key.parseHex(request.at(JS(index)).as_string().c_str()))
|
||||
return Status{ClioError::rpcMALFORMED_REQUEST};
|
||||
}
|
||||
else if (request.contains(JS(account_root)))
|
||||
{
|
||||
if (!request.at(JS(account_root)).is_string())
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "account_rootNotString"};
|
||||
|
||||
auto const account = ripple::parseBase58<ripple::AccountID>(
|
||||
request.at("account_root").as_string().c_str());
|
||||
request.at(JS(account_root)).as_string().c_str());
|
||||
if (!account || account->isZero())
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAddress"};
|
||||
return Status{ClioError::rpcMALFORMED_ADDRESS};
|
||||
else
|
||||
key = ripple::keylet::account(*account).key;
|
||||
}
|
||||
else if (request.contains("check"))
|
||||
else if (request.contains(JS(check)))
|
||||
{
|
||||
if (!request.at("check").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "checkNotString"};
|
||||
if (!request.at(JS(check)).is_string())
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "checkNotString"};
|
||||
|
||||
if (!key.parseHex(request.at("check").as_string().c_str()))
|
||||
if (!key.parseHex(request.at(JS(check)).as_string().c_str()))
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "checkMalformed"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "checkMalformed"};
|
||||
}
|
||||
}
|
||||
else if (request.contains("deposit_preauth"))
|
||||
else if (request.contains(JS(deposit_preauth)))
|
||||
{
|
||||
if (!request.at("deposit_preauth").is_object())
|
||||
if (!request.at(JS(deposit_preauth)).is_object())
|
||||
{
|
||||
if (!request.at("deposit_preauth").is_string() ||
|
||||
if (!request.at(JS(deposit_preauth)).is_string() ||
|
||||
!key.parseHex(
|
||||
request.at("deposit_preauth").as_string().c_str()))
|
||||
request.at(JS(deposit_preauth)).as_string().c_str()))
|
||||
{
|
||||
return Status{
|
||||
Error::rpcINVALID_PARAMS, "deposit_preauthMalformed"};
|
||||
RippledError::rpcINVALID_PARAMS,
|
||||
"deposit_preauthMalformed"};
|
||||
}
|
||||
}
|
||||
else if (
|
||||
!request.at("deposit_preauth").as_object().contains("owner") ||
|
||||
!request.at("deposit_preauth").as_object().at("owner").is_string())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "ownerNotString"};
|
||||
}
|
||||
else if (
|
||||
!request.at("deposit_preauth").as_object().contains("authorized") ||
|
||||
!request.at("deposit_preauth")
|
||||
!request.at(JS(deposit_preauth)).as_object().contains(JS(owner)) ||
|
||||
!request.at(JS(deposit_preauth))
|
||||
.as_object()
|
||||
.at("authorized")
|
||||
.at(JS(owner))
|
||||
.is_string())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "authorizedNotString"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "malformedOwner"};
|
||||
}
|
||||
else if (
|
||||
!request.at(JS(deposit_preauth))
|
||||
.as_object()
|
||||
.contains(JS(authorized)) ||
|
||||
!request.at(JS(deposit_preauth))
|
||||
.as_object()
|
||||
.at(JS(authorized))
|
||||
.is_string())
|
||||
{
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "authorizedNotString"};
|
||||
}
|
||||
else
|
||||
{
|
||||
boost::json::object const& deposit_preauth =
|
||||
request.at("deposit_preauth").as_object();
|
||||
request.at(JS(deposit_preauth)).as_object();
|
||||
|
||||
auto const owner = ripple::parseBase58<ripple::AccountID>(
|
||||
deposit_preauth.at("owner").as_string().c_str());
|
||||
deposit_preauth.at(JS(owner)).as_string().c_str());
|
||||
|
||||
auto const authorized = ripple::parseBase58<ripple::AccountID>(
|
||||
deposit_preauth.at("authorized").as_string().c_str());
|
||||
deposit_preauth.at(JS(authorized)).as_string().c_str());
|
||||
|
||||
if (!owner)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedOwner"};
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "malformedOwner"};
|
||||
else if (!authorized)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAuthorized"};
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "malformedAuthorized"};
|
||||
else
|
||||
key = ripple::keylet::depositPreauth(*owner, *authorized).key;
|
||||
}
|
||||
}
|
||||
else if (request.contains("directory"))
|
||||
else if (request.contains(JS(directory)))
|
||||
{
|
||||
if (!request.at("directory").is_object())
|
||||
if (!request.at(JS(directory)).is_object())
|
||||
{
|
||||
if (!request.at("directory").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "directoryNotString"};
|
||||
if (!request.at(JS(directory)).is_string())
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "directoryNotString"};
|
||||
|
||||
if (!key.parseHex(request.at("directory").as_string().c_str()))
|
||||
if (!key.parseHex(request.at(JS(directory)).as_string().c_str()))
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedDirectory"};
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "malformedDirectory"};
|
||||
}
|
||||
}
|
||||
else if (
|
||||
request.at("directory").as_object().contains("sub_index") &&
|
||||
!request.at("directory").as_object().at("sub_index").is_int64())
|
||||
request.at(JS(directory)).as_object().contains(JS(sub_index)) &&
|
||||
!request.at(JS(directory)).as_object().at(JS(sub_index)).is_int64())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "sub_indexNotInt"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "sub_indexNotInt"};
|
||||
}
|
||||
else
|
||||
{
|
||||
auto directory = request.at("directory").as_object();
|
||||
std::uint64_t subIndex = directory.contains("sub_index")
|
||||
auto directory = request.at(JS(directory)).as_object();
|
||||
std::uint64_t subIndex = directory.contains(JS(sub_index))
|
||||
? boost::json::value_to<std::uint64_t>(
|
||||
directory.at("sub_index"))
|
||||
directory.at(JS(sub_index)))
|
||||
: 0;
|
||||
|
||||
if (directory.contains("dir_root"))
|
||||
if (directory.contains(JS(dir_root)))
|
||||
{
|
||||
ripple::uint256 uDirRoot;
|
||||
|
||||
if (directory.contains("owner"))
|
||||
if (directory.contains(JS(owner)))
|
||||
{
|
||||
// May not specify both dir_root and owner.
|
||||
return Status{
|
||||
Error::rpcINVALID_PARAMS,
|
||||
RippledError::rpcINVALID_PARAMS,
|
||||
"mayNotSpecifyBothDirRootAndOwner"};
|
||||
}
|
||||
else if (!uDirRoot.parseHex(
|
||||
directory.at("dir_root").as_string().c_str()))
|
||||
directory.at(JS(dir_root)).as_string().c_str()))
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedDirRoot"};
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "malformedDirRoot"};
|
||||
}
|
||||
else
|
||||
{
|
||||
key = ripple::keylet::page(uDirRoot, subIndex).key;
|
||||
}
|
||||
}
|
||||
else if (directory.contains("owner"))
|
||||
else if (directory.contains(JS(owner)))
|
||||
{
|
||||
auto const ownerID = ripple::parseBase58<ripple::AccountID>(
|
||||
directory.at("owner").as_string().c_str());
|
||||
directory.at(JS(owner)).as_string().c_str());
|
||||
|
||||
if (!ownerID)
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAddress"};
|
||||
return Status{ClioError::rpcMALFORMED_ADDRESS};
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -172,167 +206,180 @@ doLedgerEntry(Context const& context)
|
||||
else
|
||||
{
|
||||
return Status{
|
||||
Error::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"};
|
||||
RippledError::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"};
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (request.contains("escrow"))
|
||||
else if (request.contains(JS(escrow)))
|
||||
{
|
||||
if (!request.at("escrow").is_object())
|
||||
if (!request.at(JS(escrow)).is_object())
|
||||
{
|
||||
if (!key.parseHex(request.at("escrow").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedEscrow"};
|
||||
if (!key.parseHex(request.at(JS(escrow)).as_string().c_str()))
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "malformedEscrow"};
|
||||
}
|
||||
else if (
|
||||
!request.at("escrow").as_object().contains("owner") ||
|
||||
!request.at("escrow").as_object().at("owner").is_string())
|
||||
!request.at(JS(escrow)).as_object().contains(JS(owner)) ||
|
||||
!request.at(JS(escrow)).as_object().at(JS(owner)).is_string())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedOwner"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "malformedOwner"};
|
||||
}
|
||||
else if (
|
||||
!request.at("escrow").as_object().contains("seq") ||
|
||||
!request.at("escrow").as_object().at("seq").is_int64())
|
||||
!request.at(JS(escrow)).as_object().contains(JS(seq)) ||
|
||||
!request.at(JS(escrow)).as_object().at(JS(seq)).is_int64())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedSeq"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "malformedSeq"};
|
||||
}
|
||||
else
|
||||
{
|
||||
auto const id =
|
||||
ripple::parseBase58<ripple::AccountID>(request.at("escrow")
|
||||
ripple::parseBase58<ripple::AccountID>(request.at(JS(escrow))
|
||||
.as_object()
|
||||
.at("owner")
|
||||
.at(JS(owner))
|
||||
.as_string()
|
||||
.c_str());
|
||||
|
||||
if (!id)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedOwner"};
|
||||
return Status{ClioError::rpcMALFORMED_ADDRESS};
|
||||
else
|
||||
{
|
||||
std::uint32_t seq =
|
||||
request.at("escrow").as_object().at("seq").as_int64();
|
||||
request.at(JS(escrow)).as_object().at(JS(seq)).as_int64();
|
||||
key = ripple::keylet::escrow(*id, seq).key;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (request.contains("offer"))
|
||||
else if (request.contains(JS(offer)))
|
||||
{
|
||||
if (!request.at("offer").is_object())
|
||||
if (!request.at(JS(offer)).is_object())
|
||||
{
|
||||
if (!key.parseHex(request.at("offer").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedOffer"};
|
||||
if (!key.parseHex(request.at(JS(offer)).as_string().c_str()))
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "malformedOffer"};
|
||||
}
|
||||
else if (
|
||||
!request.at("offer").as_object().contains("account") ||
|
||||
!request.at("offer").as_object().at("account").is_string())
|
||||
!request.at(JS(offer)).as_object().contains(JS(account)) ||
|
||||
!request.at(JS(offer)).as_object().at(JS(account)).is_string())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
}
|
||||
else if (
|
||||
!request.at("offer").as_object().contains("seq") ||
|
||||
!request.at("offer").as_object().at("seq").is_int64())
|
||||
!request.at(JS(offer)).as_object().contains(JS(seq)) ||
|
||||
!request.at(JS(offer)).as_object().at(JS(seq)).is_int64())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedSeq"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "malformedSeq"};
|
||||
}
|
||||
else
|
||||
{
|
||||
auto offer = request.at("offer").as_object();
|
||||
auto offer = request.at(JS(offer)).as_object();
|
||||
auto const id = ripple::parseBase58<ripple::AccountID>(
|
||||
offer.at("account").as_string().c_str());
|
||||
offer.at(JS(account)).as_string().c_str());
|
||||
|
||||
if (!id)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
|
||||
return Status{ClioError::rpcMALFORMED_ADDRESS};
|
||||
else
|
||||
{
|
||||
std::uint32_t seq =
|
||||
boost::json::value_to<std::uint32_t>(offer.at("seq"));
|
||||
boost::json::value_to<std::uint32_t>(offer.at(JS(seq)));
|
||||
key = ripple::keylet::offer(*id, seq).key;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (request.contains("payment_channel"))
|
||||
else if (request.contains(JS(payment_channel)))
|
||||
{
|
||||
if (!request.at("payment_channel").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "paymentChannelNotString"};
|
||||
if (!request.at(JS(payment_channel)).is_string())
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "paymentChannelNotString"};
|
||||
|
||||
if (!key.parseHex(request.at("payment_channel").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedPaymentChannel"};
|
||||
if (!key.parseHex(request.at(JS(payment_channel)).as_string().c_str()))
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "malformedPaymentChannel"};
|
||||
}
|
||||
else if (request.contains("ripple_state"))
|
||||
else if (request.contains(JS(ripple_state)))
|
||||
{
|
||||
if (!request.at("ripple_state").is_object())
|
||||
return Status{Error::rpcINVALID_PARAMS, "rippleStateNotObject"};
|
||||
if (!request.at(JS(ripple_state)).is_object())
|
||||
return Status{
|
||||
RippledError::rpcINVALID_PARAMS, "rippleStateNotObject"};
|
||||
|
||||
ripple::Currency currency;
|
||||
boost::json::object const& state =
|
||||
request.at("ripple_state").as_object();
|
||||
request.at(JS(ripple_state)).as_object();
|
||||
|
||||
if (!state.contains("currency") || !state.at("currency").is_string())
|
||||
if (!state.contains(JS(currency)) ||
|
||||
!state.at(JS(currency)).is_string())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedCurrency"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "currencyNotString"};
|
||||
}
|
||||
|
||||
if (!state.contains("accounts") || !state.at("accounts").is_array() ||
|
||||
2 != state.at("accounts").as_array().size() ||
|
||||
!state.at("accounts").as_array().at(0).is_string() ||
|
||||
!state.at("accounts").as_array().at(1).is_string() ||
|
||||
(state.at("accounts").as_array().at(0).as_string() ==
|
||||
state.at("accounts").as_array().at(1).as_string()))
|
||||
if (!state.contains(JS(accounts)) ||
|
||||
!state.at(JS(accounts)).is_array() ||
|
||||
2 != state.at(JS(accounts)).as_array().size() ||
|
||||
!state.at(JS(accounts)).as_array().at(0).is_string() ||
|
||||
!state.at(JS(accounts)).as_array().at(1).is_string() ||
|
||||
(state.at(JS(accounts)).as_array().at(0).as_string() ==
|
||||
state.at(JS(accounts)).as_array().at(1).as_string()))
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccounts"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "malformedAccounts"};
|
||||
}
|
||||
|
||||
auto const id1 = ripple::parseBase58<ripple::AccountID>(
|
||||
state.at("accounts").as_array().at(0).as_string().c_str());
|
||||
state.at(JS(accounts)).as_array().at(0).as_string().c_str());
|
||||
auto const id2 = ripple::parseBase58<ripple::AccountID>(
|
||||
state.at("accounts").as_array().at(1).as_string().c_str());
|
||||
state.at(JS(accounts)).as_array().at(1).as_string().c_str());
|
||||
|
||||
if (!id1 || !id2)
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedAccounts"};
|
||||
return Status{
|
||||
ClioError::rpcMALFORMED_ADDRESS, "malformedAddresses"};
|
||||
|
||||
else if (!ripple::to_currency(
|
||||
currency, state.at("currency").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedCurrency"};
|
||||
currency, state.at(JS(currency)).as_string().c_str()))
|
||||
return Status{
|
||||
ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency"};
|
||||
|
||||
key = ripple::keylet::line(*id1, *id2, currency).key;
|
||||
}
|
||||
else if (request.contains("ticket"))
|
||||
else if (request.contains(JS(ticket)))
|
||||
{
|
||||
if (!request.at("ticket").is_object())
|
||||
if (!request.at(JS(ticket)).is_object())
|
||||
{
|
||||
if (!request.at("ticket").is_string())
|
||||
return Status{Error::rpcINVALID_PARAMS, "ticketNotString"};
|
||||
if (!request.at(JS(ticket)).is_string())
|
||||
return Status{
|
||||
ClioError::rpcMALFORMED_REQUEST, "ticketNotString"};
|
||||
|
||||
if (!key.parseHex(request.at("ticket").as_string().c_str()))
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedTicket"};
|
||||
if (!key.parseHex(request.at(JS(ticket)).as_string().c_str()))
|
||||
return Status{
|
||||
ClioError::rpcMALFORMED_REQUEST, "malformedTicket"};
|
||||
}
|
||||
else if (
|
||||
!request.at("ticket").as_object().contains("account") ||
|
||||
!request.at("ticket").as_object().at("account").is_string())
|
||||
!request.at(JS(ticket)).as_object().contains(JS(owner)) ||
|
||||
!request.at(JS(ticket)).as_object().at(JS(owner)).is_string())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedTicketAccount"};
|
||||
return Status{ClioError::rpcMALFORMED_REQUEST};
|
||||
}
|
||||
else if (
|
||||
!request.at("ticket").as_object().contains("ticket_seq") ||
|
||||
!request.at("ticket").as_object().at("ticket_seq").is_int64())
|
||||
!request.at(JS(ticket)).as_object().contains(JS(ticket_seq)) ||
|
||||
!request.at(JS(ticket)).as_object().at(JS(ticket_seq)).is_int64())
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "malformedTicketSeq"};
|
||||
return Status{
|
||||
ClioError::rpcMALFORMED_REQUEST, "malformedTicketSeq"};
|
||||
}
|
||||
else
|
||||
{
|
||||
auto const id =
|
||||
ripple::parseBase58<ripple::AccountID>(request.at("ticket")
|
||||
ripple::parseBase58<ripple::AccountID>(request.at(JS(ticket))
|
||||
.as_object()
|
||||
.at("account")
|
||||
.at(JS(owner))
|
||||
.as_string()
|
||||
.c_str());
|
||||
|
||||
if (!id)
|
||||
return Status{
|
||||
Error::rpcINVALID_PARAMS, "malformedTicketAccount"};
|
||||
return Status{ClioError::rpcMALFORMED_OWNER};
|
||||
else
|
||||
{
|
||||
std::uint32_t seq =
|
||||
request.at("offer").as_object().at("ticket_seq").as_int64();
|
||||
std::uint32_t seq = request.at(JS(offer))
|
||||
.as_object()
|
||||
.at(JS(ticket_seq))
|
||||
.as_int64();
|
||||
|
||||
key = ripple::getTicketIndex(*id, seq);
|
||||
}
|
||||
@@ -340,30 +387,28 @@ doLedgerEntry(Context const& context)
|
||||
}
|
||||
else
|
||||
{
|
||||
return Status{Error::rpcINVALID_PARAMS, "unknownOption"};
|
||||
return Status{RippledError::rpcINVALID_PARAMS, "unknownOption"};
|
||||
}
|
||||
|
||||
auto start = std::chrono::system_clock::now();
|
||||
auto dbResponse =
|
||||
context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield);
|
||||
auto end = std::chrono::system_clock::now();
|
||||
|
||||
if (!dbResponse or dbResponse->size() == 0)
|
||||
return Status{Error::rpcOBJECT_NOT_FOUND, "entryNotFound"};
|
||||
return Status{"entryNotFound"};
|
||||
|
||||
response["index"] = ripple::strHex(key);
|
||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
||||
response["ledger_index"] = lgrInfo.seq;
|
||||
response[JS(index)] = ripple::strHex(key);
|
||||
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||
response[JS(ledger_index)] = lgrInfo.seq;
|
||||
|
||||
if (binary)
|
||||
{
|
||||
response["node_binary"] = ripple::strHex(*dbResponse);
|
||||
response[JS(node_binary)] = ripple::strHex(*dbResponse);
|
||||
}
|
||||
else
|
||||
{
|
||||
ripple::STLedgerEntry sle{
|
||||
ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key};
|
||||
response["node"] = toJson(sle);
|
||||
response[JS(node)] = toJson(sle);
|
||||
}
|
||||
|
||||
return response;
|
||||
|
||||
@@ -1,3 +1,21 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
@@ -12,15 +30,15 @@ doLedgerRange(Context const& context)
|
||||
auto range = context.backend->fetchLedgerRange();
|
||||
if (!range)
|
||||
{
|
||||
return Status{Error::rpcNOT_READY, "rangeNotFound"};
|
||||
return Status{RippledError::rpcNOT_READY, "rangeNotFound"};
|
||||
}
|
||||
else
|
||||
{
|
||||
response["ledger_index_min"] = range->minSequence;
|
||||
response["ledger_index_max"] = range->maxSequence;
|
||||
response[JS(ledger_index_min)] = range->minSequence;
|
||||
response[JS(ledger_index_max)] = range->maxSequence;
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
} // namespace RPC
|
||||
|
||||
70
src/rpc/handlers/NFTHistory.cpp
Normal file
70
src/rpc/handlers/NFTHistory.cpp
Normal file
@@ -0,0 +1,70 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <log/Logger.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
#include <util/Profiler.h>
|
||||
|
||||
using namespace clio;
|
||||
|
||||
// local to compilation unit loggers
|
||||
namespace {
|
||||
clio::Logger gLog{"RPC"};
|
||||
} // namespace
|
||||
|
||||
namespace RPC {
|
||||
|
||||
Result
|
||||
doNFTHistory(Context const& context)
|
||||
{
|
||||
auto const maybeTokenID = getNFTID(context.params);
|
||||
if (auto const status = std::get_if<Status>(&maybeTokenID); status)
|
||||
return *status;
|
||||
auto const tokenID = std::get<ripple::uint256>(maybeTokenID);
|
||||
|
||||
constexpr std::string_view outerFuncName = __func__;
|
||||
auto const maybeResponse = traverseTransactions(
|
||||
context,
|
||||
[&tokenID, &outerFuncName](
|
||||
std::shared_ptr<Backend::BackendInterface const> const& backend,
|
||||
std::uint32_t const limit,
|
||||
bool const forward,
|
||||
std::optional<Backend::TransactionsCursor> const& cursorIn,
|
||||
boost::asio::yield_context& yield)
|
||||
-> Backend::TransactionsAndCursor {
|
||||
auto const [txnsAndCursor, timeDiff] =
|
||||
util::timed([&, &tokenID = tokenID]() {
|
||||
return backend->fetchNFTTransactions(
|
||||
tokenID, limit, forward, cursorIn, yield);
|
||||
});
|
||||
gLog.info() << outerFuncName << " db fetch took " << timeDiff
|
||||
<< " milliseconds - num blobs = "
|
||||
<< txnsAndCursor.txns.size();
|
||||
return txnsAndCursor;
|
||||
});
|
||||
|
||||
if (auto const status = std::get_if<Status>(&maybeResponse); status)
|
||||
return *status;
|
||||
auto response = std::get<boost::json::object>(maybeResponse);
|
||||
|
||||
response[JS(nft_id)] = ripple::to_string(tokenID);
|
||||
return response;
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
157
src/rpc/handlers/NFTInfo.cpp
Normal file
157
src/rpc/handlers/NFTInfo.cpp
Normal file
@@ -0,0 +1,157 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
|
||||
#include <ripple/protocol/Indexes.h>
|
||||
#include <boost/json.hpp>
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <rpc/RPCHelpers.h>
|
||||
|
||||
// {
|
||||
// nft_id: <ident>
|
||||
// ledger_hash: <ledger>
|
||||
// ledger_index: <ledger_index>
|
||||
// }
|
||||
|
||||
namespace RPC {
|
||||
|
||||
std::variant<std::monostate, std::string, Status>
|
||||
getURI(Backend::NFT const& dbResponse, Context const& context)
|
||||
{
|
||||
// Fetch URI from ledger
|
||||
// The correct page will be > bookmark and <= last. We need to calculate
|
||||
// the first possible page however, since bookmark is not guaranteed to
|
||||
// exist.
|
||||
auto const bookmark = ripple::keylet::nftpage(
|
||||
ripple::keylet::nftpage_min(dbResponse.owner), dbResponse.tokenID);
|
||||
auto const last = ripple::keylet::nftpage_max(dbResponse.owner);
|
||||
|
||||
ripple::uint256 nextKey = last.key;
|
||||
std::optional<ripple::STLedgerEntry> sle;
|
||||
|
||||
// when this loop terminates, `sle` will contain the correct page for
|
||||
// this NFT.
|
||||
//
|
||||
// 1) We start at the last NFTokenPage, which is guaranteed to exist,
|
||||
// grab the object from the DB and deserialize it.
|
||||
//
|
||||
// 2) If that NFTokenPage has a PreviousPageMin value and the
|
||||
// PreviousPageMin value is > bookmark, restart loop. Otherwise
|
||||
// terminate and use the `sle` from this iteration.
|
||||
do
|
||||
{
|
||||
auto const blob = context.backend->fetchLedgerObject(
|
||||
ripple::Keylet(ripple::ltNFTOKEN_PAGE, nextKey).key,
|
||||
dbResponse.ledgerSequence,
|
||||
context.yield);
|
||||
|
||||
if (!blob || blob->size() == 0)
|
||||
return Status{
|
||||
RippledError::rpcINTERNAL,
|
||||
"Cannot find NFTokenPage for this NFT"};
|
||||
|
||||
sle = ripple::STLedgerEntry(
|
||||
ripple::SerialIter{blob->data(), blob->size()}, nextKey);
|
||||
|
||||
if (sle->isFieldPresent(ripple::sfPreviousPageMin))
|
||||
nextKey = sle->getFieldH256(ripple::sfPreviousPageMin);
|
||||
|
||||
} while (sle && sle->key() != nextKey && nextKey > bookmark.key);
|
||||
|
||||
if (!sle)
|
||||
return Status{
|
||||
RippledError::rpcINTERNAL, "Cannot find NFTokenPage for this NFT"};
|
||||
|
||||
auto const nfts = sle->getFieldArray(ripple::sfNFTokens);
|
||||
auto const nft = std::find_if(
|
||||
nfts.begin(),
|
||||
nfts.end(),
|
||||
[&dbResponse](ripple::STObject const& candidate) {
|
||||
return candidate.getFieldH256(ripple::sfNFTokenID) ==
|
||||
dbResponse.tokenID;
|
||||
});
|
||||
|
||||
if (nft == nfts.end())
|
||||
return Status{
|
||||
RippledError::rpcINTERNAL, "Cannot find NFTokenPage for this NFT"};
|
||||
|
||||
ripple::Blob const uriField = nft->getFieldVL(ripple::sfURI);
|
||||
|
||||
// NOTE this cannot use a ternary or value_or because then the
|
||||
// expression's type is unclear. We want to explicitly set the `uri`
|
||||
// field to null when not present to avoid any confusion.
|
||||
if (std::string const uri = std::string(uriField.begin(), uriField.end());
|
||||
uri.size() > 0)
|
||||
return uri;
|
||||
return std::monostate{};
|
||||
}
|
||||
|
||||
Result
|
||||
doNFTInfo(Context const& context)
|
||||
{
|
||||
auto request = context.params;
|
||||
boost::json::object response = {};
|
||||
|
||||
auto const maybeTokenID = getNFTID(request);
|
||||
if (auto const status = std::get_if<Status>(&maybeTokenID); status)
|
||||
return *status;
|
||||
auto const tokenID = std::get<ripple::uint256>(maybeTokenID);
|
||||
|
||||
auto const maybeLedgerInfo = ledgerInfoFromRequest(context);
|
||||
if (auto status = std::get_if<Status>(&maybeLedgerInfo); status)
|
||||
return *status;
|
||||
auto const lgrInfo = std::get<ripple::LedgerInfo>(maybeLedgerInfo);
|
||||
|
||||
std::optional<Backend::NFT> dbResponse =
|
||||
context.backend->fetchNFT(tokenID, lgrInfo.seq, context.yield);
|
||||
if (!dbResponse)
|
||||
return Status{RippledError::rpcOBJECT_NOT_FOUND, "NFT not found"};
|
||||
|
||||
response["nft_id"] = ripple::strHex(dbResponse->tokenID);
|
||||
response["ledger_index"] = dbResponse->ledgerSequence;
|
||||
response["owner"] = ripple::toBase58(dbResponse->owner);
|
||||
response["is_burned"] = dbResponse->isBurned;
|
||||
|
||||
response["flags"] = ripple::nft::getFlags(dbResponse->tokenID);
|
||||
response["transfer_fee"] = ripple::nft::getTransferFee(dbResponse->tokenID);
|
||||
response["issuer"] =
|
||||
ripple::toBase58(ripple::nft::getIssuer(dbResponse->tokenID));
|
||||
response["nft_taxon"] =
|
||||
ripple::nft::toUInt32(ripple::nft::getTaxon(dbResponse->tokenID));
|
||||
response["nft_sequence"] = ripple::nft::getSerial(dbResponse->tokenID);
|
||||
|
||||
if (!dbResponse->isBurned)
|
||||
{
|
||||
auto const maybeURI = getURI(*dbResponse, context);
|
||||
// An error occurred
|
||||
if (Status const* status = std::get_if<Status>(&maybeURI); status)
|
||||
return *status;
|
||||
// A URI was found
|
||||
if (std::string const* uri = std::get_if<std::string>(&maybeURI); uri)
|
||||
response["uri"] = *uri;
|
||||
// A URI was not found, explicitly set to null
|
||||
else
|
||||
response["uri"] = nullptr;
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
} // namespace RPC
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user