Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-04 20:05:51 +00:00)

Compare commits (76 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | d2c870db92 |  |
|  | 8e17039586 |  |
|  | 1310e5dde9 |  |
|  | 777ae24f62 |  |
|  | 1ada879072 |  |
|  | e2792f5a0c |  |
|  | 97c431680a |  |
|  | 0b454a2316 |  |
|  | b7cae53fcd |  |
|  | ac45cce5bd |  |
|  | ef39c04e1e |  |
|  | 83a099a547 |  |
|  | 73337d0819 |  |
|  | 816625c44e |  |
|  | 48e87d7c07 |  |
|  | dfe18ed682 |  |
|  | 92a072d7a8 |  |
|  | 24fca61b56 |  |
|  | ae8303fdc8 |  |
|  | 709a8463b8 |  |
|  | 84d31986d1 |  |
|  | d50f229631 |  |
|  | 379c89fb02 |  |
|  | 81f7171368 |  |
|  | 629b35d1dd |  |
|  | 6fc4cee195 |  |
|  | b01813ac3d |  |
|  | 6bf8c5bc4e |  |
|  | 2ffd98f895 |  |
|  | 3edead32ba |  |
|  | 28980734ae |  |
|  | ce60c8f64d |  |
|  | 39ef2ae33c |  |
|  | d83975e750 |  |
|  | 4468302852 |  |
|  | a704cf7cfe |  |
|  | 05d09cc352 |  |
|  | ae96ac7baf |  |
|  | 4579fa2f26 |  |
|  | 1e7645419f |  |
|  | 35db5d3da9 |  |
|  | 4e581e659f |  |
|  | 55f0536dca |  |
|  | a3a15754b4 |  |
|  | 59d7d1bc49 |  |
|  | 5f5648470a |  |
|  | 13afe9373d |  |
|  | 9a79bdc50b |  |
|  | 7d5415e8b0 |  |
|  | 54669420bf |  |
|  | a62849b89a |  |
|  | 20c2654abc |  |
|  | 37c810f6fa |  |
|  | d64753c0dd |  |
|  | 92d6687151 |  |
|  | fa8405df83 |  |
|  | 3d3b8e91b6 |  |
|  | 14a972c8e2 |  |
|  | 166ff63dbc |  |
|  | b7ae6a0495 |  |
|  | d0ea9d20ab |  |
|  | b45b34edb1 |  |
|  | 7ecb894632 |  |
|  | 8de39739fa |  |
|  | f16a05ae7a |  |
|  | 458fac776c |  |
|  | af575b1bcf |  |
|  | ee615a290b |  |
|  | 31cc06d4f4 |  |
|  | f90dac2f85 |  |
|  | 8a5be14ba8 |  |
|  | ba6b764e38 |  |
|  | 9939f6e6f4 |  |
|  | a72aa73afe |  |
|  | 3d02803135 |  |
|  | 3f47b85e3b |  |
.dockerignore (new file, +1)
@@ -0,0 +1 @@
+build/
@@ -6,3 +6,4 @@
 
 # clang-format
 e41150248a97e4bdc1cf21b54650c4bb7c63928e
+2e542e7b0d94451a933c88778461cc8d3d7e6417
.github/actions/lint/action.yml (vendored, new file, +13)
@@ -0,0 +1,13 @@
+runs:
+  using: composite
+  steps:
+    # Github's ubuntu-20.04 image already has clang-format-11 installed
+    - run: |
+        find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i
+      shell: bash
+
+    - name: Check for differences
+      id: assert
+      shell: bash
+      run: |
+        git diff --color --exit-code | tee "clang-format.patch"
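The same check the action performs can be reproduced locally before pushing; a minimal sketch, assuming `clang-format-11` is installed and you are at the repository root:

```sh
# Re-format all C++ sources in place, exactly as the lint action does
find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 \
    | xargs -0 clang-format-11 -i

# A non-zero exit here means some files were not clang-format clean
git diff --color --exit-code
```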
.github/actions/sign/action.yml (vendored, new file, +21)
@@ -0,0 +1,21 @@
+name: 'Sign packages'
+runs:
+  using: "composite"
+
+  steps:
+    - name: Sign
+      shell: bash
+      run: |
+        set -ex -o pipefail
+        echo "$GPG_KEY_B64"| base64 -d | gpg --batch --no-tty --allow-secret-key-import --import -
+        unset GPG_KEY_B64
+        export GPG_PASSPHRASE=$(echo $GPG_KEY_PASS_B64 | base64 -di)
+        unset GPG_KEY_PASS_B64
+        export GPG_KEYID=$(gpg --with-colon --list-secret-keys | head -n1 | cut -d : -f 5)
+        for PKG in $(ls *.deb); do
+          dpkg-sig \
+            -g "--no-tty --digest-algo 'sha512' --passphrase '${GPG_PASSPHRASE}' --pinentry-mode=loopback" \
+            -k "${GPG_KEYID}" \
+            --sign builder \
+            $PKG
+        done
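The action expects the signing key and its passphrase as base64-encoded secrets. A sketch of how such values could be produced (the key identifier is a placeholder, not something defined in this repository):

```sh
# Hypothetical: export the signing key and base64-encode it for the GPG_KEY_B64 secret
gpg --export-secret-keys --armor packaging@example.com | base64 -w0

# Base64-encode the passphrase for the GPG_KEY_PASS_B64 secret
printf '%s' 'the-key-passphrase' | base64 -w0
```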
.github/actions/test/Dockerfile (vendored, new file, +6)
@@ -0,0 +1,6 @@
+FROM cassandra:4.0.4
+
+RUN apt-get update && apt-get install -y postgresql
+COPY entrypoint.sh /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
.github/actions/test/entrypoint.sh (vendored, new executable file, +8)
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+pg_ctlcluster 12 main start
+su postgres -c"psql -c\"alter user postgres with password 'postgres'\""
+su cassandra -c "/opt/cassandra/bin/cassandra -R"
+sleep 90
+chmod +x ./clio_tests
+./clio_tests
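The image can also be exercised outside of CI. A sketch, assuming a compiled `clio_tests` binary sits in the current directory (the image tag is illustrative):

```sh
# Build the test image bundling Cassandra 4.0.4 and PostgreSQL
docker build -t clio-test-env .github/actions/test

# Mount the directory holding clio_tests; the entrypoint waits 90s for
# Cassandra to come up, then runs ./clio_tests
docker run --rm -v "$PWD":/work -w /work clio-test-env
```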
.github/workflows/build.yml (vendored, 144 changes)
@@ -1,9 +1,9 @@
 name: Build Clio
 on:
   push:
-    branches: [master, develop, develop-next]
+    branches: [master, release, develop, develop-next]
   pull_request:
-    branches: [master, develop, develop-next]
+    branches: [master, release, develop, develop-next]
   workflow_dispatch:
 
 jobs:
@@ -11,40 +11,132 @@ jobs:
     name: Lint
     runs-on: ubuntu-20.04
     steps:
-      - name: Get source
-        uses: actions/checkout@v3
+      - uses: actions/checkout@v3
 
       - name: Run clang-format
-        uses: XRPLF/clio-gha/lint@main
+        uses: ./.github/actions/lint
 
   build_clio:
-    name: Build
+    name: Build Clio
     runs-on: [self-hosted, Linux]
    needs: lint
+    strategy:
+      fail-fast: false
+      matrix:
+        type:
+          - suffix: deb
+            image: rippleci/clio-dpkg-builder:2022-09-17
+            script: dpkg
+          - suffix: rpm
+            image: rippleci/clio-rpm-builder:2022-09-17
+            script: rpm
+    container:
+      image: ${{ matrix.type.image }}
 
     steps:
-      - name: Get Clio repo
-        uses: actions/checkout@v3
+      - uses: actions/checkout@v3
         with:
-          path: clio_src
-          ref: 'develop-next'
+          path: clio
 
-      - name: Get Clio CI repo
+      - name: Clone Clio packaging repo
         uses: actions/checkout@v3
         with:
-          path: clio_ci
-          repository: 'XRPLF/clio-ci'
-
-      - name: Get GitHub actions repo
-        uses: actions/checkout@v3
-        with:
-          repository: XRPLF/clio-gha
-          path: gha # must be the same as defined in XRPLF/clio-gha
+          path: clio-packages
+          repository: XRPLF/clio-packages
 
       - name: Build
-        uses: XRPLF/clio-gha/build@main
+        shell: bash
+        run: |
+          export CLIO_ROOT=$(realpath clio)
+          if [ ${{ matrix.type.suffix }} == "rpm" ]; then
+            source /opt/rh/devtoolset-11/enable
+          fi
+          cmake -S clio-packages -B clio-packages/build -DCLIO_ROOT=$CLIO_ROOT
+          cmake --build clio-packages/build --parallel $(nproc)
+          cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests .
+          mv ./clio-packages/build/*.${{ matrix.type.suffix }} .
 
-      # - name: Artifact clio_tests
-      #   uses: actions/upload-artifact@v2
-      #   with:
-      #     name: clio_output
-      #     path: clio_src/build/clio_tests
+      - name: Artifact packages
+        uses: actions/upload-artifact@v3
+        with:
+          name: clio_${{ matrix.type.suffix }}_packages
+          path: ${{ github.workspace }}/*.${{ matrix.type.suffix }}
+
+      - name: Artifact clio_tests
+        uses: actions/upload-artifact@v3
+        with:
+          name: clio_tests-${{ matrix.type.suffix }}
+          path: ${{ github.workspace }}/clio_tests
+
+  sign:
+    name: Sign packages
+    needs: build_clio
+    runs-on: ubuntu-20.04
+    if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/release' || github.ref == 'refs/heads/develop'
+    env:
+      GPG_KEY_B64: ${{ secrets.GPG_KEY_B64 }}
+      GPG_KEY_PASS_B64: ${{ secrets.GPG_KEY_PASS_B64 }}
+    strategy:
+      fail-fast: false
+      matrix:
+        type:
+          - suffix: deb
+            image: ubuntu:20.04
+            script: dpkg
+          # - suffix: rpm
+          #   image: centos:7
+          #   script: rpm
+    container:
+      image: ${{ matrix.type.image }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install dpkg-sig
+        run: |
+          apt-get update && apt-get install -y dpkg-sig gnupg
+      - name: Get package artifact
+        uses: actions/download-artifact@v3
+        with:
+          name: clio_${{ matrix.type.suffix }}_packages
+
+      - name: find packages
+        run: find . -name "*.${{ matrix.type.suffix }}"
+
+      - name: Sign packages
+        uses: ./.github/actions/sign
+
+      - name: Verify the signature
+        run: |
+          set -e
+          for PKG in $(ls *.deb); do
+            gpg --verify "${PKG}"
+          done
+
+      - name: Get short SHA
+        id: shortsha
+        run: echo "::set-output name=sha8::$(echo ${GITHUB_SHA} | cut -c1-8)"
+
+      - name: Artifact signed packages
+        uses: actions/upload-artifact@v2
+        with:
+          name: signed-clio-deb-packages-${{ steps.shortsha.outputs.sha8 }}
+          path: ${{ github.workspace }}/*.deb
+
+  test_clio:
+    name: Test Clio
+    runs-on: [self-hosted, Linux]
+    needs: build_clio
+    strategy:
+      fail-fast: false
+      matrix:
+        suffix: [rpm, deb]
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Get clio_tests artifact
+        uses: actions/download-artifact@v3
+        with:
+          name: clio_tests-${{ matrix.suffix }}
+
+      - name: Run tests
+        timeout-minutes: 10
+        uses: ./.github/actions/test
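After a run completes, the uploaded artifacts can be fetched for local inspection. A sketch using the GitHub CLI (the run ID is a placeholder):

```sh
# Find a recent run of the workflow
gh run list --repo XRPLF/clio --workflow 'Build Clio' --limit 5

# Download the built .deb packages from that run; the artifact name follows
# the 'Artifact packages' step with suffix 'deb'
gh run download 1234567890 --repo XRPLF/clio --name clio_deb_packages
```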
.gitignore (vendored, +2)
@@ -1,2 +1,4 @@
+*clio*.log
 build/
+.vscode
 .python-version
CMake/ClioVersion.cmake (new file, +15)
@@ -0,0 +1,15 @@
+#[===================================================================[
+   read version from source
+#]===================================================================]
+
+file (STRINGS src/main/impl/Build.cpp BUILD_INFO)
+foreach (line_ ${BUILD_INFO})
+  if (line_ MATCHES "versionString[ ]*=[ ]*\"(.+)\"")
+    set (clio_version ${CMAKE_MATCH_1})
+  endif ()
+endforeach ()
+if (clio_version)
+  message (STATUS "clio version: ${clio_version}")
+else ()
+  message (FATAL_ERROR "unable to determine clio version")
+endif ()
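The same extraction can be checked from a shell; a rough equivalent of the CMake regex above, assuming GNU grep:

```sh
# Pull the quoted version out of the versionString assignment in Build.cpp
grep -oP 'versionString *= *"\K[^"]+' src/main/impl/Build.cpp
```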
CMake/deps/Remove-bitset-operator.patch (new file, +24)
@@ -0,0 +1,24 @@
+From 5cd9d09d960fa489a0c4379880cd7615b1c16e55 Mon Sep 17 00:00:00 2001
+From: CJ Cobb <ccobb@ripple.com>
+Date: Wed, 10 Aug 2022 12:30:01 -0400
+Subject: [PATCH] Remove bitset operator !=
+
+---
+ src/ripple/protocol/Feature.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h
+index b3ecb099b..6424be411 100644
+--- a/src/ripple/protocol/Feature.h
++++ b/src/ripple/protocol/Feature.h
+@@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset<detail::numFeatures>
+ public:
+     using base::bitset;
+     using base::operator==;
+-    using base::operator!=;
+
+     using base::all;
+     using base::any;
+--
+2.32.0
+
@@ -10,7 +10,7 @@ if(NOT cassandra)
   ExternalProject_Add(zlib_src
     PREFIX ${nih_cache_path}
     GIT_REPOSITORY https://github.com/madler/zlib.git
-    GIT_TAG master
+    GIT_TAG v1.2.12
    INSTALL_COMMAND ""
     BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
   )
@@ -33,7 +33,7 @@ if(NOT cassandra)
   ExternalProject_Add(krb5_src
     PREFIX ${nih_cache_path}
     GIT_REPOSITORY https://github.com/krb5/krb5.git
-    GIT_TAG master
+    GIT_TAG krb5-1.20
     UPDATE_COMMAND ""
     CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared
     BUILD_IN_SOURCE 1
@@ -66,7 +66,7 @@ if(NOT cassandra)
   ExternalProject_Add(libuv_src
     PREFIX ${nih_cache_path}
     GIT_REPOSITORY https://github.com/libuv/libuv.git
-    GIT_TAG v1.x
+    GIT_TAG v1.44.1
     INSTALL_COMMAND ""
     BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
   )
@@ -89,7 +89,7 @@ if(NOT cassandra)
   ExternalProject_Add(cassandra_src
     PREFIX ${nih_cache_path}
     GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
-    GIT_TAG master
+    GIT_TAG 2.16.2
     CMAKE_ARGS
       -DLIBUV_ROOT_DIR=${BINARY_DIR}
       -DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
@@ -1,11 +1,13 @@
 set(RIPPLED_REPO "https://github.com/ripple/rippled.git")
-set(RIPPLED_BRANCH "1.9.0")
+set(RIPPLED_BRANCH "1.9.2")
 set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
+set(patch_command ! grep operator!= src/ripple/protocol/Feature.h || git apply < ${CMAKE_CURRENT_SOURCE_DIR}/CMake/deps/Remove-bitset-operator.patch)
 message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}")
 FetchContent_Declare(rippled
   GIT_REPOSITORY "${RIPPLED_REPO}"
   GIT_TAG "${RIPPLED_BRANCH}"
   GIT_SHALLOW ON
+  PATCH_COMMAND "${patch_command}"
 )
 
 FetchContent_GetProperties(rippled)
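The `patch_command` uses a guard so re-running CMake does not apply the patch twice: while the `operator!=` line is still present, `grep` succeeds, the negation fails, and the `||` falls through to `git apply`; once the patch has been applied, `grep` finds nothing and the command succeeds without reapplying. The same idempotent-patch idiom in plain shell:

```sh
# Apply the patch only if the line it removes is still present
! grep operator!= src/ripple/protocol/Feature.h \
    || git apply < CMake/deps/Remove-bitset-operator.patch
```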
@@ -3,8 +3,14 @@ set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
 
 install(TARGETS clio_server DESTINATION bin)
 # install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests?
-install(FILES example-config.json DESTINATION etc RENAME config.json)
+
+#install(FILES example-config.json DESTINATION etc RENAME config.json)
+file(READ example-config.json config)
+string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
+file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
+install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
+
 configure_file("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
 
 install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
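The `string(REGEX REPLACE ...)` step rewrites the default log path before installing the config. The equivalent transformation in shell, for comparison:

```sh
# Rewrite the relative log directory to the packaged location
sed 's|\./clio_log|/var/log/clio/|g' example-config.json > install-config.json
```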
@@ -1 +0,0 @@
-#define VERSION "@PROJECT_VERSION@"
@@ -1,6 +1,10 @@
 cmake_minimum_required(VERSION 3.16.3)
 
-project(clio VERSION 0.2.0)
+project(clio)
+
+if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
+    message(FATAL_ERROR "GCC 11+ required for building clio")
+endif()
 
 option(BUILD_TESTS "Build tests" TRUE)
 
@@ -10,6 +14,22 @@ if(VERBOSE)
     set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
 endif()
 
+if(NOT GIT_COMMIT_HASH)
+    if(VERBOSE)
+        message("GIT_COMMIT_HASH not provided...looking for git")
+    endif()
+    find_package(Git)
+    if(Git_FOUND)
+        execute_process(COMMAND ${GIT_EXECUTABLE} describe --always --abbrev=8
+            OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
+        if(gch)
+            set(GIT_COMMIT_HASH "${gch}")
+            message(STATUS "Git commit: ${GIT_COMMIT_HASH}")
+            add_definitions(-DCLIO_GIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
+        endif()
+    endif()
+endif() #git
+
 add_library(clio)
 target_compile_features(clio PUBLIC cxx_std_20)
 target_include_directories(clio PUBLIC src)
@@ -17,23 +37,25 @@ target_include_directories(clio PUBLIC src)
 include(FetchContent)
 include(ExternalProject)
 include(CMake/settings.cmake)
+include(CMake/ClioVersion.cmake)
 include(CMake/deps/rippled.cmake)
 include(CMake/deps/Boost.cmake)
 include(CMake/deps/cassandra.cmake)
 include(CMake/deps/Postgres.cmake)
 
-# configure_file(CMake/version-config.h include/version.h) # NOTE: Not used, but an idea how to handle versioning.
-
 target_sources(clio PRIVATE
+    ## Main
+    src/main/impl/Build.cpp
     ## Backend
     src/backend/BackendInterface.cpp
     src/backend/CassandraBackend.cpp
-    src/backend/LayeredCache.cpp
     src/backend/Pg.cpp
     src/backend/PostgresBackend.cpp
     src/backend/SimpleCache.cpp
     ## ETL
     src/etl/ETLSource.cpp
+    src/etl/ProbingETLSource.cpp
+    src/etl/NFTHelpers.cpp
     src/etl/ReportingETL.cpp
     ## Subscriptions
     src/subscriptions/SubscriptionManager.cpp
@@ -41,6 +63,7 @@ target_sources(clio PRIVATE
     src/rpc/RPC.cpp
     src/rpc/RPCHelpers.cpp
     src/rpc/Counters.cpp
+    src/rpc/WorkQueue.cpp
     ## RPC Methods
     # Account
     src/rpc/handlers/AccountChannels.cpp
@@ -51,6 +74,8 @@ target_sources(clio PRIVATE
     src/rpc/handlers/AccountObjects.cpp
     src/rpc/handlers/GatewayBalances.cpp
     src/rpc/handlers/NoRippleCheck.cpp
+    # NFT
+    src/rpc/handlers/NFTInfo.cpp
     # Ledger
     src/rpc/handlers/Ledger.cpp
     src/rpc/handlers/LedgerData.cpp
@@ -61,7 +86,10 @@ target_sources(clio PRIVATE
     src/rpc/handlers/TransactionEntry.cpp
     src/rpc/handlers/AccountTx.cpp
     # Dex
+    src/rpc/handlers/BookChanges.cpp
     src/rpc/handlers/BookOffers.cpp
+    # NFT
+    src/rpc/handlers/NFTOffers.cpp
     # Payment Channel
     src/rpc/handlers/ChannelAuthorize.cpp
     src/rpc/handlers/ChannelVerify.cpp
@@ -72,7 +100,7 @@ target_sources(clio PRIVATE
     # Utility
     src/rpc/handlers/Random.cpp)
 
-add_executable(clio_server src/main.cpp)
+add_executable(clio_server src/main/main.cpp)
 target_link_libraries(clio_server PUBLIC clio)
 
 if(BUILD_TESTS)
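The commit hash baked into the binary comes from the same `git describe` invocation the CMake code runs; you can preview it directly:

```sh
# What CLIO_GIT_COMMIT_HASH will be set to on this checkout
git describe --always --abbrev=8
```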
CONTRIBUTING.md (new file, +123)
@@ -0,0 +1,123 @@
+# Contributing
+Thank you for your interest in contributing to the `clio` project 🙏
+
+To contribute, please:
+1. Fork the repository under your own user.
+2. Create a new branch on which to write your changes.
+3. Write and test your code.
+4. Ensure that your code compiles with the provided build engine and update the provided build engine as part of your PR where needed and where appropriate.
+5. Where applicable, write test cases for your code and include those in `unittests`.
+6. Ensure your code passes automated checks (e.g. clang-format)
+7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change). See below for more details.
+8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template.
+
+> **Note:** Please make sure you read the [Style guide](#style-guide).
+
+## Git commands
+This section offers a detailed look at the git commands you will need to use to get your PR submitted.
+Please note that there is more than one way to do this and these commands are only provided for your convenience.
+At this point it's assumed that you have already finished working on your feature/bug.
+
+> **Important:** Before you issue any of the commands below, please hit the `Sync fork` button and make sure your fork's `develop` branch is up to date with the main `clio` repository.
+
+``` bash
+# Create a backup of your branch
+git branch <your feature branch>_bk
+
+# Rebase and squash commits into one
+git checkout develop
+git pull origin develop
+git checkout <your feature branch>
+git rebase -i develop
+```
+For each commit in the list other than the first one please select `s` to squash.
+After this is done you will have the opportunity to write a message for the squashed commit.
+
+> **Hint:** Please use an **imperative mood** commit message, capitalizing the first word of the subject.
+
+``` bash
+# You should now have a single commit on top of a commit in `develop`
+git log
+```
+> **Todo:** In case there are merge conflicts, please resolve them now
+
+``` bash
+# Use the same commit message as you did above
+git commit -m 'Your message'
+git rebase --continue
+```
+
+> **Important:** If you have no GPG keys set up please follow [this tutorial](https://docs.github.com/en/authentication/managing-commit-signature-verification/adding-a-gpg-key-to-your-github-account)
+
+``` bash
+# Sign the commit with your GPG key and finally push your changes to the repo
+git commit --amend -S
+git push --force
+```
+
+## Fixing issues found during code review
+While your code is in review it's possible that some changes will be requested by the reviewer.
+This section describes the process of adding your fixes.
+
+We assume that you already made the required changes on your feature branch.
+
+``` bash
+# Add the changed code
+git add <paths to add>
+
+# Add a folded commit message (so you can squash them later)
+# while also signing it with your GPG key
+git commit -S -m "[FOLD] Your commit message"
+
+# And finally push your changes
+git push
+```
+## After code review
+Last but not least, when your PR is approved you still have to `Squash and merge` your code.
+Luckily there is a button for that towards the bottom of the PR's page on github.
+
+> **Important:** Please leave the automatically generated link to the PR in the subject line **and** in the description field please add `"Fixes #ISSUE_ID"` (replacing `ISSUE_ID` with yours).
+> **Note:** See [issues](https://github.com/XRPLF/clio/issues) to find the `ISSUE_ID` for the feature/bug you were working on.
+
+# Style guide
+This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments.
+
+## Formatting
+All code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
+To change your code to conform use `clang-format -i <your changed files>`.
+
+## Avoid
+* Proliferation of nearly identical code.
+* Proliferation of new files and classes unless it improves readability and/or compilation time.
+* Unmanaged memory allocation and raw pointers.
+* Macros (unless they add significant value.)
+* Lambda patterns (unless these add significant value.)
+* CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments.
+* Importing new libraries unless there is a very good reason to do so.
+
+## Seek to
+* Extend functionality of existing code rather than creating new code.
+* Prefer readability over terseness where important logic is concerned.
+* Inline functions that are not used or are not likely to be used elsewhere in the codebase.
+* Use clear and self-explanatory names for functions, variables, structs and classes.
+* Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders.
+* Provide as many comments as you feel that a competent programmer would need to understand what your code does.
+
+# Maintainers
+Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc.
+
+## Code Review
+PRs must be reviewed by at least one of the maintainers.
+
+## Adding and Removing
+New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and a 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected.
+
+Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote.
+
+## Existing Maintainers
+
+* [cjcobb23](https://github.com/cjcobb23) (Ripple)
+* [natenichols](https://github.com/natenichols) (Ripple)
+* [legleux](https://github.com/legleux) (Ripple)
+* [undertome](https://github.com/undertome) (Ripple)
+* [godexsoft](https://github.com/godexsoft) (Ripple)
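One extra check worth doing before the force-push described above: confirm the amended commit is actually signed. A small sketch:

```sh
# Show the GPG signature status of the commit you are about to push
git log --show-signature -1
```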
README.md (77 changes)
@@ -1,9 +1,6 @@
-**Status:** This software is in beta mode. We encourage anyone to try it out and
-report any issues they discover. Version 1.0 coming soon.
-
 # Clio
-Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over websocket or JSON-RPC. Validated
-historical ledger and transaction data is stored in a more space efficient format,
+Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC. Validated
+historical ledger and transaction data are stored in a more space-efficient format,
 using up to 4 times less space than rippled. Clio can be configured to store data in Apache Cassandra or ScyllaDB,
 allowing for scalable read throughput. Multiple Clio nodes can share
 access to the same dataset, allowing for a highly available cluster of Clio nodes,
@@ -12,9 +9,9 @@ without the need for redundant data storage or computation.
 Clio offers the full rippled API, with the caveat that Clio by default only returns validated data.
 This means that `ledger_index` defaults to `validated` instead of `current` for all requests.
 Other non-validated data is also not returned, such as information about queued transactions.
-For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node, and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
+For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
 
-Clio does not connect to the peer to peer network. Instead, Clio extracts data from a specified rippled node. Running Clio requires access to a rippled node
+Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
 from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.
 
 
@@ -25,11 +22,11 @@ from which data can be extracted. The rippled node does not need to be running o
 
 ## Building
 
-Clio is built with cmake. Clio requires c++20, and boost 1.75.0 or later.
+Clio is built with CMake. Clio requires at least GCC-11 (C++20), and Boost 1.75.0 or later.
 
-Use these instructions to build a Clio executable from source. These instructions were tested on Ubuntu 20.04 LTS.
+Use these instructions to build a Clio executable from the source. These instructions were tested on Ubuntu 20.04 LTS.
 
-```
+```sh
 # Install dependencies
 sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake
 
@@ -49,28 +46,30 @@ Use these instructions to build a Clio executable from source. These instruction
 ```
 
 ## Running
-`./clio_server config.json`
+```sh
+./clio_server config.json
+```
 
 Clio needs access to a rippled server. The config files of rippled and Clio need
 to match in a certain sense.
 Clio needs to know:
-- the ip of rippled
+- the IP of rippled
-- the port on which rippled is accepting unencrypted websocket connections
+- the port on which rippled is accepting unencrypted WebSocket connections
 - the port on which rippled is handling gRPC requests
 
 rippled needs to open:
 - a port to accept unencrypted websocket connections
-- a port to handle gRPC requests, with the ip(s) of Clio specified in the `secure_gateway` entry
+- a port to handle gRPC requests, with the IP(s) of Clio specified in the `secure_gateway` entry
 
-The example configs of rippled and Clio are setup such that minimal changes are
+The example configs of rippled and Clio are setups such that minimal changes are
 required. When running locally, the only change needed is to uncomment the `port_grpc`
 section of the rippled config. When running Clio and rippled on separate machines,
 in addition to uncommenting the `port_grpc` section, a few other steps must be taken:
-1. change the `ip` of the first entry of `etl_sources` to the ip where your rippled
+1. change the `ip` of the first entry of `etl_sources` to the IP where your rippled
 server is running
-2. open a public, unencrypted websocket port on your rippled server
+2. open a public, unencrypted WebSocket port on your rippled server
-3. change the ip specified in `secure_gateway` of `port_grpc` section of the rippled config
+3. change the IP specified in `secure_gateway` of `port_grpc` section of the rippled config
-to the ip of your Clio server. This entry can take the form of a comma separated list if
+to the IP of your Clio server. This entry can take the form of a comma-separated list if
 you are running multiple Clio nodes.
 
 Once your config files are ready, start rippled and Clio. It doesn't matter which you
@@ -84,7 +83,7 @@ the most recent ledger on the network, and then backfill. If Clio is extracting
 from rippled, and then rippled is stopped for a significant amount of time and then restarted, rippled
 will take time to backfill to the next ledger that Clio wants. The time it takes is proportional
 to the amount of time rippled was offline for. Also be aware that the amount rippled backfills
-is dependent on the online_delete and ledger_history config values; if these values
+are dependent on the online_delete and ledger_history config values; if these values
 are small, and rippled is stopped for a significant amount of time, rippled may never backfill
 to the ledger that Clio wants. To avoid this situation, it is advised to keep history
 proportional to the amount of time that you expect rippled to be offline. For example, if you
@@ -106,7 +105,7 @@ This can take some time, and depends on database throughput. With a moderately f
 database, this should take less than 10 minutes. If you did not properly set `secure_gateway`
 in the `port_grpc` section of rippled, this step will fail. Once the first ledger
 is fully downloaded, Clio only needs to extract the changed data for each ledger,
-so extraction is much faster and Clio can keep up with rippled in real time. Even under
+so extraction is much faster and Clio can keep up with rippled in real-time. Even under
 intense load, Clio should not lag behind the network, as Clio is not processing the data,
 and is simply writing to a database. The throughput of Clio is dependent on the throughput
 of your database, but a standard Cassandra or Scylla deployment can handle
@@ -140,3 +139,39 @@ are doing this, be aware that database traffic will be flowing across regions,
 which can cause high latencies. A possible alternative to this is to just deploy
 a database in each region, and the Clio nodes in each region use their region's database.
 This is effectively two systems.
+
+## Developing against `rippled` in standalone mode
+
+If you wish to develop against a `rippled` instance running in standalone
+mode there are a few quirks of both clio and rippled you need to keep in mind.
+You must:
+
+1. Advance the `rippled` ledger to at least ledger 256
+2. Wait 10 minutes before first starting clio against this standalone node.
+
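For the first quirk, a standalone `rippled` only advances its ledger when told to; closing ledgers in a loop with the `ledger_accept` admin command is one way to reach ledger 256. A sketch, assuming a local standalone node and a default config path:

```sh
# Hypothetical loop: close 256 ledgers on a standalone rippled
for i in $(seq 1 256); do
    rippled --conf /etc/opt/ripple/rippled.cfg ledger_accept > /dev/null
done
```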
+## Logging
+Clio provides several logging options, all are configurable via the config file and are detailed below.
+
+`log_level`: The minimum level of severity at which the log message will be outputted.
+Severity options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`. Defaults to `info`.
+
+`log_to_console`: Enable/disable log output to console. Options are `true`/`false`. Defaults to true.
+
+`log_directory`: Path to the directory where log files are stored. If such directory doesn't exist, Clio will create it. If not specified, logs are not written to a file.
+
+`log_rotation_size`: The max size of the log file in **megabytes** before it will rotate into a smaller file. Defaults to 2GB.
+
+`log_directory_max_size`: The max size of the log directory in **megabytes** before old log files will be
+deleted to free up space. Defaults to 50GB.
+
+`log_rotation_hour_interval`: The time interval in **hours** after the last log rotation to automatically
+rotate the current log file. Defaults to 12 hours.
+
+Note, time-based log rotation occurs dependently on size-based log rotation, where if a
+size-based log rotation occurs, the timer for the time-based rotation will reset.
+
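Putting these options together, the corresponding fragment of a Clio `config.json` might look like this (values are illustrative and mirror the example config changed elsewhere in this diff):

```sh
# Hypothetical: append a logging block to a Clio config.json
cat >> config.json <<'EOF'
"log_level": "info",
"log_to_console": true,
"log_directory": "./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
EOF
```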
+## Cassandra / Scylla Administration
+
+Since Clio relies on either Cassandra or Scylla for its database backend, here are some important considerations:
+
+- Scylla, by default, will reserve all free RAM on a machine for itself. If you are running `rippled` or other services on the same machine, restrict its memory usage using the `--memory` argument: https://docs.scylladb.com/getting-started/scylla-in-a-shared-environment/
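Per the linked guide, the cap is a startup flag. A sketch (the 4G figure is illustrative):

```sh
# Leave RAM for rippled/clio on a shared host by capping Scylla's allocation
scylla --memory 4G
```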
docker/centos/Dockerfile (new file, +49)
@@ -0,0 +1,49 @@
+# FROM centos:7 as deps
+FROM centos:7 as build
+
+ENV CLIO_DIR=/opt/clio/
+# ENV OPENSSL_DIR=/opt/openssl
+
+RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
+RUN yum install -y devtoolset-11
+ENV version=3.16
+ENV build=3
+# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
+COPY docker/shared/install_cmake.sh /install_cmake.sh
+RUN /install_cmake.sh 3.16.3 /usr/local
+RUN source /opt/rh/devtoolset-11/enable
+WORKDIR /tmp
+# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
+COPY docker/centos/build_git_centos7.sh build_git_centos7.sh
+
+RUN ./build_git_centos7.sh
+RUN git clone https://github.com/openssl/openssl
+WORKDIR /tmp/openssl
+RUN git checkout OpenSSL_1_1_1q
+#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
+RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
+    make -j $(nproc) && \
+    make install_sw
+WORKDIR /tmp
+# FROM centos:7 as build
+
+RUN git clone https://github.com/xrplf/clio.git
+COPY docker/shared/build_boost.sh build_boost.sh
+ENV OPENSSL_ROOT=/opt/local/openssl
+ENV BOOST_ROOT=/boost
+RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
+RUN yum install -y bison flex
+RUN yum install -y rpmdevtools rpmlint
+RUN source /opt/rh/devtoolset-11/enable && cd /tmp/clio && \
+    cmake -B build -DBUILD_TESTS=1 && \
+    cmake --build build --parallel $(nproc)
+RUN mkdir output
+RUN strip clio/build/clio_server && strip clio/build/clio_tests
+RUN cp clio/build/clio_tests output/ && cp clio/build/clio_server output/
+RUN cp clio/example-config.json output/example-config.json
+
+FROM centos:7
+COPY --from=build /tmp/output /clio
+RUN mkdir -p /opt/clio/etc && mv /clio/example-config.json /opt/clio/etc/config.json
+
+CMD ["/clio/clio_server", "/opt/clio/etc/config.json"]
docker/centos/build_git_centos7.sh (new executable file, +18)
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -ex
+GIT_VERSION="2.37.1"
+curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
+tar zxvf git-${GIT_VERSION}.tar.gz
+cd git-${GIT_VERSION}
+
+yum install -y centos-release-scl epel-release
+yum update -y
+yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel
+
+source /opt/rh/devtoolset-11/enable
+make configure
+./configure
+make git -j$(nproc)
+make install git
+git --version | cut -d ' ' -f3
docker/centos/install_cmake.sh (new executable file, +11)
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -eo pipefail
+
+CMAKE_VERSION=${1:-"3.16.3"}
+cd /tmp
+URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
+curl -OJLs $URL
+tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
+mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
+ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
docker/clio_docker/centos/build_boost.sh (new executable file, +13)
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -exu
+
+#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
+# it's either those or link=static that halves the failures. probably link=static
+BOOST_VERSION=$1
+BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
+echo "BOOST_VERSION: ${BOOST_VERSION}"
+echo "BOOST_VERSION_: ${BOOST_VERSION_}"
+curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
+tar zxf "boost_${BOOST_VERSION_}.tar.gz"
+cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
+mkdir -p /boost && mv boost /boost && mv stage /boost
docker/clio_docker/centos/build_git_centos7.sh (new executable file, +18)
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -ex
+GIT_VERSION="2.37.1"
+curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
+tar zxvf git-${GIT_VERSION}.tar.gz
+cd git-${GIT_VERSION}
+
+yum install -y centos-release-scl epel-release
+yum update -y
+yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel
+
+source /opt/rh/devtoolset-11/enable
+make configure
+./configure
+make git -j$(nproc)
+make install git
+git --version | cut -d ' ' -f3
docker/clio_docker/centos/dockerfile (new file, +34)
@@ -0,0 +1,34 @@
+FROM centos:7
+
+ENV CLIO_DIR=/opt/clio/
+# ENV OPENSSL_DIR=/opt/openssl
+
+RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
+RUN yum install -y devtoolset-11
+ENV version=3.16
+ENV build=3
+# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
+COPY install_cmake.sh /install_cmake.sh
+RUN /install_cmake.sh 3.16.3 /usr/local
+RUN source /opt/rh/devtoolset-11/enable
+WORKDIR /tmp
+# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
+COPY build_git_centos7.sh build_git_centos7.sh
+
+RUN ./build_git_centos7.sh
+RUN git clone https://github.com/openssl/openssl
+WORKDIR /tmp/openssl
+RUN git checkout OpenSSL_1_1_1q
+#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
+RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
+    make -j $(nproc) && \
+    make install_sw
+WORKDIR /tmp
+RUN git clone https://github.com/xrplf/clio.git
+COPY build_boost.sh build_boost.sh
+ENV OPENSSL_ROOT=/opt/local/openssl
+ENV BOOST_ROOT=/boost
+RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
+RUN yum install -y bison flex
+RUN source /opt/rh/devtoolset-11/enable && \
+    cd /tmp/clio && cmake -B build -Dtests=0 -Dlocal_libarchive=1 -Dunity=0 -DBUILD_TESTS=0 && cmake --build build --parallel $(nproc)
docker/clio_docker/centos/install_cmake.sh (new executable file, +11)
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -eo pipefail
+
+CMAKE_VERSION=${1:-"3.16.3"}
+cd /tmp
+URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
+curl -OJLs $URL
+tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
+mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
+ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
docker/shared/build_boost.sh (new executable file, +13)
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -exu
+
+#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
+# it's either those or link=static that halves the failures. probably link=static
+BOOST_VERSION=$1
+BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
+echo "BOOST_VERSION: ${BOOST_VERSION}"
+echo "BOOST_VERSION_: ${BOOST_VERSION_}"
+curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
+tar zxf "boost_${BOOST_VERSION_}.tar.gz"
+cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
+mkdir -p /boost && mv boost /boost && mv stage /boost
docker/shared/install_cmake.sh (new executable file, +11)
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -eo pipefail
+
+CMAKE_VERSION=${1:-"3.16.3"}
+cd /tmp
+URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
+curl -OJLs $URL
+tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
+mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
+ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
docker/shared/install_openssl.sh (new executable file, +3)
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+set -e
docker/ubuntu/Dockerfile (new file, +24)
@@ -0,0 +1,24 @@
+FROM ubuntu:20.04 AS boost
+
+RUN apt-get update && apt-get install -y build-essential
+ARG BOOST_VERSION_=1_75_0
+ARG BOOST_VERSION=1.75.0
+COPY docker/shared/build_boost.sh .
+RUN apt install -y curl
+RUN ./build_boost.sh ${BOOST_VERSION}
+ENV BOOST_ROOT=/boost
+
+FROM ubuntu:20.04 AS build
+ENV BOOST_ROOT=/boost
+COPY --from=boost /boost /boost
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && apt-get install --no-install-recommends -y build-essential software-properties-common pkg-config libssl-dev wget curl gpg git zlib1g-dev bison flex autoconf lsb-release
+RUN apt install -y gpg-agent
+RUN wget https://apt.llvm.org/llvm.sh
+RUN chmod +x llvm.sh && ./llvm.sh 14 && ./llvm.sh 15
+# COPY . /clio
+## Install cmake
+ARG CMAKE_VERSION=3.16.3
+COPY docker/shared/install_cmake.sh .
+RUN ./install_cmake.sh ${CMAKE_VERSION}
+ENV PATH="/opt/local/cmake/bin:$PATH"
@@ -30,8 +30,11 @@
     "port":51233
   },
   "log_level":"debug",
-  "log_file":"./clio.log",
-  "online_delete":0,
+  "log_to_console": true,
+  "log_directory":"./clio_log",
+  "log_rotation_size": 2048,
+  "log_directory_max_size": 51200,
+  "log_rotation_hour_interval": 12,
   "extractor_threads":8,
   "read_only":false
 }
@@ -19,7 +19,6 @@ BackendInterface::writeLedgerObject(
     std::string&& blob)
 {
     assert(key.size() == sizeof(ripple::uint256));
-    ripple::uint256 key256 = ripple::uint256::fromVoid(key.data());
     doWriteLedgerObject(std::move(key), seq, std::move(blob));
 }
 
@@ -259,7 +258,8 @@ BackendInterface::fetchLedgerPage(
         ripple::uint256 const& curCursor = keys.size() ? keys.back()
             : cursor ? *cursor
                      : firstKey;
-        uint32_t seq = outOfOrder ? range->maxSequence : ledgerSequence;
+        std::uint32_t const seq =
+            outOfOrder ? range->maxSequence : ledgerSequence;
         auto succ = fetchSuccessorKey(curCursor, seq, yield);
         if (!succ)
             reachedEnd = true;
@@ -275,7 +275,8 @@ BackendInterface::fetchLedgerPage(
         else if (!outOfOrder)
         {
             BOOST_LOG_TRIVIAL(error)
-                << __func__ << " incorrect successor table. key = "
+                << __func__
+                << " deleted or non-existent object in successor table. key = "
                 << ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
             std::stringstream msg;
             for (size_t j = 0; j < objects.size(); ++j)
@@ -283,7 +284,6 @@ BackendInterface::fetchLedgerPage(
                 msg << " - " << ripple::strHex(keys[j]);
             }
             BOOST_LOG_TRIVIAL(error) << __func__ << msg.str();
-            assert(false);
         }
     }
     if (keys.size() && !reachedEnd)
@@ -162,12 +162,12 @@ public:
         std::vector<ripple::uint256> const& hashes,
         boost::asio::yield_context& yield) const = 0;
 
-    virtual AccountTransactions
+    virtual TransactionsAndCursor
     fetchAccountTransactions(
         ripple::AccountID const& account,
         std::uint32_t const limit,
         bool forward,
-        std::optional<AccountTransactionsCursor> const& cursor,
+        std::optional<TransactionsCursor> const& cursor,
         boost::asio::yield_context& yield) const = 0;
 
     virtual std::vector<TransactionAndMetadata>
@@ -180,6 +180,21 @@ public:
|
|||||||
std::uint32_t const ledgerSequence,
|
std::uint32_t const ledgerSequence,
|
||||||
boost::asio::yield_context& yield) const = 0;
|
boost::asio::yield_context& yield) const = 0;
|
||||||
|
|
||||||
|
// *** NFT methods
|
||||||
|
virtual std::optional<NFT>
|
||||||
|
fetchNFT(
|
||||||
|
ripple::uint256 const& tokenID,
|
||||||
|
std::uint32_t const ledgerSequence,
|
||||||
|
boost::asio::yield_context& yield) const = 0;
|
||||||
|
|
||||||
|
virtual TransactionsAndCursor
|
||||||
|
fetchNFTTransactions(
|
||||||
|
ripple::uint256 const& tokenID,
|
||||||
|
std::uint32_t const limit,
|
||||||
|
bool const forward,
|
||||||
|
std::optional<TransactionsCursor> const& cursorIn,
|
||||||
|
boost::asio::yield_context& yield) const = 0;
|
||||||
|
|
||||||
// *** state data methods
|
// *** state data methods
|
||||||
std::optional<Blob>
|
std::optional<Blob>
|
||||||
fetchLedgerObject(
|
fetchLedgerObject(
|
||||||
@@ -285,9 +300,15 @@ public:
|
|||||||
std::string&& transaction,
|
std::string&& transaction,
|
||||||
std::string&& metadata) = 0;
|
std::string&& metadata) = 0;
|
||||||
|
|
||||||
|
virtual void
|
||||||
|
writeNFTs(std::vector<NFTsData>&& data) = 0;
|
||||||
|
|
||||||
virtual void
|
virtual void
|
||||||
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
|
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
|
||||||
|
|
||||||
|
virtual void
|
||||||
|
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;
|
||||||
|
|
||||||
virtual void
|
virtual void
|
||||||
writeSuccessor(
|
writeSuccessor(
|
||||||
std::string&& key,
|
std::string&& key,
|
||||||
|
|||||||
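The new `fetchNFTTransactions` method mirrors the cursor-based paging contract of `fetchAccountTransactions`: the caller passes an empty cursor for the first page, then keeps passing back the cursor returned with each page until it comes back disengaged. Below is a minimal caller sketch, assuming `TransactionsAndCursor` exposes `txns` and `cursor` members and that both types live in the `Backend` namespace — neither detail is visible in this diff — and that the caller already runs inside a coroutine with a `yield_context`:

```cpp
#include <optional>

#include <boost/asio/spawn.hpp>
#include <backend/BackendInterface.h>

// Sketch only: walk one NFT's transaction history newest-first, one page at
// a time. The member names `txns` and `cursor` are assumptions.
void
forEachNFTTransaction(
    Backend::BackendInterface const& backend,
    ripple::uint256 const& tokenID,
    boost::asio::yield_context& yield)
{
    std::optional<Backend::TransactionsCursor> cursor;
    do
    {
        auto page = backend.fetchNFTTransactions(
            tokenID, 100, /* forward = */ false, cursor, yield);
        for (auto const& txn : page.txns)
            (void)txn;  // application-defined handling of each transaction
        cursor = page.cursor;  // disengaged once the last page is reached
    } while (cursor);
}
```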
@@ -1,7 +1,9 @@
+#include <ripple/app/tx/impl/details/NFTokenUtils.h>
 #include <backend/CassandraBackend.h>
 #include <backend/DBHelpers.h>
 #include <functional>
 #include <unordered_map>
 
 namespace Backend {
 
 // Type alias for async completion handlers
@@ -178,7 +180,7 @@ CassandraBackend::doWriteLedgerObject(
     if (range)
         makeAndExecuteAsyncWrite(
             this,
-            std::move(std::make_tuple(seq, key)),
+            std::make_tuple(seq, key),
             [this](auto& params) {
                 auto& [sequence, key] = params.data;
 
@@ -190,7 +192,7 @@ CassandraBackend::doWriteLedgerObject(
             "ledger_diff");
     makeAndExecuteAsyncWrite(
         this,
-        std::move(std::make_tuple(std::move(key), seq, std::move(blob))),
+        std::make_tuple(std::move(key), seq, std::move(blob)),
         [this](auto& params) {
             auto& [key, sequence, blob] = params.data;
 
@@ -215,7 +217,7 @@ CassandraBackend::writeSuccessor(
     assert(successor.size() != 0);
     makeAndExecuteAsyncWrite(
         this,
-        std::move(std::make_tuple(std::move(key), seq, std::move(successor))),
+        std::make_tuple(std::move(key), seq, std::move(successor)),
         [this](auto& params) {
             auto& [key, sequence, successor] = params.data;
 
@@ -234,7 +236,7 @@ CassandraBackend::writeLedger(
 {
     makeAndExecuteAsyncWrite(
         this,
-        std::move(std::make_tuple(ledgerInfo.seq, std::move(header))),
+        std::make_tuple(ledgerInfo.seq, std::move(header)),
         [this](auto& params) {
             auto& [sequence, header] = params.data;
             CassandraStatement statement{insertLedgerHeader_};
@@ -245,7 +247,7 @@ CassandraBackend::writeLedger(
         "ledger");
     makeAndExecuteAsyncWrite(
         this,
-        std::move(std::make_tuple(ledgerInfo.hash, ledgerInfo.seq)),
+        std::make_tuple(ledgerInfo.hash, ledgerInfo.seq),
         [this](auto& params) {
             auto& [hash, sequence] = params.data;
             CassandraStatement statement{insertLedgerHash_};
@@ -256,6 +258,7 @@ CassandraBackend::writeLedger(
         "ledger_hash");
     ledgerSequence_ = ledgerInfo.seq;
 }
 
 void
 CassandraBackend::writeAccountTransactions(
     std::vector<AccountTransactionsData>&& data)
@@ -266,11 +269,11 @@ CassandraBackend::writeAccountTransactions(
     {
         makeAndExecuteAsyncWrite(
             this,
-            std::move(std::make_tuple(
+            std::make_tuple(
                 std::move(account),
                 record.ledgerSequence,
                 record.transactionIndex,
-                record.txHash)),
+                record.txHash),
             [this](auto& params) {
                 CassandraStatement statement(insertAccountTx_);
                 auto& [account, lgrSeq, txnIdx, hash] = params.data;
@@ -283,6 +286,31 @@ CassandraBackend::writeAccountTransactions(
         }
     }
 }
+
+void
+CassandraBackend::writeNFTTransactions(std::vector<NFTTransactionsData>&& data)
+{
+    for (NFTTransactionsData const& record : data)
+    {
+        makeAndExecuteAsyncWrite(
+            this,
+            std::make_tuple(
+                record.tokenID,
+                record.ledgerSequence,
+                record.transactionIndex,
+                record.txHash),
+            [this](auto const& params) {
+                CassandraStatement statement(insertNFTTx_);
+                auto const& [tokenID, lgrSeq, txnIdx, txHash] = params.data;
+                statement.bindNextBytes(tokenID);
+                statement.bindNextIntTuple(lgrSeq, txnIdx);
+                statement.bindNextBytes(txHash);
+                return statement;
+            },
+            "nf_token_transactions");
+    }
+}
+
 void
 CassandraBackend::writeTransaction(
     std::string&& hash,
@@ -296,7 +324,7 @@ CassandraBackend::writeTransaction(
 
     makeAndExecuteAsyncWrite(
         this,
-        std::move(std::make_pair(seq, hash)),
+        std::make_pair(seq, hash),
         [this](auto& params) {
             CassandraStatement statement{insertLedgerTransaction_};
             statement.bindNextInt(params.data.first);
@@ -306,12 +334,12 @@ CassandraBackend::writeTransaction(
         "ledger_transaction");
     makeAndExecuteAsyncWrite(
         this,
-        std::move(std::make_tuple(
+        std::make_tuple(
             std::move(hash),
             seq,
             date,
             std::move(transaction),
-            std::move(metadata))),
+            std::move(metadata)),
         [this](auto& params) {
             CassandraStatement statement{insertTransaction_};
             auto& [hash, sequence, date, transaction, metadata] = params.data;
@@ -325,6 +353,43 @@ CassandraBackend::writeTransaction(
         "transaction");
 }
 
+void
+CassandraBackend::writeNFTs(std::vector<NFTsData>&& data)
+{
+    for (NFTsData const& record : data)
+    {
+        makeAndExecuteAsyncWrite(
+            this,
+            std::make_tuple(
+                record.tokenID,
+                record.ledgerSequence,
+                record.owner,
+                record.isBurned),
+            [this](auto const& params) {
+                CassandraStatement statement{insertNFT_};
+                auto const& [tokenID, lgrSeq, owner, isBurned] = params.data;
+                statement.bindNextBytes(tokenID);
+                statement.bindNextInt(lgrSeq);
+                statement.bindNextBytes(owner);
+                statement.bindNextBoolean(isBurned);
+                return statement;
+            },
+            "nf_tokens");
+
+        makeAndExecuteAsyncWrite(
+            this,
+            std::make_tuple(record.tokenID),
+            [this](auto const& params) {
+                CassandraStatement statement{insertIssuerNFT_};
+                auto const& [tokenID] = params.data;
+                statement.bindNextBytes(ripple::nft::getIssuer(tokenID));
+                statement.bindNextBytes(tokenID);
+                return statement;
+            },
+            "issuer_nf_tokens");
+    }
+}
+
 std::optional<LedgerRange>
 CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
 {
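Both new writers follow the existing fire-and-forget pattern: one prepared insert per record for `nf_token_transactions`, and two per record for `writeNFTs` (the `nf_tokens` row plus the `issuer_nf_tokens` index row). A hedged sketch of how an extractor might feed them, using the `NFTsData`/`NFTTransactionsData` constructors added in `DBHelpers.h` later in this change; the surrounding bindings (`backend`, `meta`, and the token fields) are assumptions, not code from this diff:

```cpp
#include <vector>

#include <ripple/protocol/TxMeta.h>
#include <backend/CassandraBackend.h>
#include <backend/DBHelpers.h>

// Sketch only: how one NFT-touching transaction might be turned into the
// two kinds of NFT records and handed to the backend. All parameters are
// assumed to come from the ETL pipeline.
void
writeOneNFTUpdate(
    Backend::CassandraBackend& backend,
    ripple::TxMeta const& meta,
    ripple::uint256 const& txHash,
    ripple::uint256 const& tokenID,
    ripple::AccountID const& owner,
    bool isBurned)
{
    std::vector<NFTTransactionsData> txs;
    txs.emplace_back(tokenID, meta, txHash);

    std::vector<NFTsData> nfts;
    nfts.emplace_back(tokenID, owner, meta, isBurned);

    // Each call fires its prepared inserts asynchronously; writeNFTs also
    // maintains the issuer_nf_tokens index as a second insert per record.
    backend.writeNFTTransactions(std::move(txs));
    backend.writeNFTs(std::move(nfts));
}
```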
@@ -502,21 +567,119 @@ CassandraBackend::fetchAllTransactionHashesInLedger(
     return hashes;
 }
 
-AccountTransactions
+std::optional<NFT>
+CassandraBackend::fetchNFT(
+    ripple::uint256 const& tokenID,
+    std::uint32_t const ledgerSequence,
+    boost::asio::yield_context& yield) const
+{
+    CassandraStatement statement{selectNFT_};
+    statement.bindNextBytes(tokenID);
+    statement.bindNextInt(ledgerSequence);
+    CassandraResult response = executeAsyncRead(statement, yield);
+    if (!response)
+        return {};
+
+    NFT result;
+    result.tokenID = tokenID;
+    result.ledgerSequence = response.getUInt32();
+    result.owner = response.getBytes();
+    result.isBurned = response.getBool();
+    return result;
+}
+
+TransactionsAndCursor
+CassandraBackend::fetchNFTTransactions(
+    ripple::uint256 const& tokenID,
+    std::uint32_t const limit,
+    bool const forward,
+    std::optional<TransactionsCursor> const& cursorIn,
+    boost::asio::yield_context& yield) const
+{
+    auto cursor = cursorIn;
+    auto rng = fetchLedgerRange();
+    if (!rng)
+        return {{}, {}};
+
+    CassandraStatement statement = forward
+        ? CassandraStatement(selectNFTTxForward_)
+        : CassandraStatement(selectNFTTx_);
+
+    statement.bindNextBytes(tokenID);
+
+    if (cursor)
+    {
+        statement.bindNextIntTuple(
+            cursor->ledgerSequence, cursor->transactionIndex);
+        BOOST_LOG_TRIVIAL(debug) << " token_id = " << ripple::strHex(tokenID)
+                                 << " tuple = " << cursor->ledgerSequence
+                                 << " : " << cursor->transactionIndex;
+    }
+    else
+    {
+        int const seq = forward ? rng->minSequence : rng->maxSequence;
+        int const placeHolder =
+            forward ? 0 : std::numeric_limits<std::uint32_t>::max();
+
+        statement.bindNextIntTuple(placeHolder, placeHolder);
+        BOOST_LOG_TRIVIAL(debug)
+            << " token_id = " << ripple::strHex(tokenID) << " idx = " << seq
+            << " tuple = " << placeHolder;
+    }
+
+    statement.bindNextUInt(limit);
+
+    CassandraResult result = executeAsyncRead(statement, yield);
+
+    if (!result.hasResult())
+    {
+        BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned";
+        return {};
+    }
+
+    std::vector<ripple::uint256> hashes = {};
+    auto numRows = result.numRows();
+    BOOST_LOG_TRIVIAL(info) << "num_rows = " << numRows;
+    do
+    {
+        hashes.push_back(result.getUInt256());
+        if (--numRows == 0)
+        {
+            BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor";
+            auto const [lgrSeq, txnIdx] = result.getInt64Tuple();
+            cursor = {
+                static_cast<std::uint32_t>(lgrSeq),
+                static_cast<std::uint32_t>(txnIdx)};
+
+            if (forward)
+                ++cursor->transactionIndex;
+        }
+    } while (result.nextRow());
+
+    auto txns = fetchTransactions(hashes, yield);
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " txns = " << txns.size();
+
+    if (txns.size() == limit)
+    {
+        BOOST_LOG_TRIVIAL(debug) << __func__ << " returning cursor";
+        return {txns, cursor};
+    }
+
+    return {txns, {}};
+}
+
+TransactionsAndCursor
 CassandraBackend::fetchAccountTransactions(
     ripple::AccountID const& account,
     std::uint32_t const limit,
     bool const forward,
-    std::optional<AccountTransactionsCursor> const& cursorIn,
+    std::optional<TransactionsCursor> const& cursorIn,
     boost::asio::yield_context& yield) const
 {
     auto rng = fetchLedgerRange();
     if (!rng)
         return {{}, {}};
 
-    auto keylet = ripple::keylet::account(account);
-    auto cursor = cursorIn;
 
     CassandraStatement statement = [this, forward]() {
         if (forward)
             return CassandraStatement{selectAccountTxForward_};
@@ -524,6 +687,7 @@ CassandraBackend::fetchAccountTransactions(
         return CassandraStatement{selectAccountTx_};
     }();
 
+    auto cursor = cursorIn;
     statement.bindNextBytes(account);
     if (cursor)
     {
@@ -535,8 +699,8 @@ CassandraBackend::fetchAccountTransactions(
     }
     else
     {
-        int seq = forward ? rng->minSequence : rng->maxSequence;
-        int placeHolder =
+        int const seq = forward ? rng->minSequence : rng->maxSequence;
+        int const placeHolder =
             forward ? 0 : std::numeric_limits<std::uint32_t>::max();
 
         statement.bindNextIntTuple(placeHolder, placeHolder);
@@ -584,6 +748,7 @@ CassandraBackend::fetchAccountTransactions(
 
     return {txns, {}};
 }
 
 std::optional<ripple::uint256>
 CassandraBackend::doFetchSuccessorKey(
     ripple::uint256 key,
@@ -895,8 +1060,8 @@ CassandraBackend::open(bool readOnly)
         cass_cluster_set_credentials(
             cluster, username.c_str(), getString("password").c_str());
     }
-    int threads = getInt("threads") ? *getInt("threads")
-                                    : std::thread::hardware_concurrency();
+    int threads =
+        getInt("threads").value_or(std::thread::hardware_concurrency());
 
     rc = cass_cluster_set_num_threads_io(cluster, threads);
     if (rc != CASS_OK)
@@ -1179,6 +1344,64 @@ CassandraBackend::open(bool readOnly)
               << " LIMIT 1";
         if (!executeSimpleStatement(query.str()))
             continue;
+
+        query.str("");
+        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "nf_tokens"
+              << " ("
+              << " token_id blob,"
+              << " sequence bigint,"
+              << " owner blob,"
+              << " is_burned boolean,"
+              << " PRIMARY KEY (token_id, sequence)"
+              << " )"
+              << " WITH CLUSTERING ORDER BY (sequence DESC)"
+              << " AND default_time_to_live = " << ttl;
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
+        query.str("");
+        query << "SELECT * FROM " << tablePrefix << "nf_tokens"
+              << " LIMIT 1";
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
+        query.str("");
+        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix
+              << "issuer_nf_tokens"
+              << " ("
+              << " issuer blob,"
+              << " token_id blob,"
+              << " PRIMARY KEY (issuer, token_id)"
+              << " )";
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
+        query.str("");
+        query << "SELECT * FROM " << tablePrefix << "issuer_nf_tokens"
+              << " LIMIT 1";
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
+        query.str("");
+        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix
+              << "nf_token_transactions"
+              << " ("
+              << " token_id blob,"
+              << " seq_idx tuple<bigint, bigint>,"
+              << " hash blob,"
+              << " PRIMARY KEY (token_id, seq_idx)"
+              << " )"
+              << " WITH CLUSTERING ORDER BY (seq_idx DESC)"
+              << " AND default_time_to_live = " << ttl;
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
+        query.str("");
+        query << "SELECT * FROM " << tablePrefix << "nf_token_transactions"
+              << " LIMIT 1";
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
         setupSessionAndTable = true;
     }
 
@@ -1296,6 +1519,57 @@ CassandraBackend::open(bool readOnly)
         if (!selectAccountTxForward_.prepareStatement(query, session_.get()))
             continue;
 
+        query.str("");
+        query << "INSERT INTO " << tablePrefix << "nf_tokens"
+              << " (token_id,sequence,owner,is_burned)"
+              << " VALUES (?,?,?,?)";
+        if (!insertNFT_.prepareStatement(query, session_.get()))
+            continue;
+
+        query.str("");
+        query << "SELECT sequence,owner,is_burned"
+              << " FROM " << tablePrefix << "nf_tokens WHERE"
+              << " token_id = ? AND"
+              << " sequence <= ?"
+              << " ORDER BY sequence DESC"
+              << " LIMIT 1";
+        if (!selectNFT_.prepareStatement(query, session_.get()))
+            continue;
+
+        query.str("");
+        query << "INSERT INTO " << tablePrefix << "issuer_nf_tokens"
+              << " (issuer,token_id)"
+              << " VALUES (?,?)";
+        if (!insertIssuerNFT_.prepareStatement(query, session_.get()))
+            continue;
+
+        query.str("");
+        query << "INSERT INTO " << tablePrefix << "nf_token_transactions"
+              << " (token_id,seq_idx,hash)"
+              << " VALUES (?,?,?)";
+        if (!insertNFTTx_.prepareStatement(query, session_.get()))
+            continue;
+
+        query.str("");
+        query << "SELECT hash,seq_idx"
+              << " FROM " << tablePrefix << "nf_token_transactions WHERE"
+              << " token_id = ? AND"
+              << " seq_idx < ?"
+              << " ORDER BY seq_idx DESC"
+              << " LIMIT ?";
+        if (!selectNFTTx_.prepareStatement(query, session_.get()))
+            continue;
+
+        query.str("");
+        query << "SELECT hash,seq_idx"
+              << " FROM " << tablePrefix << "nf_token_transactions WHERE"
+              << " token_id = ? AND"
+              << " seq_idx >= ?"
+              << " ORDER BY seq_idx ASC"
+              << " LIMIT ?";
+        if (!selectNFTTxForward_.prepareStatement(query, session_.get()))
+            continue;
+
         query.str("");
         query << " INSERT INTO " << tablePrefix << "ledgers "
               << " (sequence, header) VALUES(?,?)";
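The prepared `selectNFT_` query pins down the point-in-time semantics of `fetchNFT`: `sequence <= ? ORDER BY sequence DESC LIMIT 1` returns the newest `nf_tokens` row at or before the requested ledger, so burns and ownership changes up to and including that ledger are reflected. A hedged caller sketch, assuming a `CassandraBackend` that has already been opened (how it is constructed is outside this diff):

```cpp
#include <cstdint>
#include <iostream>

#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <backend/CassandraBackend.h>

// Sketch only: read one NFT's state as of ledger `seq`. fetchNFT suspends
// on the yield_context while the prepared read executes.
void
printNFTState(
    Backend::CassandraBackend const& backend,
    ripple::uint256 const& tokenID,
    std::uint32_t seq)
{
    boost::asio::io_context ioc;
    boost::asio::spawn(ioc, [&](boost::asio::yield_context yield) {
        // Resolves to the newest nf_tokens row with sequence <= seq.
        if (auto nft = backend.fetchNFT(tokenID, seq, yield))
            std::cout << "burned: " << std::boolalpha << nft->isBurned << "\n";
        else
            std::cout << "token not found at that ledger\n";
    });
    ioc.run();
}
```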
@@ -115,7 +115,7 @@ public:
             throw std::runtime_error(
                 "CassandraStatement::bindNextBoolean - statement_ is null");
         CassError rc = cass_statement_bind_bool(
-            statement_, 1, static_cast<cass_bool_t>(val));
+            statement_, curBindingIndex_, static_cast<cass_bool_t>(val));
         if (rc != CASS_OK)
         {
             std::stringstream ss;
@@ -481,6 +481,33 @@ public:
         return {first, second};
     }
 
+    // TODO: should be replaced with a templated implementation as is very
+    // similar to other getters
+    bool
+    getBool()
+    {
+        if (!row_)
+        {
+            std::stringstream msg;
+            msg << __func__ << " - no result";
+            BOOST_LOG_TRIVIAL(error) << msg.str();
+            throw std::runtime_error(msg.str());
+        }
+        cass_bool_t val;
+        CassError rc =
+            cass_value_get_bool(cass_row_get_column(row_, curGetIndex_), &val);
+        if (rc != CASS_OK)
+        {
+            std::stringstream msg;
+            msg << __func__ << " - error getting value: " << rc << ", "
+                << cass_error_desc(rc);
+            BOOST_LOG_TRIVIAL(error) << msg.str();
+            throw std::runtime_error(msg.str());
+        }
+        ++curGetIndex_;
+        return val;
+    }
+
     ~CassandraResult()
     {
         if (result_ != nullptr)
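The TODO on `getBool` points at the obvious refactor: every getter repeats the same null-row check, driver call, and error handling, differing only in the `cass_value_get_*` function invoked. One possible shape for that templated version is sketched below; it is an illustration, not code from the repository, and assumes only the DataStax C/C++ driver headers:

```cpp
#include <sstream>
#include <stdexcept>

#include <cassandra.h>

// Sketch of the templated getter the TODO hints at. Each specialization
// wraps the matching driver call; the row/error handling is shared.
template <typename T>
CassError
getCassValue(CassRow const* row, size_t idx, T& out);

template <>
CassError
getCassValue(CassRow const* row, size_t idx, cass_bool_t& out)
{
    return cass_value_get_bool(cass_row_get_column(row, idx), &out);
}

template <>
CassError
getCassValue(CassRow const* row, size_t idx, cass_int64_t& out)
{
    return cass_value_get_int64(cass_row_get_column(row, idx), &out);
}

template <typename T>
T
getNext(CassRow const* row, size_t& curGetIndex)
{
    if (!row)
        throw std::runtime_error("no result");
    T val;
    if (CassError rc = getCassValue(row, curGetIndex, val); rc != CASS_OK)
    {
        std::stringstream msg;
        msg << "error getting value: " << rc << ", " << cass_error_desc(rc);
        throw std::runtime_error(msg.str());
    }
    ++curGetIndex;
    return val;
}
```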
@@ -599,6 +626,12 @@ private:
    CassandraPreparedStatement insertAccountTx_;
    CassandraPreparedStatement selectAccountTx_;
    CassandraPreparedStatement selectAccountTxForward_;
+    CassandraPreparedStatement insertNFT_;
+    CassandraPreparedStatement selectNFT_;
+    CassandraPreparedStatement insertIssuerNFT_;
+    CassandraPreparedStatement insertNFTTx_;
+    CassandraPreparedStatement selectNFTTx_;
+    CassandraPreparedStatement selectNFTTxForward_;
    CassandraPreparedStatement insertLedgerHeader_;
    CassandraPreparedStatement insertLedgerHash_;
    CassandraPreparedStatement updateLedgerRange_;
@@ -615,9 +648,6 @@ private:
    // maximum number of concurrent in flight requests. New requests will wait
    // for earlier requests to finish if this limit is exceeded
    std::uint32_t maxRequestsOutstanding = 10000;
-    // we keep this small because the indexer runs in the background, and we
-    // don't want the database to be swamped when the indexer is running
-    std::uint32_t indexerMaxRequestsOutstanding = 10;
    mutable std::atomic_uint32_t numRequestsOutstanding_ = 0;
 
    // mutex and condition_variable to limit the number of concurrent in flight
@@ -683,12 +713,12 @@ public:
         open_ = false;
     }
 
-    AccountTransactions
+    TransactionsAndCursor
     fetchAccountTransactions(
         ripple::AccountID const& account,
         std::uint32_t const limit,
         bool forward,
-        std::optional<AccountTransactionsCursor> const& cursor,
+        std::optional<TransactionsCursor> const& cursor,
         boost::asio::yield_context& yield) const override;
 
     bool
@@ -852,6 +882,20 @@ public:
         std::uint32_t const ledgerSequence,
         boost::asio::yield_context& yield) const override;
 
+    std::optional<NFT>
+    fetchNFT(
+        ripple::uint256 const& tokenID,
+        std::uint32_t const ledgerSequence,
+        boost::asio::yield_context& yield) const override;
+
+    TransactionsAndCursor
+    fetchNFTTransactions(
+        ripple::uint256 const& tokenID,
+        std::uint32_t const limit,
+        bool const forward,
+        std::optional<TransactionsCursor> const& cursorIn,
+        boost::asio::yield_context& yield) const override;
+
     // Synchronously fetch the object with key key, as of ledger with sequence
     // sequence
     std::optional<Blob>
@@ -941,6 +985,9 @@ public:
     writeAccountTransactions(
         std::vector<AccountTransactionsData>&& data) override;
 
+    void
+    writeNFTTransactions(std::vector<NFTTransactionsData>&& data) override;
+
     void
     writeTransaction(
         std::string&& hash,
@@ -949,6 +996,9 @@ public:
         std::string&& transaction,
         std::string&& metadata) override;
 
+    void
+    writeNFTs(std::vector<NFTsData>&& data) override;
+
     void
     startWrites() const override
     {
@@ -1014,6 +1064,7 @@ public:
     {
         return numRequestsOutstanding_ < maxRequestsOutstanding;
     }
 
     inline bool
     finishedAllRequests() const
     {
@@ -9,8 +9,8 @@
 #include <backend/Pg.h>
 #include <backend/Types.h>
 
-/// Struct used to keep track of what to write to transactions and
-/// account_transactions tables in Postgres
+/// Struct used to keep track of what to write to
+/// account_transactions/account_tx tables
 struct AccountTransactionsData
 {
     boost::container::flat_set<ripple::AccountID> accounts;
@@ -32,6 +32,57 @@ struct AccountTransactionsData
     AccountTransactionsData() = default;
 };
 
+/// Represents a link from a tx to an NFT that was targeted/modified/created
+/// by it. Gets written to nf_token_transactions table and the like.
+struct NFTTransactionsData
+{
+    ripple::uint256 tokenID;
+    std::uint32_t ledgerSequence;
+    std::uint32_t transactionIndex;
+    ripple::uint256 txHash;
+
+    NFTTransactionsData(
+        ripple::uint256 const& tokenID,
+        ripple::TxMeta const& meta,
+        ripple::uint256 const& txHash)
+        : tokenID(tokenID)
+        , ledgerSequence(meta.getLgrSeq())
+        , transactionIndex(meta.getIndex())
+        , txHash(txHash)
+    {
+    }
+};
+
+/// Represents an NFT state at a particular ledger. Gets written to nf_tokens
+/// table and the like.
+struct NFTsData
+{
+    ripple::uint256 tokenID;
+    std::uint32_t ledgerSequence;
+
+    // The transaction index is only stored because we want to store only the
+    // final state of an NFT per ledger. Since we pull this from transactions
+    // we keep track of which tx index created this so we can de-duplicate, as
+    // it is possible for one ledger to have multiple txs that change the
+    // state of the same NFT.
+    std::uint32_t transactionIndex;
+    ripple::AccountID owner;
+    bool isBurned;
+
+    NFTsData(
+        ripple::uint256 const& tokenID,
+        ripple::AccountID const& owner,
+        ripple::TxMeta const& meta,
+        bool isBurned)
+        : tokenID(tokenID)
+        , ledgerSequence(meta.getLgrSeq())
+        , transactionIndex(meta.getIndex())
+        , owner(owner)
+        , isBurned(isBurned)
+    {
+    }
+};
+
 template <class T>
 inline bool
 isOffer(T const& object)
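The `transactionIndex` member of `NFTsData` exists purely so a writer can de-duplicate: when several transactions in one ledger touch the same NFT, only the state produced by the highest transaction index should be persisted. A hedged sketch of that de-duplication (an illustrative helper, not part of this change):

```cpp
#include <algorithm>
#include <tuple>
#include <vector>

#include <backend/DBHelpers.h>

// Illustrative helper: keep only the final per-ledger state of each NFT,
// i.e. the record with the highest transactionIndex within each
// (tokenID, ledgerSequence) group.
inline std::vector<NFTsData>
keepFinalStatePerLedger(std::vector<NFTsData>&& in)
{
    // Sort descending so the last state in each ledger comes first...
    std::sort(in.begin(), in.end(), [](NFTsData const& a, NFTsData const& b) {
        return std::tie(a.tokenID, a.ledgerSequence, a.transactionIndex) >
            std::tie(b.tokenID, b.ledgerSequence, b.transactionIndex);
    });
    // ...then drop every later duplicate of the same (token, ledger) pair.
    auto const last = std::unique(
        in.begin(), in.end(), [](NFTsData const& a, NFTsData const& b) {
            return a.tokenID == b.tokenID &&
                a.ledgerSequence == b.ledgerSequence;
        });
    in.erase(last, in.end());
    return std::move(in);
}
```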
@@ -1,110 +0,0 @@
-#include <backend/LayeredCache.h>
-namespace Backend {
-
-void
-LayeredCache::insert(
-    ripple::uint256 const& key,
-    Blob const& value,
-    uint32_t seq)
-{
-    auto entry = map_[key];
-    // stale insert, do nothing
-    if (seq <= entry.recent.seq)
-        return;
-    entry.old = entry.recent;
-    entry.recent = {seq, value};
-    if (value.empty())
-        pendingDeletes_.push_back(key);
-    if (!entry.old.blob.empty())
-        pendingSweeps_.push_back(key);
-}
-
-std::optional<Blob>
-LayeredCache::select(CacheEntry const& entry, uint32_t seq) const
-{
-    if (seq < entry.old.seq)
-        return {};
-    if (seq < entry.recent.seq && !entry.old.blob.empty())
-        return entry.old.blob;
-    if (!entry.recent.blob.empty())
-        return entry.recent.blob;
-    return {};
-}
-void
-LayeredCache::update(std::vector<LedgerObject> const& blobs, uint32_t seq)
-{
-    std::unique_lock lck{mtx_};
-    if (seq > mostRecentSequence_)
-        mostRecentSequence_ = seq;
-    for (auto const& k : pendingSweeps_)
-    {
-        auto e = map_[k];
-        e.old = {};
-    }
-    for (auto const& k : pendingDeletes_)
-    {
-        map_.erase(k);
-    }
-    for (auto const& b : blobs)
-    {
-        insert(b.key, b.blob, seq);
-    }
-}
-std::optional<LedgerObject>
-LayeredCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
-{
-    ripple::uint256 curKey = key;
-    while (true)
-    {
-        std::shared_lock lck{mtx_};
-        if (seq < mostRecentSequence_ - 1)
-            return {};
-        auto e = map_.upper_bound(curKey);
-        if (e == map_.end())
-            return {};
-        auto const& entry = e->second;
-        auto blob = select(entry, seq);
-        if (!blob)
-        {
-            curKey = e->first;
-            continue;
-        }
-        else
-            return {{e->first, *blob}};
-    }
-}
-std::optional<LedgerObject>
-LayeredCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
-{
-    ripple::uint256 curKey = key;
-    std::shared_lock lck{mtx_};
-    while (true)
-    {
-        if (seq < mostRecentSequence_ - 1)
-            return {};
-        auto e = map_.lower_bound(curKey);
-        --e;
-        if (e == map_.begin())
-            return {};
-        auto const& entry = e->second;
-        auto blob = select(entry, seq);
-        if (!blob)
-        {
-            curKey = e->first;
-            continue;
-        }
-        else
-            return {{e->first, *blob}};
-    }
-}
-std::optional<Blob>
-LayeredCache::get(ripple::uint256 const& key, uint32_t seq) const
-{
-    std::shared_lock lck{mtx_};
-    auto e = map_.find(key);
-    if (e == map_.end())
-        return {};
-    auto const& entry = e->second;
-    return select(entry, seq);
-}
-} // namespace Backend
@@ -1,73 +0,0 @@
-#ifndef CLIO_LAYEREDCACHE_H_INCLUDED
-#define CLIO_LAYEREDCACHE_H_INCLUDED
-
-#include <ripple/basics/base_uint.h>
-#include <backend/Types.h>
-#include <map>
-#include <mutex>
-#include <shared_mutex>
-#include <utility>
-#include <vector>
-namespace Backend {
-class LayeredCache
-{
-    struct SeqBlobPair
-    {
-        uint32_t seq;
-        Blob blob;
-    };
-    struct CacheEntry
-    {
-        SeqBlobPair recent;
-        SeqBlobPair old;
-    };
-
-    std::map<ripple::uint256, CacheEntry> map_;
-    std::vector<ripple::uint256> pendingDeletes_;
-    std::vector<ripple::uint256> pendingSweeps_;
-    mutable std::shared_mutex mtx_;
-    uint32_t mostRecentSequence_;
-
-    void
-    insert(ripple::uint256 const& key, Blob const& value, uint32_t seq);
-
-    /*
-    void
-    insert(ripple::uint256 const& key, Blob const& value, uint32_t seq)
-    {
-        map_.emplace(key,{{seq,value,{}});
-    }
-    void
-    update(ripple::uint256 const& key, Blob const& value, uint32_t seq)
-    {
-        auto& entry = map_.find(key);
-        entry.old = entry.recent;
-        entry.recent = {seq, value};
-        pendingSweeps_.push_back(key);
-    }
-    void
-    erase(ripple::uint256 const& key, uint32_t seq)
-    {
-        update(key, {}, seq);
-        pendingDeletes_.push_back(key);
-    }
-    */
-    std::optional<Blob>
-    select(CacheEntry const& entry, uint32_t seq) const;
-
-public:
-    void
-    update(std::vector<LedgerObject> const& blobs, uint32_t seq);
-
-    std::optional<Blob>
-    get(ripple::uint256 const& key, uint32_t seq) const;
-
-    std::optional<LedgerObject>
-    getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
-
-    std::optional<LedgerObject>
-    getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
-};
-
-} // namespace Backend
-#endif
@@ -833,7 +833,7 @@ PgPool::checkout()
     else if (connections_ < config_.max_connections)
     {
         ++connections_;
-        ret = std::make_unique<Pg>(config_, ioc_, stop_, mutex_);
+        ret = std::make_unique<Pg>(config_, ioc_);
     }
     // Otherwise, wait until a connection becomes available or we stop.
     else
@@ -1680,7 +1680,6 @@ getLedger(
         whichLedger,
     std::shared_ptr<PgPool>& pgPool)
 {
-    ripple::LedgerInfo lgrInfo;
     std::stringstream sql;
     sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, "
            "total_coins, closing_time, prev_closing_time, close_time_res, "
@@ -262,8 +262,6 @@ class Pg
 
     PgConfig const& config_;
     boost::asio::io_context::strand strand_;
-    bool& stop_;
-    std::mutex& mutex_;
 
     asio_socket_type socket_{nullptr, [](boost::asio::ip::tcp::socket*) {}};
 
@@ -364,14 +362,9 @@ public:
      *
      * @param config Config parameters.
      * @param j Logger object.
-     * @param stop Reference to connection pool's stop flag.
-     * @param mutex Reference to connection pool's mutex.
      */
-    Pg(PgConfig const& config,
-       boost::asio::io_context& ctx,
-       bool& stop,
-       std::mutex& mutex)
-        : config_(config), strand_(ctx), stop_(stop), mutex_(mutex)
+    Pg(PgConfig const& config, boost::asio::io_context& ctx)
+        : config_(config), strand_(ctx)
     {
     }
 };
@@ -2,6 +2,7 @@
 #include <boost/format.hpp>
 #include <backend/PostgresBackend.h>
 #include <thread>
 
 namespace Backend {
 
 // Type alias for async completion handlers
@@ -77,6 +78,12 @@ PostgresBackend::writeAccountTransactions(
         }
     }
 }
+
+void
+PostgresBackend::writeNFTTransactions(std::vector<NFTTransactionsData>&& data)
+{
+    throw std::runtime_error("Not implemented");
+}
 
 void
 PostgresBackend::doWriteLedgerObject(
     std::string&& key,
@@ -152,6 +159,12 @@ PostgresBackend::writeTransaction(
             << '\t' << "\\\\x" << ripple::strHex(metadata) << '\n';
 }
 
+void
+PostgresBackend::writeNFTs(std::vector<NFTsData>&& data)
+{
+    throw std::runtime_error("Not implemented");
+}
+
 std::uint32_t
 checkResult(PgResult const& res, std::uint32_t const numFieldsExpected)
 {
@@ -419,6 +432,15 @@ PostgresBackend::fetchAllTransactionHashesInLedger(
     return {};
 }
 
+std::optional<NFT>
+PostgresBackend::fetchNFT(
+    ripple::uint256 const& tokenID,
+    std::uint32_t const ledgerSequence,
+    boost::asio::yield_context& yield) const
+{
+    throw std::runtime_error("Not implemented");
+}
+
 std::optional<ripple::uint256>
 PostgresBackend::doFetchSuccessorKey(
     ripple::uint256 key,
@@ -637,12 +659,25 @@ PostgresBackend::fetchLedgerDiff(
     return {};
 }
 
-AccountTransactions
+// TODO this implementation and fetchAccountTransactions should be
+// generalized
+TransactionsAndCursor
+PostgresBackend::fetchNFTTransactions(
+    ripple::uint256 const& tokenID,
+    std::uint32_t const limit,
+    bool forward,
+    std::optional<TransactionsCursor> const& cursor,
+    boost::asio::yield_context& yield) const
+{
+    throw std::runtime_error("Not implemented");
+}
+
+TransactionsAndCursor
 PostgresBackend::fetchAccountTransactions(
     ripple::AccountID const& account,
     std::uint32_t const limit,
     bool forward,
-    std::optional<AccountTransactionsCursor> const& cursor,
+    std::optional<TransactionsCursor> const& cursor,
     boost::asio::yield_context& yield) const
 {
     PgQuery pgQuery(pgPool_);
@@ -62,6 +62,20 @@ public:
         std::uint32_t const ledgerSequence,
         boost::asio::yield_context& yield) const override;
 
+    std::optional<NFT>
+    fetchNFT(
+        ripple::uint256 const& tokenID,
+        std::uint32_t const ledgerSequence,
+        boost::asio::yield_context& yield) const override;
+
+    TransactionsAndCursor
+    fetchNFTTransactions(
+        ripple::uint256 const& tokenID,
+        std::uint32_t const limit,
+        bool const forward,
+        std::optional<TransactionsCursor> const& cursorIn,
+        boost::asio::yield_context& yield) const override;
+
     std::vector<LedgerObject>
     fetchLedgerDiff(
         std::uint32_t const ledgerSequence,
@@ -87,12 +101,12 @@ public:
         std::uint32_t const sequence,
         boost::asio::yield_context& yield) const override;
 
-    AccountTransactions
+    TransactionsAndCursor
     fetchAccountTransactions(
         ripple::AccountID const& account,
         std::uint32_t const limit,
         bool forward,
-        std::optional<AccountTransactionsCursor> const& cursor,
+        std::optional<TransactionsCursor> const& cursor,
         boost::asio::yield_context& yield) const override;
 
     void
@@ -120,10 +134,16 @@ public:
         std::string&& transaction,
         std::string&& metadata) override;
 
+    void
+    writeNFTs(std::vector<NFTsData>&& data) override;
+
     void
     writeAccountTransactions(
         std::vector<AccountTransactionsData>&& data) override;
 
+    void
+    writeNFTTransactions(std::vector<NFTTransactionsData>&& data) override;
+
     void
     open(bool readOnly) override;
 
@@ -1,174 +1,132 @@
|
|||||||
The data model used by clio is different than that used by rippled.
|
# Clio Backend
|
||||||
rippled uses what is known as a SHAMap, which is a tree structure, with
|
## Background
|
||||||
actual ledger and transaction data at the leaves of the tree. Looking up a record
|
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. As of right now, Cassandra is the only supported database that is production-ready. However, support for more databases like PostgreSQL and DynamoDB may be added in future versions. Support for database types can be easily extended by creating new implementations which implements the virtual methods of `BackendInterface.h`. Then, use the Factory Object Design Pattern to simply add logic statements to `BackendFactory.h` that return the new database interface for a specific `type` in Clio's configuration file.
|
||||||
is a tree traversal, where the key is used to determine the path to the proper
|
|
||||||
leaf node. The path from root to leaf is used as a proof-tree on the p2p network,
|
|
||||||
where nodes can prove that a piece of data is present in a ledger by sending
|
|
||||||
the path from root to leaf. Other nodes can verify this path and be certain
|
|
||||||
that the data does actually exist in the ledger in question.
|
|
||||||
|
|
||||||
clio instead flattens the data model, so lookups are O(1). This results in time
|
## Data Model
|
||||||
and space savings. This is possible because clio does not participate in the peer
|
The data model used by Clio to read and write ledger data is different from what Rippled uses. Rippled uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record. Rippled nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existnce of the leaf node data to other Rippled nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized Rippled nodes.
|
||||||
to peer protocol, and thus does not need to verify any data. clio fully trusts the
|
|
||||||
rippled nodes that are being used as a data source.
|
|
||||||
|
|
||||||
clio uses certain features of database query languages to make this happen. Many
|
Since Clio only extracts past validated ledger data from a group of trusted Rippled nodes, it can be safely assumed that these ledger data are correct without the need to validate with other nodes in the XRP peer-to-peer network. Because of this, Clio is able to use a flattened data model to store the past validated ledger data, which allows for direct record lookup with much faster constant time operations.
|
||||||
databases provide the necessary features to implement the clio data model. At the
|
|
||||||
time of writing, the data model is implemented in PostgreSQL and CQL (the query
|
|
||||||
language used by Apache Cassandra and ScyllaDB).
|
|
||||||
|
|
||||||
The below examples are a sort of pseudo query language
|
There are three main types of data in each XRP ledger version, they are [Ledger Header](https://xrpl.org/ledger-header.html), [Transaction Set](https://xrpl.org/transaction-formats.html) and [State Data](https://xrpl.org/ledger-object-types.html). Due to the structural differences of the different types of databases, Clio may choose to represent these data using a different schema for each unique database type.
|
||||||
|
|
||||||
## Ledgers
|
**Keywords**
|
||||||
|
*Sequence*: A unique incrementing identification number used to label the different ledger versions.
|
||||||
|
*Hash*: The SHA512-half (calculate SHA512 and take the first 256 bits) hash of various ledger data like the entire ledger or specific ledger objects.
|
||||||
|
*Ledger Object*: The [binary-encoded](https://xrpl.org/serialization.html) STObject containing specific data (i.e. metadata, transaction data).
|
||||||
|
*Metadata*: The data containing [detailed information](https://xrpl.org/transaction-metadata.html#transaction-metadata) of the outcome of a specific transaction, regardless of whether the transaction was successful.
|
||||||
|
*Transaction data*: The data containing the [full details](https://xrpl.org/transaction-common-fields.html) of a specific transaction.
|
||||||
|
*Object Index*: The pseudo-random unique identifier of a ledger object, created by hashing the data of the object.
|
||||||
|
|
||||||
We store ledger headers in a ledgers table. In PostgreSQL, we store
|
## Cassandra Implementation
|
||||||
the headers in their deserialized form, so we can look up by sequence or hash.
|
Cassandra is a distributed wide-column NoSQL database designed to handle large data throughput with high availability and no single point of failure. By leveraging Cassandra, Clio will be able to quickly and reliably scale up when needed simply by adding more Cassandra nodes to the Cassandra cluster configuration.
|
||||||
|
|
||||||
In Cassandra, we store the headers as blobs. The primary table maps a ledger sequence
|
In Cassandra, Clio will be creating 9 tables to store the ledger data, they are `ledger_transactions`, `transactions`, `ledger_hashes`, `ledger_range`, `objects`, `ledgers`, `diff`, `account_tx`, and `successor`. Their schemas and how they work are detailed below.
|
||||||
to the blob, and a secondary table maps a ledger hash to a ledger sequence.
|
|
||||||
|
|
||||||
## Transactions
|
*Note, if you would like visually explore the data structure of the Cassandra database, you can first run Clio server with database `type` configured as `cassandra` to fill ledger data from Rippled nodes into Cassandra, then use a GUI database management tool like [Datastax's Opcenter](https://docs.datastax.com/en/install/6.0/install/opscInstallOpsc.html) to interactively view it.*
|
||||||
Transactions are stored in a very basic table, with a schema like so:
|
|
||||||
|
|
||||||
|
|
||||||
|
### `ledger_transactions`
|
||||||
```
|
```
|
||||||
CREATE TABLE transactions (
|
CREATE TABLE clio.ledger_transactions (
|
||||||
hash blob,
|
ledger_sequence bigint, # The sequence number of the ledger version
|
||||||
ledger_sequence int,
|
hash blob, # Hash of all the transactions on this ledger version
|
||||||
transaction blob,
|
PRIMARY KEY (ledger_sequence, hash)
|
||||||
PRIMARY KEY(hash))
|
) WITH CLUSTERING ORDER BY (hash ASC) ...
|
||||||
|
```
|
||||||
|
This table stores the hashes of all transactions in a given ledger sequence ordered by the hash value in ascending order.
|
||||||
|
|
||||||
|
### `transactions`
|
||||||
```
|
```
|
||||||
The primary key is the hash.
|
CREATE TABLE clio.transactions (
|
||||||
|
hash blob PRIMARY KEY, # The transaction hash
|
||||||
|
date bigint, # Date of the transaction
|
||||||
|
ledger_sequence bigint, # The sequence that the transaction was validated
|
||||||
|
metadata blob, # Metadata of the transaction
|
||||||
|
transaction blob # Data of the transaction
|
||||||
|
) ...
|
||||||
|
```
|
||||||
|
This table stores the full transaction and metadata of each ledger version with the transaction hash as the primary key.
|
||||||
|
|
||||||
A common query pattern is fetching all transactions in a ledger. In PostgreSQL,
|
To look up all the transactions that were validated in a ledger version with sequence `n`, one can first get the all the transaction hashes in that ledger version by querying `SELECT * FROM ledger_transactions WHERE ledger_sequence = n;`. Then, iterate through the list of hashes and query `SELECT * FROM transactions WHERE hash = one_of_the_hash_from_the_list;` to get the detailed transaction data.
|
||||||
nothing special is needed for this. We just query:
|
|
||||||
|
### `ledger_hashes`
|
||||||
```
|
```
|
||||||
SELECT * FROM transactions WHERE ledger_sequence = s;
|
CREATE TABLE clio.ledger_hashes (
|
||||||
|
hash blob PRIMARY KEY, # Hash of entire ledger version's data
|
||||||
|
sequence bigint # The sequence of the ledger version
|
||||||
|
) ...
|
||||||
|
```
|
||||||
|
This table stores the hash of all ledger versions by their sequences.
|
||||||
|
### `ledger_range`
|
||||||
```
|
```
|
||||||
Cassandra doesn't handle queries like this well, since `ledger_sequence` is not
|
CREATE TABLE clio.ledger_range (
|
||||||
the primary key, so we use a second table that maps a ledger sequence number
|
is_latest boolean PRIMARY KEY, # Whether this sequence is the stopping range
|
||||||
to all of the hashes in that ledger:
|
sequence bigint # The sequence number of the starting/stopping range
|
||||||
|
) ...
|
||||||
|
```
|
||||||
|
This table marks the range of ledger versions that is stored on this specific Cassandra node. Because of its nature, there are only two records in this table with `false` and `true` values for `is_latest`, marking the starting and ending sequence of the ledger range.
|
||||||
|
|
||||||
|
### `objects`
|
||||||
```
|
```
|
||||||
CREATE TABLE transaction_hashes (
|
CREATE TABLE clio.objects (
|
||||||
ledger_sequence int,
|
key blob, # Object index of the object
|
||||||
hash blob,
|
sequence bigint, # The sequence this object was last updated
|
||||||
PRIMARY KEY(ledger_sequence, blob))
|
object blob, # Data of the object
|
||||||
|
PRIMARY KEY (key, sequence)
|
||||||
|
) WITH CLUSTERING ORDER BY (sequence DESC) ...
|
||||||
|
```
|
||||||
|
This table stores the specific data of all objects that ever existed on the XRP network, even if they are deleted (which is represented with a special `0x` value). The records are ordered by descending sequence, where the newest validated ledger objects are at the top.
|
||||||
|
|
||||||
|
This table is updated when all data for a given ledger sequence has been written to the various tables in the database. For each ledger, many associated records are written to different tables. This table is used as a synchronization mechanism, to prevent the application from reading data from a ledger for which all data has not yet been fully written.
|
||||||
|
|
||||||
|
### `ledgers`
|
||||||
```
|
```
|
||||||
This table uses a compound primary key, so we can have multiple records with
|
CREATE TABLE clio.ledgers (
|
||||||
the same ledger sequence but different hash. Looking up all of the transactions
|
sequence bigint PRIMARY KEY, # Sequence of the ledger version
|
||||||
in a given ledger then requires querying the transaction_hashes table to get the hashes of
|
header blob # Data of the header
|
||||||
all of the transactions in the ledger, and then using those hashes to query the
|
) ...
|
||||||
transactions table. Sometimes we only want the hashes though.
|
```
|
||||||
|
This table stores the ledger header data of specific ledger versions by their sequence.
|
||||||
## Ledger data
|
|
||||||
|
|
||||||
Ledger data is more complicated than transaction data. Objects have different versions,
|
|
||||||
where applying transactions in a particular ledger changes an object with a given
|
|
||||||
key. A basic example is an account root object: the balance changes with every
|
|
||||||
transaction sent or received, though the key (object ID) for this object remains the same.
|
|
||||||
|
|
||||||
Ledger data then is modeled like so:
|
|
||||||
|
|
||||||
|
### `diff`

```
CREATE TABLE clio.diff (
    seq bigint, # Sequence of the ledger version
    key blob,   # Hash of changes in the ledger version
    PRIMARY KEY (seq, key)
) WITH CLUSTERING ORDER BY (key ASC) ...
```

This table stores the object index of all the changes in each ledger version.
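Listing everything that changed in a given ledger is a single-partition read (a sketch):

```
SELECT key FROM clio.diff WHERE seq = ?;
```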
### `account_tx`

```
CREATE TABLE clio.account_tx (
    account blob,
    seq_idx frozen<tuple<bigint, bigint>>, # Tuple of (ledger_index, transaction_index)
    hash blob,                             # Hash of the transaction
    PRIMARY KEY (account, seq_idx)
) WITH CLUSTERING ORDER BY (seq_idx DESC) ...
```

This table stores the list of transactions affecting a given account. This includes transactions made by the account, as well as transactions received.
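Paging backwards in time follows the usual `account_tx` pattern, adapted here to the tuple-typed clustering column. A sketch, where the bound value is a `(ledger_index, transaction_index)` tuple cursor and the page size is illustrative:

```
SELECT hash, seq_idx FROM clio.account_tx
WHERE account = ? AND seq_idx < ?
LIMIT 20;
```

Because `seq_idx` clusters in descending order, this returns the next page of newest-first results; the last `seq_idx` of one page becomes the cursor for the next. The returned hashes are then used to read the full transactions from the transactions table.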
### `successor`

```
CREATE TABLE clio.successor (
    key blob,   # Object index
    seq bigint, # The sequence that this ledger object's predecessor and successor was updated
    next blob,  # Index of the next object that existed in this sequence
    PRIMARY KEY (key, seq)
) WITH CLUSTERING ORDER BY (seq ASC) ...
```

A few RPCs (`book_offers` and `ledger_data`) need a *successor* operation: given an object index and a ledger sequence, return the index of the next object that existed in that ledger, i.e. the object with the smallest index greater than the input. This table is the backbone of how histories of ledger objects are stored in Cassandra to support that operation. The successor table stores the object index of every ledger object that was validated on the XRP network, along with the ledger sequence at which that object's links were updated. Because each key's records are ordered by sequence, tracing through the table with a specific sequence number lets Clio recreate a linked list of all the ledger objects that existed at that sequence. The special values `0x00...00` and `0xFF...FF` label the head and tail of the linked list. The diagram below shows how tracing the same table with different sequence filters yields different linked lists, each representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` performs one step of that trace for a specific sequence `n`.
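A full traversal at sequence `n` is just this single-step query repeated, feeding each returned `next` into the following lookup. A minimal sketch, assuming the sentinel keys described above:

```
-- step 1: start from the head sentinel (key = 0x00...00)
SELECT next FROM clio.successor
WHERE key = ? AND seq <= ? ORDER BY seq DESC LIMIT 1;

-- steps 2..k: rebind key to the returned next and repeat,
-- stopping once next = 0xFF...FF (the tail sentinel)
```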


*P.S.: The `diff` is `(DELETE 0x00...02, CREATE 0x00...03)` for `seq=1001` and `(CREATE 0x00...04)` for `seq=1002`, both of which are accurately reflected in the linked-list traces.*

In each new ledger version with sequence `n`, a ledger object `v` can either be **created**, **modified**, or **deleted**. For all three operations, the procedure to update the successor table breaks down into two steps (see the sketch after this list):

1. Trace through the linked list of the previous sequence to find the ledger object `e` with the greatest object index smaller than or equal to `v`'s index. Save `e`'s `next` value (the index of the next ledger object) as `w`.
2. If `v` is being...
   1. **created**, add two new records with `seq=n`: one with `e` pointing to `v`, and one with `v` pointing to `w` (linked-list insertion).
   2. **modified**, do nothing.
   3. **deleted**, add a record with `seq=n` in which `e` points to `v`'s own `next` value (linked-list deletion).
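Since older links must remain readable for historical traces, these updates are plain inserts; nothing is overwritten. A minimal sketch of the writes for a creation at sequence `n`, where `:e`, `:v`, and `:w` are placeholders for the indices found in step 1:

```
-- e now points to the newly created v at sequence n ...
INSERT INTO clio.successor (key, seq, next) VALUES (:e, :n, :v);
-- ... and v points to e's previous successor w
INSERT INTO clio.successor (key, seq, next) VALUES (:v, :n, :w);
```

A deletion instead writes a single record linking `e` to `v`'s old `next`, splicing `v` out of the list.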

Why is a dedicated successor table necessary? Computing a successor generally requires the inner nodes of the ledger's tree, which clio doesn't store. A naive way to compute it over the objects data would be:

```
SELECT * FROM objects WHERE id > ? AND ledger_sequence <= s ORDER BY id ASC, ledger_sequence DESC LIMIT 1;
```

This query is not really possible with Cassandra, unless you use ALLOW FILTERING, which is an anti-pattern (for good reason!): it would require contacting basically every node in the entire cluster. But even in a database that can execute it, this query is not scalable. Consider what it is doing at the database level: it starts at the input id and scans the table in ascending order of id, skipping over any records that don't actually exist in the desired ledger, which are objects that have been deleted or objects that were created later. As ledger history grows, the query skips over more and more records, so it takes longer and longer; its cost grows unbounded as ledger history keeps growing. With under a million ledgers the query is usable, but as we approach 10 million ledgers or more, it becomes very slow.

An earlier revision of this data model bounded that cost with a checkpointing method instead: a `keys` table, written at roughly every millionth ledger (a *flag ledger*), recorded every object id in that flag ledger as well as every id that existed in any ledger since the previous flag ledger. A successor scan then only had to skip keys from a bounded window, at the price of extra space and an asynchronous writing pass at each flag ledger. The successor table described above supersedes that approach.

## Comments

There are various nuances around how these data models are tuned and optimized for each database implementation. Cassandra and PostgreSQL are very different, so some slight modifications are needed; however, the general model outlined here should be followed when adding a new database, unless there is a good reason not to. Generally, a new database will be decently similar to either PostgreSQL or Cassandra, so using those as a basis should be sufficient.

Whatever database is used, clio requires strong consistency and durability. For this reason, any replication strategy needs to maintain strong consistency.
src/backend/SimpleCache.cpp

@@ -1,5 +1,6 @@
 #include <backend/SimpleCache.h>
+
 namespace Backend {

 uint32_t
 SimpleCache::latestLedgerSequence() const
 {
@@ -13,6 +14,9 @@ SimpleCache::update(
     uint32_t seq,
     bool isBackground)
 {
+    if (disabled_)
+        return;
+
     {
         std::unique_lock lck{mtx_};
         if (seq > latestSeq_)
@@ -26,6 +30,7 @@ SimpleCache::update(
         {
             if (isBackground && deletes_.count(obj.key))
                 continue;
+
             auto& e = map_[obj.key];
             if (seq > e.seq)
             {
@@ -41,19 +46,23 @@ SimpleCache::update(
             }
         }
     }
 }

 std::optional<LedgerObject>
 SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
 {
     if (!full_)
         return {};
     std::shared_lock{mtx_};
+    successorReqCounter_++;
     if (seq != latestSeq_)
         return {};
     auto e = map_.upper_bound(key);
     if (e == map_.end())
         return {};
+    successorHitCounter_++;
     return {{e->first, e->second.blob}};
 }

 std::optional<LedgerObject>
 SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
 {
@@ -74,17 +83,28 @@ SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
     if (seq > latestSeq_)
         return {};
     std::shared_lock lck{mtx_};
+    objectReqCounter_++;
     auto e = map_.find(key);
     if (e == map_.end())
         return {};
     if (seq < e->second.seq)
         return {};
+    objectHitCounter_++;
     return {e->second.blob};
 }

+void
+SimpleCache::setDisabled()
+{
+    disabled_ = true;
+}
+
 void
 SimpleCache::setFull()
 {
+    if (disabled_)
+        return;
+
     full_ = true;
     std::unique_lock lck{mtx_};
     deletes_.clear();
@@ -101,4 +121,18 @@ SimpleCache::size() const
     std::shared_lock lck{mtx_};
     return map_.size();
 }
+
+float
+SimpleCache::getObjectHitRate() const
+{
+    if (!objectReqCounter_)
+        return 1;
+    return ((float)objectHitCounter_) / objectReqCounter_;
+}
+
+float
+SimpleCache::getSuccessorHitRate() const
+{
+    if (!successorReqCounter_)
+        return 1;
+    return ((float)successorHitCounter_) / successorReqCounter_;
+}
 } // namespace Backend
src/backend/SimpleCache.h

@@ -17,10 +17,19 @@ class SimpleCache
         uint32_t seq = 0;
         Blob blob;
     };

+    // counters for fetchLedgerObject(s) hit rate
+    mutable std::atomic_uint32_t objectReqCounter_;
+    mutable std::atomic_uint32_t objectHitCounter_;
+    // counters for fetchSuccessorKey hit rate
+    mutable std::atomic_uint32_t successorReqCounter_;
+    mutable std::atomic_uint32_t successorHitCounter_;
+
     std::map<ripple::uint256, CacheEntry> map_;
     mutable std::shared_mutex mtx_;
     uint32_t latestSeq_ = 0;
     std::atomic_bool full_ = false;
+    std::atomic_bool disabled_ = false;
     // temporary set to prevent background thread from writing already deleted
     // data. not used when cache is full
     std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
@@ -45,6 +54,9 @@ public:
     std::optional<LedgerObject>
     getPredecessor(ripple::uint256 const& key, uint32_t seq) const;

+    void
+    setDisabled();
+
     void
     setFull();
@@ -57,6 +69,12 @@ public:

     size_t
     size() const;
+
+    float
+    getObjectHitRate() const;
+
+    float
+    getSuccessorHitRate() const;
 };

 } // namespace Backend
src/backend/Types.h

@@ -1,6 +1,7 @@
 #ifndef CLIO_TYPES_H_INCLUDED
 #define CLIO_TYPES_H_INCLUDED
 #include <ripple/basics/base_uint.h>
+#include <ripple/protocol/AccountID.h>
 #include <optional>
 #include <string>
 #include <vector>
@@ -46,16 +47,34 @@ struct TransactionAndMetadata
     }
 };

-struct AccountTransactionsCursor
+struct TransactionsCursor
 {
     std::uint32_t ledgerSequence;
     std::uint32_t transactionIndex;
 };

-struct AccountTransactions
+struct TransactionsAndCursor
 {
     std::vector<TransactionAndMetadata> txns;
-    std::optional<AccountTransactionsCursor> cursor;
+    std::optional<TransactionsCursor> cursor;
+};
+
+struct NFT
+{
+    ripple::uint256 tokenID;
+    std::uint32_t ledgerSequence;
+    ripple::AccountID owner;
+    bool isBurned;
+
+    // clearly two tokens are the same if they have the same ID, but this
+    // struct stores the state of a given token at a given ledger sequence, so
+    // we also need to compare with ledgerSequence
+    bool
+    operator==(NFT const& other) const
+    {
+        return tokenID == other.tokenID &&
+            ledgerSequence == other.ledgerSequence;
+    }
 };

 struct LedgerRange
src/etl/ETLHelpers.h

@@ -23,8 +23,6 @@ class NetworkValidatedLedgers

     std::condition_variable cv_;

-    bool stopping_ = false;
-
 public:
     static std::shared_ptr<NetworkValidatedLedgers>
     make_ValidatedLedgers()
@@ -174,4 +172,4 @@ getMarkers(size_t numMarkers)
     return markers;
 }

 #endif  // RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
src/etl/ETLSource.cpp

@@ -8,59 +8,91 @@
 #include <boost/log/trivial.hpp>
 #include <backend/DBHelpers.h>
 #include <etl/ETLSource.h>
+#include <etl/ProbingETLSource.h>
 #include <etl/ReportingETL.h>
+#include <rpc/RPCHelpers.h>
 #include <thread>

-// Create ETL source without grpc endpoint
-// Fetch ledger and load initial ledger will fail for this source
-// Primarly used in read-only mode, to monitor when ledgers are validated
-template <class Derived>
-ETLSourceImpl<Derived>::ETLSourceImpl(
-    boost::json::object const& config,
-    boost::asio::io_context& ioContext,
-    std::shared_ptr<BackendInterface> backend,
-    std::shared_ptr<SubscriptionManager> subscriptions,
-    std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
-    ETLLoadBalancer& balancer)
-    : resolver_(boost::asio::make_strand(ioContext))
-    , networkValidatedLedgers_(networkValidatedLedgers)
-    , backend_(backend)
-    , subscriptions_(subscriptions)
-    , balancer_(balancer)
-    , ioc_(ioContext)
-    , timer_(ioContext)
-{
-    if (config.contains("ip"))
-    {
-        auto ipJs = config.at("ip").as_string();
-        ip_ = {ipJs.c_str(), ipJs.size()};
-    }
-    if (config.contains("ws_port"))
-    {
-        auto portjs = config.at("ws_port").as_string();
-        wsPort_ = {portjs.c_str(), portjs.size()};
-    }
-    if (config.contains("grpc_port"))
-    {
-        auto portjs = config.at("grpc_port").as_string();
-        grpcPort_ = {portjs.c_str(), portjs.size()};
-        try
-        {
-            boost::asio::ip::tcp::endpoint endpoint{
-                boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
-            std::stringstream ss;
-            ss << endpoint;
-            stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
-                grpc::CreateChannel(
-                    ss.str(), grpc::InsecureChannelCredentials()));
-            BOOST_LOG_TRIVIAL(debug) << "Made stub for remote = " << toString();
-        }
-        catch (std::exception const& e)
-        {
-            BOOST_LOG_TRIVIAL(debug)
-                << "Exception while creating stub = " << e.what()
-                << " . Remote = " << toString();
-        }
-    }
-}
+void
+ForwardCache::freshen()
+{
+    BOOST_LOG_TRIVIAL(trace) << "Freshening ForwardCache";
+
+    auto numOutstanding =
+        std::make_shared<std::atomic_uint>(latestForwarded_.size());
+
+    for (auto const& cacheEntry : latestForwarded_)
+    {
+        boost::asio::spawn(
+            strand_,
+            [this, numOutstanding, command = cacheEntry.first](
+                boost::asio::yield_context yield) {
+                boost::json::object request = {{"command", command}};
+                auto resp = source_.requestFromRippled(request, {}, yield);
+
+                if (!resp || resp->contains("error"))
+                    resp = {};
+
+                {
+                    std::unique_lock lk(mtx_);
+                    latestForwarded_[command] = resp;
+                }
+            });
+    }
+}
+
+void
+ForwardCache::clear()
+{
+    std::unique_lock lk(mtx_);
+    for (auto& cacheEntry : latestForwarded_)
+        latestForwarded_[cacheEntry.first] = {};
+}
+
+std::optional<boost::json::object>
+ForwardCache::get(boost::json::object const& request) const
+{
+    std::optional<std::string> command = {};
+    if (request.contains("command") && !request.contains("method") &&
+        request.at("command").is_string())
+        command = request.at("command").as_string().c_str();
+    else if (
+        request.contains("method") && !request.contains("command") &&
+        request.at("method").is_string())
+        command = request.at("method").as_string().c_str();
+
+    if (!command)
+        return {};
+    if (RPC::specifiesCurrentOrClosedLedger(request))
+        return {};
+
+    std::shared_lock lk(mtx_);
+    if (!latestForwarded_.contains(*command))
+        return {};
+
+    return {latestForwarded_.at(*command)};
+}
+
+static boost::beast::websocket::stream_base::timeout
+make_TimeoutOption()
+{
+    // See #289 for details.
+    // TODO: investigate the issue and find if there is a solution other than
+    // introducing artificial timeouts.
+    if (true)
+    {
+        // The only difference between this and the suggested client role is
+        // that idle_timeout is set to 20 instead of none()
+        auto opt = boost::beast::websocket::stream_base::timeout{};
+        opt.handshake_timeout = std::chrono::seconds(30);
+        opt.idle_timeout = std::chrono::seconds(20);
+        opt.keep_alive_pings = false;
+        return opt;
+    }
+    else
+    {
+        return boost::beast::websocket::stream_base::timeout::suggested(
+            boost::beast::role_type::client);
+    }
+}
@@ -68,6 +100,12 @@ template <class Derived>
 void
 ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
 {
+    if (paused_)
+        return;
+
+    if (connected_)
+        hooks_.onDisconnected(ec);
+
     connected_ = false;
     // These are somewhat normal errors. operation_aborted occurs on shutdown,
     // when the timer is cancelled. connection_refused will occur repeatedly
@@ -136,11 +174,21 @@ PlainETLSource::close(bool startAgain)
             }
             closing_ = false;
             if (startAgain)
+            {
+                ws_ = std::make_unique<boost::beast::websocket::stream<
+                    boost::beast::tcp_stream>>(
+                    boost::asio::make_strand(ioc_));
+
                 run();
+            }
         });
     }
     else if (startAgain)
     {
+        ws_ = std::make_unique<
+            boost::beast::websocket::stream<boost::beast::tcp_stream>>(
+            boost::asio::make_strand(ioc_));
+
         run();
     }
 });
@@ -235,21 +283,17 @@ PlainETLSource::onConnect(
     // own timeout system
     boost::beast::get_lowest_layer(derived().ws()).expires_never();

-    // Set suggested timeout settings for the websocket
-    derived().ws().set_option(
-        boost::beast::websocket::stream_base::timeout::suggested(
-            boost::beast::role_type::client));
+    // Set a desired timeout for the websocket stream
+    derived().ws().set_option(make_TimeoutOption());

     // Set a decorator to change the User-Agent of the handshake
     derived().ws().set_option(
         boost::beast::websocket::stream_base::decorator(
             [](boost::beast::websocket::request_type& req) {
                 req.set(
-                    boost::beast::http::field::user_agent,
-                    std::string(BOOST_BEAST_VERSION_STRING) +
-                        " clio-client");
+                    boost::beast::http::field::user_agent, "clio-client");

-                req.set("X-User", "coro-client");
+                req.set("X-User", "clio-client");
             }));

     // Update the host_ string. This will provide the value of the
@@ -281,21 +325,17 @@ SslETLSource::onConnect(
     // own timeout system
     boost::beast::get_lowest_layer(derived().ws()).expires_never();

-    // Set suggested timeout settings for the websocket
-    derived().ws().set_option(
-        boost::beast::websocket::stream_base::timeout::suggested(
-            boost::beast::role_type::client));
+    // Set a desired timeout for the websocket stream
+    derived().ws().set_option(make_TimeoutOption());

     // Set a decorator to change the User-Agent of the handshake
     derived().ws().set_option(
         boost::beast::websocket::stream_base::decorator(
             [](boost::beast::websocket::request_type& req) {
                 req.set(
-                    boost::beast::http::field::user_agent,
-                    std::string(BOOST_BEAST_VERSION_STRING) +
-                        " clio-client");
+                    boost::beast::http::field::user_agent, "clio-client");

-                req.set("X-User", "coro-client");
+                req.set("X-User", "clio-client");
             }));

     // Update the host_ string. This will provide the value of the
@@ -333,6 +373,10 @@ ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
 {
     BOOST_LOG_TRIVIAL(trace)
         << __func__ << " : ec = " << ec << " - " << toString();
+    if (auto action = hooks_.onConnected(ec);
+        action == ETLSourceHooks::Action::STOP)
+        return;
+
     if (ec)
     {
         // start over
@@ -475,6 +519,7 @@ ETLSourceImpl<Derived>::handleMessage()
 {
     if (response.contains("transaction"))
     {
+        forwardCache_.freshen();
         subscriptions_->forwardProposedTransaction(response);
     }
     else if (
@@ -863,8 +908,6 @@ ETLSourceImpl<Derived>::fetchLedger(
             "correctly on the ETL source. source = "
             << toString() << " status = " << status.error_message();
     }
-    // BOOST_LOG_TRIVIAL(debug)
-    //     << __func__ << " Message size = " << response.ByteSizeLong();
     return {status, std::move(response)};
 }
@@ -872,34 +915,18 @@ static std::unique_ptr<ETLSource>
 make_ETLSource(
     boost::json::object const& config,
     boost::asio::io_context& ioContext,
-    std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
     std::shared_ptr<BackendInterface> backend,
     std::shared_ptr<SubscriptionManager> subscriptions,
     std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
     ETLLoadBalancer& balancer)
 {
-    std::unique_ptr<ETLSource> src = nullptr;
-    if (sslCtx)
-    {
-        src = std::make_unique<SslETLSource>(
-            config,
-            ioContext,
-            sslCtx,
-            backend,
-            subscriptions,
-            networkValidatedLedgers,
-            balancer);
-    }
-    else
-    {
-        src = std::make_unique<PlainETLSource>(
-            config,
-            ioContext,
-            backend,
-            subscriptions,
-            networkValidatedLedgers,
-            balancer);
-    }
+    auto src = std::make_unique<ProbingETLSource>(
+        config,
+        ioContext,
+        backend,
+        subscriptions,
+        networkValidatedLedgers,
+        balancer);

     src->run();
@@ -909,7 +936,6 @@ make_ETLSource(
 ETLLoadBalancer::ETLLoadBalancer(
     boost::json::object const& config,
     boost::asio::io_context& ioContext,
-    std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
     std::shared_ptr<BackendInterface> backend,
     std::shared_ptr<SubscriptionManager> subscriptions,
     std::shared_ptr<NetworkValidatedLedgers> nwvl)
@@ -928,13 +954,7 @@ ETLLoadBalancer::ETLLoadBalancer(
     for (auto& entry : config.at("etl_sources").as_array())
     {
         std::unique_ptr<ETLSource> source = make_ETLSource(
-            entry.as_object(),
-            ioContext,
-            sslCtx,
-            backend,
-            subscriptions,
-            nwvl,
-            *this);
+            entry.as_object(), ioContext, backend, subscriptions, nwvl, *this);

         sources_.push_back(std::move(source));
         BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - "
@@ -973,7 +993,7 @@ ETLLoadBalancer::fetchLedger(
         auto [status, data] = source->fetchLedger(
             ledgerSequence, getObjects, getObjectNeighbors);
         response = std::move(data);
-        if (status.ok() && (response.validated() || true))
+        if (status.ok() && response.validated())
         {
             BOOST_LOG_TRIVIAL(info)
                 << "Successfully fetched ledger = " << ledgerSequence
@@ -1026,7 +1046,23 @@ ETLSourceImpl<Derived>::forwardToRippled(
     std::string const& clientIp,
     boost::asio::yield_context& yield) const
 {
-    BOOST_LOG_TRIVIAL(debug) << "Attempting to forward request to tx. "
+    if (auto resp = forwardCache_.get(request); resp)
+    {
+        BOOST_LOG_TRIVIAL(debug) << "request hit forwardCache";
+        return resp;
+    }
+
+    return requestFromRippled(request, clientIp, yield);
+}
+
+template <class Derived>
+std::optional<boost::json::object>
+ETLSourceImpl<Derived>::requestFromRippled(
+    boost::json::object const& request,
+    std::string const& clientIp,
+    boost::asio::yield_context& yield) const
+{
+    BOOST_LOG_TRIVIAL(trace) << "Attempting to forward request to tx. "
                              << "request = " << boost::json::serialize(request);

     boost::json::object response;
@@ -1047,7 +1083,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
     // These objects perform our I/O
     tcp::resolver resolver{ioc_};

-    BOOST_LOG_TRIVIAL(debug) << "Creating websocket";
+    BOOST_LOG_TRIVIAL(trace) << "Creating websocket";
     auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);

     // Look up the domain name
@@ -1057,7 +1093,7 @@ ETLSourceImpl<Derived>::forwardToRippled(

     ws->next_layer().expires_after(std::chrono::seconds(3));

-    BOOST_LOG_TRIVIAL(debug) << "Connecting websocket";
+    BOOST_LOG_TRIVIAL(trace) << "Connecting websocket";
     // Make the connection on the IP address we get from a lookup
     ws->next_layer().async_connect(results, yield[ec]);
     if (ec)
@@ -1076,15 +1112,15 @@ ETLSourceImpl<Derived>::forwardToRippled(
                 " websocket-client-coro");
             req.set(http::field::forwarded, "for=" + clientIp);
         }));
-    BOOST_LOG_TRIVIAL(debug) << "client ip: " << clientIp;
+    BOOST_LOG_TRIVIAL(trace) << "client ip: " << clientIp;

-    BOOST_LOG_TRIVIAL(debug) << "Performing websocket handshake";
+    BOOST_LOG_TRIVIAL(trace) << "Performing websocket handshake";
     // Perform the websocket handshake
     ws->async_handshake(ip_, "/", yield[ec]);
     if (ec)
         return {};

-    BOOST_LOG_TRIVIAL(debug) << "Sending request";
+    BOOST_LOG_TRIVIAL(trace) << "Sending request";
     // Send the message
     ws->async_write(
         net::buffer(boost::json::serialize(request)), yield[ec]);
@@ -1106,7 +1142,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
             << "Error parsing response: " << std::string{begin, end};
         return {};
     }
-    BOOST_LOG_TRIVIAL(debug) << "Successfully forward request";
+    BOOST_LOG_TRIVIAL(trace) << "Successfully forward request";

     response = parsed.as_object();
src/etl/ETLSource.h

@@ -15,6 +15,8 @@
 #include <grpcpp/grpcpp.h>

 class ETLLoadBalancer;
+class ETLSource;
+class ProbingETLSource;
 class SubscriptionManager;

 /// This class manages a connection to a single ETL source. This is almost
@@ -24,6 +26,64 @@ class SubscriptionManager;
 /// has. This class also has methods for extracting said ledgers. Lastly this
 /// class forwards transactions received on the transactions_proposed streams to
 /// any subscribers.
+class ForwardCache
+{
+    using response_type = std::optional<boost::json::object>;
+
+    mutable std::atomic_bool stopping_ = false;
+    mutable std::shared_mutex mtx_;
+    std::unordered_map<std::string, response_type> latestForwarded_;
+
+    boost::asio::io_context::strand strand_;
+    boost::asio::steady_timer timer_;
+    ETLSource const& source_;
+    std::uint32_t duration_ = 10;
+
+    void
+    clear();
+
+public:
+    ForwardCache(
+        boost::json::object const& config,
+        boost::asio::io_context& ioc,
+        ETLSource const& source)
+        : strand_(ioc), timer_(strand_), source_(source)
+    {
+        if (config.contains("cache") && !config.at("cache").is_array())
+            throw std::runtime_error("ETLSource cache must be array");
+
+        if (config.contains("cache_duration") &&
+            !config.at("cache_duration").is_int64())
+            throw std::runtime_error(
+                "ETLSource cache_duration must be a number");
+
+        duration_ = config.contains("cache_duration")
+            ? config.at("cache_duration").as_int64()
+            : 10;
+
+        auto commands = config.contains("cache") ? config.at("cache").as_array()
+                                                 : boost::json::array{};
+
+        for (auto const& command : commands)
+        {
+            if (!command.is_string())
+                throw std::runtime_error(
+                    "ETLSource forward command must be array of strings");
+
+            latestForwarded_[command.as_string().c_str()] = {};
+        }
+    }
+
+    // This is to be called every freshenDuration_ seconds.
+    // It will request information from this etlSource, and
+    // will populate the cache with the latest value. If the
+    // request fails, it will evict that value from the cache.
+    void
+    freshen();
+
+    std::optional<boost::json::object>
+    get(boost::json::object const& command) const;
+};
+
 class ETLSource
 {
@@ -37,6 +97,12 @@ public:
     virtual void
     run() = 0;

+    virtual void
+    pause() = 0;
+
+    virtual void
+    resume() = 0;
+
     virtual std::string
     toString() const = 0;
@@ -64,6 +130,24 @@ public:
     virtual ~ETLSource()
     {
     }
+
+private:
+    friend ForwardCache;
+    friend ProbingETLSource;
+
+    virtual std::optional<boost::json::object>
+    requestFromRippled(
+        boost::json::object const& request,
+        std::string const& clientIp,
+        boost::asio::yield_context& yield) const = 0;
+};
+
+struct ETLSourceHooks
+{
+    enum class Action { STOP, PROCEED };
+
+    std::function<Action(boost::beast::error_code)> onConnected;
+    std::function<Action(boost::beast::error_code)> onDisconnected;
 };

 template <class Derived>
@@ -105,6 +189,14 @@ class ETLSourceImpl : public ETLSource
     std::shared_ptr<SubscriptionManager> subscriptions_;
     ETLLoadBalancer& balancer_;

+    ForwardCache forwardCache_;
+
+    std::optional<boost::json::object>
+    requestFromRippled(
+        boost::json::object const& request,
+        std::string const& clientIp,
+        boost::asio::yield_context& yield) const override;
+
 protected:
     Derived&
     derived()
@@ -123,6 +215,10 @@ protected:

     std::atomic_bool closing_{false};

+    std::atomic_bool paused_{false};
+
+    ETLSourceHooks hooks_;
+
     void
     run() override
     {
@@ -139,7 +235,7 @@ protected:
 public:
     ~ETLSourceImpl()
     {
-        close(false);
+        derived().close(false);
     }

     bool
@@ -171,7 +267,54 @@ public:
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<SubscriptionManager> subscriptions,
         std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
-        ETLLoadBalancer& balancer);
+        ETLLoadBalancer& balancer,
+        ETLSourceHooks hooks)
+        : resolver_(boost::asio::make_strand(ioContext))
+        , networkValidatedLedgers_(networkValidatedLedgers)
+        , backend_(backend)
+        , subscriptions_(subscriptions)
+        , balancer_(balancer)
+        , forwardCache_(config, ioContext, *this)
+        , ioc_(ioContext)
+        , timer_(ioContext)
+        , hooks_(hooks)
+    {
+        if (config.contains("ip"))
+        {
+            auto ipJs = config.at("ip").as_string();
+            ip_ = {ipJs.c_str(), ipJs.size()};
+        }
+        if (config.contains("ws_port"))
+        {
+            auto portjs = config.at("ws_port").as_string();
+            wsPort_ = {portjs.c_str(), portjs.size()};
+        }
+        if (config.contains("grpc_port"))
+        {
+            auto portjs = config.at("grpc_port").as_string();
+            grpcPort_ = {portjs.c_str(), portjs.size()};
+            try
+            {
+                boost::asio::ip::tcp::endpoint endpoint{
+                    boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
+                std::stringstream ss;
+                ss << endpoint;
+                grpc::ChannelArguments chArgs;
+                chArgs.SetMaxReceiveMessageSize(-1);
+                stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
+                    grpc::CreateCustomChannel(
+                        ss.str(), grpc::InsecureChannelCredentials(), chArgs));
+                BOOST_LOG_TRIVIAL(debug)
+                    << "Made stub for remote = " << toString();
+            }
+            catch (std::exception const& e)
+            {
+                BOOST_LOG_TRIVIAL(debug)
+                    << "Exception while creating stub = " << e.what()
+                    << " . Remote = " << toString();
+            }
+        }
+    }

     /// @param sequence ledger sequence to check for
     /// @return true if this source has the desired ledger
@@ -295,6 +438,22 @@ public:
     void
     reconnect(boost::beast::error_code ec);

+    /// Pause the source effectively stopping it from trying to reconnect
+    void
+    pause() override
+    {
+        paused_ = true;
+        derived().close(false);
+    }
+
+    /// Resume the source allowing it to reconnect again
+    void
+    resume() override
+    {
+        paused_ = false;
+        derived().close(true);
+    }
+
     /// Callback
     void
     onResolve(
@@ -344,8 +503,16 @@ public:
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<SubscriptionManager> subscriptions,
         std::shared_ptr<NetworkValidatedLedgers> nwvl,
-        ETLLoadBalancer& balancer)
-        : ETLSourceImpl(config, ioc, backend, subscriptions, nwvl, balancer)
+        ETLLoadBalancer& balancer,
+        ETLSourceHooks hooks)
+        : ETLSourceImpl(
+              config,
+              ioc,
+              backend,
+              subscriptions,
+              nwvl,
+              balancer,
+              std::move(hooks))
         , ws_(std::make_unique<
               boost::beast::websocket::stream<boost::beast::tcp_stream>>(
               boost::asio::make_strand(ioc)))
@@ -386,8 +553,16 @@ public:
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<SubscriptionManager> subscriptions,
         std::shared_ptr<NetworkValidatedLedgers> nwvl,
-        ETLLoadBalancer& balancer)
-        : ETLSourceImpl(config, ioc, backend, subscriptions, nwvl, balancer)
+        ETLLoadBalancer& balancer,
+        ETLSourceHooks hooks)
+        : ETLSourceImpl(
+              config,
+              ioc,
+              backend,
+              subscriptions,
+              nwvl,
+              balancer,
+              std::move(hooks))
         , sslCtx_(sslCtx)
         , ws_(std::make_unique<boost::beast::websocket::stream<
               boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
@@ -437,7 +612,6 @@ public:
     ETLLoadBalancer(
         boost::json::object const& config,
         boost::asio::io_context& ioContext,
-        std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<SubscriptionManager> subscriptions,
         std::shared_ptr<NetworkValidatedLedgers> nwvl);
@@ -446,13 +620,12 @@ public:
     make_ETLLoadBalancer(
         boost::json::object const& config,
         boost::asio::io_context& ioc,
-        std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<SubscriptionManager> subscriptions,
         std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
     {
         return std::make_shared<ETLLoadBalancer>(
-            config, ioc, sslCtx, backend, subscriptions, validatedLedgers);
+            config, ioc, backend, subscriptions, validatedLedgers);
     }

     ~ETLLoadBalancer()
src/etl/NFTHelpers.cpp (new file, 370 lines)

@@ -0,0 +1,370 @@
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <ripple/protocol/STBase.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>
#include <vector>

#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <backend/Types.h>

std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
    // To find the minted token ID, we put all tokenIDs referenced in the
    // metadata from prior to the tx application into one vector, then all
    // tokenIDs referenced in the metadata from after the tx application into
    // another, then find the one tokenID that was added by this tx
    // application.
    std::vector<ripple::uint256> prevIDs;
    std::vector<ripple::uint256> finalIDs;

    // The owner is not necessarily the issuer, if using authorized minter
    // flow. Determine owner from the ledger object ID of the NFTokenPages
    // that were changed.
    std::optional<ripple::AccountID> owner;

    for (ripple::STObject const& node : txMeta.getNodes())
    {
        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
            ripple::ltNFTOKEN_PAGE)
            continue;

        if (!owner)
            owner = ripple::AccountID::fromVoid(
                node.getFieldH256(ripple::sfLedgerIndex).data());

        if (node.getFName() == ripple::sfCreatedNode)
        {
            ripple::STArray const& toAddNFTs =
                node.peekAtField(ripple::sfNewFields)
                    .downcast<ripple::STObject>()
                    .getFieldArray(ripple::sfNFTokens);
            std::transform(
                toAddNFTs.begin(),
                toAddNFTs.end(),
                std::back_inserter(finalIDs),
                [](ripple::STObject const& nft) {
                    return nft.getFieldH256(ripple::sfNFTokenID);
                });
        }
        // Else it's modified, as there should never be a deleted NFToken page
        // as a result of a mint.
        else
        {
            // When a mint results in splitting an existing page,
            // it results in a created page and a modified node. Sometimes,
            // the created node needs to be linked to a third page, resulting
            // in modifying that third page's PreviousPageMin or NextPageMin
            // field changing, but no NFTs within that page changing. In this
            // case, there will be no previous NFTs and we need to skip.
            // However, there will always be NFTs listed in the final fields,
            // as rippled outputs all fields in final fields even if they were
            // not changed.
            ripple::STObject const& previousFields =
                node.peekAtField(ripple::sfPreviousFields)
                    .downcast<ripple::STObject>();
            if (!previousFields.isFieldPresent(ripple::sfNFTokens))
                continue;

            ripple::STArray const& toAddNFTs =
                previousFields.getFieldArray(ripple::sfNFTokens);
            std::transform(
                toAddNFTs.begin(),
                toAddNFTs.end(),
                std::back_inserter(prevIDs),
                [](ripple::STObject const& nft) {
                    return nft.getFieldH256(ripple::sfNFTokenID);
                });

            ripple::STArray const& toAddFinalNFTs =
                node.peekAtField(ripple::sfFinalFields)
                    .downcast<ripple::STObject>()
                    .getFieldArray(ripple::sfNFTokens);
            std::transform(
                toAddFinalNFTs.begin(),
                toAddFinalNFTs.end(),
                std::back_inserter(finalIDs),
                [](ripple::STObject const& nft) {
                    return nft.getFieldH256(ripple::sfNFTokenID);
                });
        }
    }

    std::sort(finalIDs.begin(), finalIDs.end());
    std::sort(prevIDs.begin(), prevIDs.end());
    std::vector<ripple::uint256> tokenIDResult;
    std::set_difference(
        finalIDs.begin(),
        finalIDs.end(),
        prevIDs.begin(),
        prevIDs.end(),
        std::inserter(tokenIDResult, tokenIDResult.begin()));
    if (tokenIDResult.size() == 1 && owner)
        return {
            {NFTTransactionsData(
                tokenIDResult.front(), txMeta, sttx.getTransactionID())},
            NFTsData(tokenIDResult.front(), *owner, txMeta, false)};

    std::stringstream msg;
    msg << __func__ << " - unexpected NFTokenMint data in tx "
        << sttx.getTransactionID();
    throw std::runtime_error(msg.str());
}

std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
    ripple::uint256 const tokenID = sttx.getFieldH256(ripple::sfNFTokenID);
    std::vector<NFTTransactionsData> const txs = {
        NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())};

    // Determine who owned the token when it was burned by finding an
    // NFTokenPage that was deleted or modified that contains this
    // tokenID.
    for (ripple::STObject const& node : txMeta.getNodes())
    {
        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
                ripple::ltNFTOKEN_PAGE ||
            node.getFName() == ripple::sfCreatedNode)
            continue;

        // NFT burn can result in an NFTokenPage being modified to no longer
        // include the target, or an NFTokenPage being deleted. If this is
        // modified, we want to look for the target in the fields prior to
        // modification. If deleted, it's possible that the page was modified
        // to remove the target NFT prior to the entire page being deleted. In
        // this case, we need to look in the PreviousFields. Otherwise, the
        // page was not modified prior to deleting and we need to look in the
        // FinalFields.
        std::optional<ripple::STArray> prevNFTs;

        if (node.isFieldPresent(ripple::sfPreviousFields))
        {
            ripple::STObject const& previousFields =
                node.peekAtField(ripple::sfPreviousFields)
                    .downcast<ripple::STObject>();
            if (previousFields.isFieldPresent(ripple::sfNFTokens))
                prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
        }
        else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
            prevNFTs = node.peekAtField(ripple::sfFinalFields)
                           .downcast<ripple::STObject>()
                           .getFieldArray(ripple::sfNFTokens);

        if (!prevNFTs)
            continue;

        auto const nft = std::find_if(
            prevNFTs->begin(),
            prevNFTs->end(),
            [&tokenID](ripple::STObject const& candidate) {
                return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
            });
        if (nft != prevNFTs->end())
            return std::make_pair(
                txs,
                NFTsData(
                    tokenID,
                    ripple::AccountID::fromVoid(
                        node.getFieldH256(ripple::sfLedgerIndex).data()),
                    txMeta,
                    true));
    }

    std::stringstream msg;
    msg << __func__ << " - could not determine owner at burntime for tx "
        << sttx.getTransactionID();
    throw std::runtime_error(msg.str());
}

std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenAcceptOfferData(
    ripple::TxMeta const& txMeta,
    ripple::STTx const& sttx)
{
    // If we have the buy offer from this tx, we can determine the owner
    // more easily by just looking at the owner of the accepted NFTokenOffer
    // object.
    if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer))
    {
        auto const affectedBuyOffer = std::find_if(
            txMeta.getNodes().begin(),
            txMeta.getNodes().end(),
            [&sttx](ripple::STObject const& node) {
                return node.getFieldH256(ripple::sfLedgerIndex) ==
                    sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
            });
        if (affectedBuyOffer == txMeta.getNodes().end())
        {
            std::stringstream msg;
            msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
                << sttx.getTransactionID();
            throw std::runtime_error(msg.str());
        }

        ripple::uint256 const tokenID =
            affectedBuyOffer->peekAtField(ripple::sfFinalFields)
                .downcast<ripple::STObject>()
                .getFieldH256(ripple::sfNFTokenID);

        ripple::AccountID const owner =
            affectedBuyOffer->peekAtField(ripple::sfFinalFields)
                .downcast<ripple::STObject>()
                .getAccountID(ripple::sfOwner);
        return {
            {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
            NFTsData(tokenID, owner, txMeta, false)};
    }

    // Otherwise we have to infer the new owner from the affected nodes.
    auto const affectedSellOffer = std::find_if(
        txMeta.getNodes().begin(),
        txMeta.getNodes().end(),
        [&sttx](ripple::STObject const& node) {
            return node.getFieldH256(ripple::sfLedgerIndex) ==
                sttx.getFieldH256(ripple::sfNFTokenSellOffer);
        });
    if (affectedSellOffer == txMeta.getNodes().end())
    {
        std::stringstream msg;
        msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
            << sttx.getTransactionID();
        throw std::runtime_error(msg.str());
    }

    ripple::uint256 const tokenID =
        affectedSellOffer->peekAtField(ripple::sfFinalFields)
            .downcast<ripple::STObject>()
            .getFieldH256(ripple::sfNFTokenID);

    ripple::AccountID const seller =
        affectedSellOffer->peekAtField(ripple::sfFinalFields)
            .downcast<ripple::STObject>()
            .getAccountID(ripple::sfOwner);

    for (ripple::STObject const& node : txMeta.getNodes())
    {
        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
                ripple::ltNFTOKEN_PAGE ||
            node.getFName() == ripple::sfDeletedNode)
            continue;

        ripple::AccountID const nodeOwner = ripple::AccountID::fromVoid(
            node.getFieldH256(ripple::sfLedgerIndex).data());
        if (nodeOwner == seller)
            continue;

        ripple::STArray const& nfts = [&node] {
            if (node.getFName() == ripple::sfCreatedNode)
                return node.peekAtField(ripple::sfNewFields)
                    .downcast<ripple::STObject>()
                    .getFieldArray(ripple::sfNFTokens);
            return node.peekAtField(ripple::sfFinalFields)
                .downcast<ripple::STObject>()
                .getFieldArray(ripple::sfNFTokens);
        }();

        auto const nft = std::find_if(
            nfts.begin(),
            nfts.end(),
            [&tokenID](ripple::STObject const& candidate) {
                return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
            });
        if (nft != nfts.end())
            return {
                {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
                NFTsData(tokenID, nodeOwner, txMeta, false)};
}
|
||||||
|
|
||||||
|
std::stringstream msg;
|
||||||
|
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
|
||||||
|
<< sttx.getTransactionID();
|
||||||
|
throw std::runtime_error(msg.str());
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is the only transaction where there can be more than 1 element in
|
||||||
|
// the returned vector, because you can cancel multiple offers in one
|
||||||
|
// transaction using this feature. This transaction also never returns an
|
||||||
|
// NFTsData because it does not change the state of an NFT itself.
|
||||||
|
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||||
|
getNFTokenCancelOfferData(
|
||||||
|
ripple::TxMeta const& txMeta,
|
||||||
|
ripple::STTx const& sttx)
|
||||||
|
{
|
||||||
|
std::vector<NFTTransactionsData> txs;
|
||||||
|
for (ripple::STObject const& node : txMeta.getNodes())
|
||||||
|
{
|
||||||
|
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
|
||||||
|
ripple::ltNFTOKEN_OFFER)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
ripple::uint256 const tokenID = node.peekAtField(ripple::sfFinalFields)
|
||||||
|
.downcast<ripple::STObject>()
|
||||||
|
.getFieldH256(ripple::sfNFTokenID);
|
||||||
|
txs.emplace_back(tokenID, txMeta, sttx.getTransactionID());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deduplicate any transactions based on tokenID/txIdx combo. Can't just
|
||||||
|
// use txIdx because in this case one tx can cancel offers for several
|
||||||
|
// NFTs.
|
||||||
|
std::sort(
|
||||||
|
txs.begin(),
|
||||||
|
txs.end(),
|
||||||
|
[](NFTTransactionsData const& a, NFTTransactionsData const& b) {
|
||||||
|
return a.tokenID < b.tokenID &&
|
||||||
|
a.transactionIndex < b.transactionIndex;
|
||||||
|
});
|
||||||
|
auto last = std::unique(
|
||||||
|
txs.begin(),
|
||||||
|
txs.end(),
|
||||||
|
[](NFTTransactionsData const& a, NFTTransactionsData const& b) {
|
||||||
|
return a.tokenID == b.tokenID &&
|
||||||
|
a.transactionIndex == b.transactionIndex;
|
||||||
|
});
|
||||||
|
txs.erase(last, txs.end());
|
||||||
|
return {txs, {}};
|
||||||
|
}
|
||||||
|
|
||||||
|
// This transaction never returns an NFTokensData because it does not
|
||||||
|
// change the state of an NFT itself.
|
||||||
|
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||||
|
getNFTokenCreateOfferData(
|
||||||
|
ripple::TxMeta const& txMeta,
|
||||||
|
ripple::STTx const& sttx)
|
||||||
|
{
|
||||||
|
return {
|
||||||
|
{NFTTransactionsData(
|
||||||
|
sttx.getFieldH256(ripple::sfNFTokenID),
|
||||||
|
txMeta,
|
||||||
|
sttx.getTransactionID())},
|
||||||
|
{}};
|
||||||
|
}
|
||||||
|
|
||||||
|
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||||
|
getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
|
||||||
|
{
|
||||||
|
if (txMeta.getResultTER() != ripple::tesSUCCESS)
|
||||||
|
return {{}, {}};
|
||||||
|
|
||||||
|
switch (sttx.getTxnType())
|
||||||
|
{
|
||||||
|
case ripple::TxType::ttNFTOKEN_MINT:
|
||||||
|
return getNFTokenMintData(txMeta, sttx);
|
||||||
|
|
||||||
|
case ripple::TxType::ttNFTOKEN_BURN:
|
||||||
|
return getNFTokenBurnData(txMeta, sttx);
|
||||||
|
|
||||||
|
case ripple::TxType::ttNFTOKEN_ACCEPT_OFFER:
|
||||||
|
return getNFTokenAcceptOfferData(txMeta, sttx);
|
||||||
|
|
||||||
|
case ripple::TxType::ttNFTOKEN_CANCEL_OFFER:
|
||||||
|
return getNFTokenCancelOfferData(txMeta, sttx);
|
||||||
|
|
||||||
|
case ripple::TxType::ttNFTOKEN_CREATE_OFFER:
|
||||||
|
return getNFTokenCreateOfferData(txMeta, sttx);
|
||||||
|
|
||||||
|
default:
|
||||||
|
return {{}, {}};
|
||||||
|
}
|
||||||
|
}
|
||||||
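For orientation, here is a minimal sketch of how a caller can drive getNFTData for a single raw transaction. It mirrors the deserialization steps performed by ReportingETL::insertTransactions later in this diff; the input blob names and the helper function itself are hypothetical, not clio API.

    // Sketch: feeding one raw transaction plus its metadata blob into
    // getNFTData. `rawTxn`/`rawMeta` are hypothetical inputs; the ripple
    // types and calls match those used by insertTransactions below.
    #include <ripple/protocol/STTx.h>
    #include <ripple/protocol/Serializer.h>
    #include <ripple/protocol/TxMeta.h>
    #include <string>

    void
    processOneTransaction(
        std::string const& rawTxn,
        std::string const& rawMeta,
        std::uint32_t ledgerSeq)
    {
        ripple::SerialIter it{rawTxn.data(), rawTxn.size()};
        ripple::STTx const sttx{it};
        ripple::TxMeta const txMeta{
            sttx.getTransactionID(), ledgerSeq, rawMeta};

        // Empty results for transactions that do not touch NFTs.
        auto const [nftTxs, maybeNFT] = getNFTData(txMeta, sttx);
        // ... hand nftTxs / maybeNFT to the backend writer ...
    }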
190 src/etl/ProbingETLSource.cpp Normal file
@@ -0,0 +1,190 @@
#include <etl/ProbingETLSource.h>

ProbingETLSource::ProbingETLSource(
    boost::json::object const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> nwvl,
    ETLLoadBalancer& balancer,
    boost::asio::ssl::context sslCtx)
    : ioc_{ioc}
    , sslCtx_{std::move(sslCtx)}
    , sslSrc_{std::make_shared<SslETLSource>(
          config,
          ioc,
          std::ref(sslCtx_),
          backend,
          subscriptions,
          nwvl,
          balancer,
          make_SSLHooks())}
    , plainSrc_{std::make_shared<PlainETLSource>(
          config,
          ioc,
          backend,
          subscriptions,
          nwvl,
          balancer,
          make_PlainHooks())}
{
}

void
ProbingETLSource::run()
{
    sslSrc_->run();
    plainSrc_->run();
}

void
ProbingETLSource::pause()
{
    sslSrc_->pause();
    plainSrc_->pause();
}

void
ProbingETLSource::resume()
{
    sslSrc_->resume();
    plainSrc_->resume();
}

bool
ProbingETLSource::isConnected() const
{
    return currentSrc_ && currentSrc_->isConnected();
}

bool
ProbingETLSource::hasLedger(uint32_t sequence) const
{
    if (!currentSrc_)
        return false;
    return currentSrc_->hasLedger(sequence);
}

boost::json::object
ProbingETLSource::toJson() const
{
    if (!currentSrc_)
        return {};
    return currentSrc_->toJson();
}

std::string
ProbingETLSource::toString() const
{
    if (!currentSrc_)
        return "{ probing }";
    return currentSrc_->toString();
}

bool
ProbingETLSource::loadInitialLedger(
    std::uint32_t ledgerSequence,
    std::uint32_t numMarkers,
    bool cacheOnly)
{
    if (!currentSrc_)
        return false;
    return currentSrc_->loadInitialLedger(
        ledgerSequence, numMarkers, cacheOnly);
}

std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
ProbingETLSource::fetchLedger(
    uint32_t ledgerSequence,
    bool getObjects,
    bool getObjectNeighbors)
{
    if (!currentSrc_)
        return {};
    return currentSrc_->fetchLedger(
        ledgerSequence, getObjects, getObjectNeighbors);
}

std::optional<boost::json::object>
ProbingETLSource::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (!currentSrc_)
        return {};
    return currentSrc_->forwardToRippled(request, clientIp, yield);
}

std::optional<boost::json::object>
ProbingETLSource::requestFromRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (!currentSrc_)
        return {};
    return currentSrc_->requestFromRippled(request, clientIp, yield);
}

ETLSourceHooks
ProbingETLSource::make_SSLHooks() noexcept
{
    return {// onConnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                    return ETLSourceHooks::Action::STOP;

                if (!ec)
                {
                    plainSrc_->pause();
                    currentSrc_ = sslSrc_;
                    BOOST_LOG_TRIVIAL(info)
                        << "Selected WSS as the main source: "
                        << currentSrc_->toString();
                }
                return ETLSourceHooks::Action::PROCEED;
            },
            // onDisconnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                {
                    currentSrc_ = nullptr;
                    plainSrc_->resume();
                }
                return ETLSourceHooks::Action::STOP;
            }};
}

ETLSourceHooks
ProbingETLSource::make_PlainHooks() noexcept
{
    return {// onConnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                    return ETLSourceHooks::Action::STOP;

                if (!ec)
                {
                    sslSrc_->pause();
                    currentSrc_ = plainSrc_;
                    BOOST_LOG_TRIVIAL(info)
                        << "Selected Plain WS as the main source: "
                        << currentSrc_->toString();
                }
                return ETLSourceHooks::Action::PROCEED;
            },
            // onDisconnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                {
                    currentSrc_ = nullptr;
                    sslSrc_->resume();
                }
                return ETLSourceHooks::Action::STOP;
            }};
}
91 src/etl/ProbingETLSource.h Normal file
@@ -0,0 +1,91 @@
#ifndef RIPPLE_APP_REPORTING_PROBINGETLSOURCE_H_INCLUDED
#define RIPPLE_APP_REPORTING_PROBINGETLSOURCE_H_INCLUDED

#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <etl/ETLSource.h>
#include <mutex>

/// This ETLSource implementation attempts to connect over both secure
/// websocket and plain websocket. The first to connect pauses the other, and
/// the probing is considered done at that point. If, however, the connected
/// source loses its connection, the probing is kickstarted again.
class ProbingETLSource : public ETLSource
{
    std::mutex mtx_;
    boost::asio::io_context& ioc_;
    boost::asio::ssl::context sslCtx_;
    std::shared_ptr<ETLSource> sslSrc_;
    std::shared_ptr<ETLSource> plainSrc_;
    std::shared_ptr<ETLSource> currentSrc_;

public:
    ProbingETLSource(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        ETLLoadBalancer& balancer,
        boost::asio::ssl::context sslCtx = boost::asio::ssl::context{
            boost::asio::ssl::context::tlsv12});

    ~ProbingETLSource() = default;

    void
    run() override;

    void
    pause() override;

    void
    resume() override;

    bool
    isConnected() const override;

    bool
    hasLedger(uint32_t sequence) const override;

    boost::json::object
    toJson() const override;

    std::string
    toString() const override;

    bool
    loadInitialLedger(
        std::uint32_t ledgerSequence,
        std::uint32_t numMarkers,
        bool cacheOnly = false) override;

    std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(
        uint32_t ledgerSequence,
        bool getObjects = true,
        bool getObjectNeighbors = false) override;

    std::optional<boost::json::object>
    forwardToRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;

private:
    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;

    ETLSourceHooks
    make_SSLHooks() noexcept;

    ETLSourceHooks
    make_PlainHooks() noexcept;
};

#endif
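The probing behavior boils down to a small state machine: two transports race to connect, the first success pauses the loser, and a disconnect of the winner resumes both. A distilled, self-contained sketch of that pattern (toy types, not clio's ETLSourceHooks API):

    // Distilled "first successful connect wins" pattern from above.
    #include <iostream>
    #include <memory>
    #include <mutex>
    #include <string>

    struct FakeSource
    {
        explicit FakeSource(std::string n) : name(std::move(n)) {}
        std::string name;
        bool paused = false;
        void pause() { paused = true; }
        void resume() { paused = false; }
    };

    class Prober
    {
        std::mutex mtx_;
        std::shared_ptr<FakeSource> a_ = std::make_shared<FakeSource>("wss");
        std::shared_ptr<FakeSource> b_ = std::make_shared<FakeSource>("ws");
        std::shared_ptr<FakeSource> current_;

    public:
        // Called by a transport when its connection attempt finishes.
        void onConnected(std::shared_ptr<FakeSource> const& src, bool ok)
        {
            std::lock_guard lck(mtx_);
            if (current_ || !ok)
                return;                      // a winner was already chosen
            current_ = src;                  // first successful connect wins
            (src == a_ ? b_ : a_)->pause();  // stop probing the other one
            std::cout << "selected " << src->name << "\n";
        }

        // Called by the winning transport when it loses its connection.
        void onDisconnected()
        {
            std::lock_guard lck(mtx_);
            if (!current_)
                return;
            current_.reset();
            a_->resume();  // kickstart the race again on both transports
            b_->resume();
        }
    };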
@@ -28,12 +28,13 @@ toString(ripple::LedgerInfo const& info)
 }
 }  // namespace detail
 
-std::vector<AccountTransactionsData>
+FormattedTransactionsData
 ReportingETL::insertTransactions(
     ripple::LedgerInfo const& ledger,
     org::xrpl::rpc::v1::GetLedgerResponse& data)
 {
-    std::vector<AccountTransactionsData> accountTxData;
+    FormattedTransactionsData result;
+
     for (auto& txn :
          *(data.mutable_transactions_list()->mutable_transactions()))
     {
@@ -42,21 +43,22 @@ ReportingETL::insertTransactions(
         ripple::SerialIter it{raw->data(), raw->size()};
         ripple::STTx sttx{it};
 
-        auto txSerializer =
-            std::make_shared<ripple::Serializer>(sttx.getSerializer());
-
-        ripple::TxMeta txMeta{
-            sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};
-
-        auto metaSerializer = std::make_shared<ripple::Serializer>(
-            txMeta.getAsObject().getSerializer());
-
         BOOST_LOG_TRIVIAL(trace)
             << __func__ << " : "
             << "Inserting transaction = " << sttx.getTransactionID();
 
+        ripple::TxMeta txMeta{
+            sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};
+
+        auto const [nftTxs, maybeNFT] = getNFTData(txMeta, sttx);
+        result.nfTokenTxData.insert(
+            result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end());
+        if (maybeNFT)
+            result.nfTokensData.push_back(*maybeNFT);
+
         auto journal = ripple::debugLog();
-        accountTxData.emplace_back(txMeta, sttx.getTransactionID(), journal);
+        result.accountTxData.emplace_back(
+            txMeta, sttx.getTransactionID(), journal);
         std::string keyStr{(const char*)sttx.getTransactionID().data(), 32};
         backend_->writeTransaction(
             std::move(keyStr),
@@ -65,7 +67,27 @@ ReportingETL::insertTransactions(
             std::move(*raw),
             std::move(*txn.mutable_metadata_blob()));
     }
-    return accountTxData;
+
+    // Remove all but the last NFTsData for each id. unique removes all
+    // but the first of a group, so we want to reverse sort by transaction
+    // index.
+    std::sort(
+        result.nfTokensData.begin(),
+        result.nfTokensData.end(),
+        [](NFTsData const& a, NFTsData const& b) {
+            // lexicographic comparison: a bare `&&` of two `>` tests is
+            // not a strict weak ordering, which std::sort requires
+            return a.tokenID > b.tokenID ||
+                (a.tokenID == b.tokenID &&
+                 a.transactionIndex > b.transactionIndex);
+        });
+    // Now we can unique the NFTs by tokenID.
+    auto last = std::unique(
+        result.nfTokensData.begin(),
+        result.nfTokensData.end(),
+        [](NFTsData const& a, NFTsData const& b) {
+            return a.tokenID == b.tokenID;
+        });
+    result.nfTokensData.erase(last, result.nfTokensData.end());
+
+    return result;
 }
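The keep-the-last-entry-per-key idiom above (descending sort followed by std::unique) is easy to get subtly wrong, since std::sort requires a strict weak ordering. A minimal self-contained sketch with toy types, not clio's NFTsData:

    // Keep-last-per-key: sort descending by (key, index) so the newest
    // entry of each key comes first, then unique by key keeps exactly it.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Row
    {
        int key;
        int index;  // e.g. transaction index within a ledger
    };

    int
    main()
    {
        std::vector<Row> rows{{7, 1}, {7, 9}, {3, 2}};

        // Strict weak ordering: compare the key first, then the index.
        std::sort(rows.begin(), rows.end(), [](Row const& a, Row const& b) {
            return a.key > b.key || (a.key == b.key && a.index > b.index);
        });
        auto last = std::unique(
            rows.begin(), rows.end(), [](Row const& a, Row const& b) {
                return a.key == b.key;
            });
        rows.erase(last, rows.end());

        assert(rows.size() == 2);    // one row per key
        assert(rows[0].index == 9);  // the latest row for key 7 survived
    }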
@@ -106,7 +128,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
         lgrInfo, std::move(*ledgerData->mutable_ledger_header()));
 
     BOOST_LOG_TRIVIAL(debug) << __func__ << " wrote ledger";
-    std::vector<AccountTransactionsData> accountTxData =
+    FormattedTransactionsData insertTxResult =
         insertTransactions(lgrInfo, *ledgerData);
     BOOST_LOG_TRIVIAL(debug) << __func__ << " inserted txns";
 
@@ -119,8 +141,12 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
     BOOST_LOG_TRIVIAL(debug) << __func__ << " loaded initial ledger";
 
     if (!stopping_)
-        backend_->writeAccountTransactions(std::move(accountTxData));
+    {
+        backend_->writeAccountTransactions(
+            std::move(insertTxResult.accountTxData));
+        backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
+        backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
+    }
     backend_->finishWrites(startingSequence);
 
     auto end = std::chrono::system_clock::now();
@@ -147,11 +173,9 @@ ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo)
         backend_->cache().update(diff, lgrInfo.seq);
         backend_->updateRange(lgrInfo.seq);
     }
-    auto now = std::chrono::duration_cast<std::chrono::seconds>(
-                   std::chrono::system_clock::now().time_since_epoch())
-                   .count();
-    auto closeTime = lgrInfo.closeTime.time_since_epoch().count();
-    auto age = now - (rippleEpochStart + closeTime);
+    setLastClose(lgrInfo.closeTime);
+    auto age = lastCloseAgeSeconds();
+
     // if the ledger closed over 10 minutes ago, assume we are still
     // catching up and don't publish
     if (age < 600)
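The age calculation that moved into lastCloseAgeSeconds() hinges on the XRP Ledger epoch offset. A worked sketch, assuming rippleEpochStart is the usual 946684800 (2000-01-01T00:00:00Z, the gap between the Unix epoch and NetClock's epoch):

    // Worked sketch of the ledger-age arithmetic behind the publish cutoff.
    #include <cstdint>

    constexpr std::int64_t rippleEpochStart = 946684800;  // assumed value

    std::int64_t
    ledgerAgeSeconds(std::int64_t closeTimeNet, std::int64_t nowUnix)
    {
        // closeTimeNet counts seconds since 2000; convert to Unix time.
        auto const closeTimeUnix = rippleEpochStart + closeTimeNet;
        return nowUnix < closeTimeUnix ? 0 : nowUnix - closeTimeUnix;
    }

    // Example: a ledger closed at NetClock 700000000 corresponds to Unix
    // time 946684800 + 700000000 = 1646684800; with nowUnix = 1646685400
    // the ledger is 600 seconds old, exactly the publish cutoff above.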
@@ -513,15 +537,15 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
         << __func__ << " : "
         << "Inserted/modified/deleted all objects. Number of objects = "
         << rawData.ledger_objects().objects_size();
-    std::vector<AccountTransactionsData> accountTxData{
-        insertTransactions(lgrInfo, rawData)};
+    FormattedTransactionsData insertTxResult =
+        insertTransactions(lgrInfo, rawData);
     BOOST_LOG_TRIVIAL(debug)
         << __func__ << " : "
         << "Inserted all transactions. Number of transactions = "
         << rawData.transactions_list().transactions_size();
-    backend_->writeAccountTransactions(std::move(accountTxData));
+    backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData));
+    backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
+    backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
     BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                              << "wrote account_tx";
     auto start = std::chrono::system_clock::now();
@@ -670,8 +694,6 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
     beast::setCurrentThreadName("rippled: ReportingETL transform");
     uint32_t currentSequence = startSequence;
 
-    auto begin = std::chrono::system_clock::now();
-
     while (!writeConflict)
     {
         std::optional<org::xrpl::rpc::v1::GetLedgerResponse> fetchResponse{
@@ -894,6 +916,7 @@ ReportingETL::loadCache(uint32_t seq)
 {
     if (cacheLoadStyle_ == CacheLoadStyle::NOT_AT_ALL)
     {
+        backend_->cache().setDisabled();
         BOOST_LOG_TRIVIAL(warning) << "Cache is disabled. Not loading";
         return;
     }
@@ -915,7 +938,7 @@ ReportingETL::loadCache(uint32_t seq)
         a.insert(std::end(a), std::begin(b), std::end(b));
     };
 
-    for (size_t i = 0; i < numDiffs_; ++i)
+    for (size_t i = 0; i < numCacheDiffs_; ++i)
     {
         append(diff, Backend::synchronousAndRetryOnTimeout([&](auto yield) {
             return backend_->fetchLedgerDiff(seq - i, yield);
@@ -950,55 +973,74 @@ ReportingETL::loadCache(uint32_t seq)
         << "Loading cache. num cursors = " << cursors.size() - 1;
     BOOST_LOG_TRIVIAL(debug) << __func__ << " cursors = " << cursorStr.str();
 
-    std::atomic_uint* numRemaining = new std::atomic_uint{cursors.size() - 1};
-    auto startTime = std::chrono::system_clock::now();
-    for (size_t i = 0; i < cursors.size() - 1; ++i)
-    {
-        std::optional<ripple::uint256> start = cursors[i];
-        std::optional<ripple::uint256> end = cursors[i + 1];
-        boost::asio::spawn(
-            ioContext_,
-            [this, seq, start, end, numRemaining, startTime](
-                boost::asio::yield_context yield) {
-                std::optional<ripple::uint256> cursor = start;
-                while (true)
-                {
-                    auto res =
-                        Backend::retryOnTimeout([this, seq, &cursor, &yield]() {
-                            return backend_->fetchLedgerPage(
-                                cursor, seq, 256, false, yield);
-                        });
-                    backend_->cache().update(res.objects, seq, true);
-                    if (!res.cursor || (end && *(res.cursor) > *end))
-                        break;
-                    BOOST_LOG_TRIVIAL(debug)
-                        << "Loading cache. cache size = "
-                        << backend_->cache().size()
-                        << " - cursor = " << ripple::strHex(res.cursor.value());
-                    cursor = std::move(res.cursor);
-                }
-                if (--(*numRemaining) == 0)
-                {
-                    auto endTime = std::chrono::system_clock::now();
-                    auto duration =
-                        std::chrono::duration_cast<std::chrono::seconds>(
-                            endTime - startTime);
-                    BOOST_LOG_TRIVIAL(info)
-                        << "Finished loading cache. cache size = "
-                        << backend_->cache().size() << ". Took "
-                        << duration.count() << " seconds";
-                    backend_->cache().setFull();
-                    delete numRemaining;
-                }
-                else
-                {
-                    BOOST_LOG_TRIVIAL(info)
-                        << "Finished a cursor. num remaining = "
-                        << *numRemaining;
-                }
-            });
-    }
+    cacheDownloader_ = std::thread{[this, seq, cursors]() {
+        auto startTime = std::chrono::system_clock::now();
+        auto markers = std::make_shared<std::atomic_int>(0);
+        auto numRemaining =
+            std::make_shared<std::atomic_int>(cursors.size() - 1);
+        for (size_t i = 0; i < cursors.size() - 1; ++i)
+        {
+            std::optional<ripple::uint256> start = cursors[i];
+            std::optional<ripple::uint256> end = cursors[i + 1];
+            markers->wait(numCacheMarkers_);
+            ++(*markers);
+            boost::asio::spawn(
+                ioContext_,
+                [this, seq, start, end, numRemaining, startTime, markers](
+                    boost::asio::yield_context yield) {
+                    std::optional<ripple::uint256> cursor = start;
+                    std::string cursorStr = cursor.has_value()
+                        ? ripple::strHex(cursor.value())
+                        : ripple::strHex(Backend::firstKey);
+                    BOOST_LOG_TRIVIAL(debug)
+                        << "Starting a cursor: " << cursorStr
+                        << " markers = " << *markers;
+
+                    while (!stopping_)
+                    {
+                        auto res = Backend::retryOnTimeout([this,
+                                                            seq,
+                                                            &cursor,
+                                                            &yield]() {
+                            return backend_->fetchLedgerPage(
+                                cursor, seq, cachePageFetchSize_, false, yield);
+                        });
+                        backend_->cache().update(res.objects, seq, true);
+                        if (!res.cursor || (end && *(res.cursor) > *end))
+                            break;
+                        BOOST_LOG_TRIVIAL(debug)
+                            << "Loading cache. cache size = "
+                            << backend_->cache().size() << " - cursor = "
+                            << ripple::strHex(res.cursor.value())
+                            << " start = " << cursorStr
+                            << " markers = " << *markers;
+                        cursor = std::move(res.cursor);
+                    }
+                    --(*markers);
+                    markers->notify_one();
+                    if (--(*numRemaining) == 0)
+                    {
+                        auto endTime = std::chrono::system_clock::now();
+                        auto duration =
+                            std::chrono::duration_cast<std::chrono::seconds>(
+                                endTime - startTime);
+                        BOOST_LOG_TRIVIAL(info)
+                            << "Finished loading cache. cache size = "
+                            << backend_->cache().size() << ". Took "
+                            << duration.count() << " seconds";
+                        backend_->cache().setFull();
+                    }
+                    else
+                    {
+                        BOOST_LOG_TRIVIAL(info)
+                            << "Finished a cursor. num remaining = "
+                            << *numRemaining << " start = " << cursorStr
+                            << " markers = " << *markers;
+                    }
+                });
+        }
+    }};
     // If loading synchronously, poll cache until full
     while (cacheLoadStyle_ == CacheLoadStyle::SYNC &&
            !backend_->cache().isFull())
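The rewritten loader caps the number of in-flight cursors with a C++20 atomic counter: wait(numCacheMarkers_) blocks the dispatcher only while the counter sits at the cap, and each finished worker decrements and notifies. A stripped-down sketch of that throttle, using plain threads in place of boost::asio coroutines:

    // Marker throttle distilled: requires C++20 std::atomic wait/notify.
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    int
    main()
    {
        constexpr int cap = 3;  // stands in for numCacheMarkers_
        std::atomic<int> markers{0};
        std::vector<std::thread> work;

        for (int i = 0; i < 10; ++i)
        {
            markers.wait(cap);  // blocks only while the counter == cap
            ++markers;          // single dispatcher, so never exceeds cap
            work.emplace_back([&markers, i] {
                std::printf("cursor %d running\n", i);
                --markers;
                markers.notify_one();  // wake the dispatcher if waiting
            });
        }
        for (auto& t : work)
            t.join();
    }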
@@ -1108,9 +1150,12 @@ ReportingETL::ReportingETL(
         if (entry == "none" || entry == "no")
             cacheLoadStyle_ = CacheLoadStyle::NOT_AT_ALL;
     }
-    if (cache.contains("num_diffs") && cache.at("num_diffs").as_int64())
-    {
-        numDiffs_ = cache.at("num_diffs").as_int64();
-    }
+    if (cache.contains("num_diffs") && cache.at("num_diffs").is_int64())
+        numCacheDiffs_ = cache.at("num_diffs").as_int64();
+    if (cache.contains("num_markers") && cache.at("num_markers").is_int64())
+        numCacheMarkers_ = cache.at("num_markers").as_int64();
+    if (cache.contains("page_fetch_size") &&
+        cache.at("page_fetch_size").is_int64())
+        cachePageFetchSize_ = cache.at("page_fetch_size").as_int64();
 }
 }
@@ -19,7 +19,22 @@
 
 #include <chrono>
 
+/**
+ * Helper function for the ReportingETL, implemented in NFTHelpers.cpp, to
+ * pull to-write data out of a transaction that relates to NFTs.
+ */
+std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
+getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
+
 struct AccountTransactionsData;
+struct NFTTransactionsData;
+struct NFTsData;
+struct FormattedTransactionsData
+{
+    std::vector<AccountTransactionsData> accountTxData;
+    std::vector<NFTTransactionsData> nfTokenTxData;
+    std::vector<NFTsData> nfTokensData;
+};
 class SubscriptionManager;
 
 /**
@@ -52,7 +67,15 @@ private:
 
     // number of diffs to use to generate cursors to traverse the ledger in
     // parallel during initial cache download
-    size_t numDiffs_ = 1;
+    size_t numCacheDiffs_ = 32;
+    // number of markers to use at one time to traverse the ledger in parallel
+    // during initial cache download
+    size_t numCacheMarkers_ = 48;
+    // number of ledger objects to fetch concurrently per marker during cache
+    // download
+    size_t cachePageFetchSize_ = 512;
+    // thread responsible for syncing the cache on startup
+    std::thread cacheDownloader_;
 
     std::thread worker_;
     boost::asio::io_context& ioContext_;
@@ -86,18 +109,6 @@ private:
     // deletion
     std::atomic_bool deleting_ = false;
 
-    /// Used to determine when to write to the database during the initial
-    /// ledger download. By default, the software downloads an entire ledger and
-    /// then writes to the database. If flushInterval_ is non-zero, the software
-    /// will write to the database as new ledger data (SHAMap leaf nodes)
-    /// arrives. It is not neccesarily more effient to write the data as it
-    /// arrives, as different SHAMap leaf nodes share the same SHAMap inner
-    /// nodes; flushing prematurely can result in the same SHAMap inner node
-    /// being written to the database more than once. It is recommended to use
-    /// the default value of 0 for this variable; however, different values can
-    /// be experimented with if better performance is desired.
-    size_t flushInterval_ = 0;
-
     /// This variable controls the number of GetLedgerData calls that will be
     /// executed in parallel during the initial ledger download. GetLedgerData
     /// allows clients to page through a ledger over many RPC calls.
@@ -123,29 +134,33 @@ private:
     std::optional<uint32_t> startSequence_;
     std::optional<uint32_t> finishSequence_;
 
-    size_t accumTxns_ = 0;
     size_t txnThreshold_ = 0;
 
     /// The time that the most recently published ledger was published. Used by
     /// server_info
     std::chrono::time_point<std::chrono::system_clock> lastPublish_;
 
-    mutable std::mutex publishTimeMtx_;
-
-    std::chrono::time_point<std::chrono::system_clock>
-    getLastPublish() const
-    {
-        std::unique_lock<std::mutex> lck(publishTimeMtx_);
-        return lastPublish_;
-    }
+    mutable std::shared_mutex publishTimeMtx_;
 
     void
     setLastPublish()
     {
-        std::unique_lock<std::mutex> lck(publishTimeMtx_);
+        std::unique_lock lck(publishTimeMtx_);
         lastPublish_ = std::chrono::system_clock::now();
     }
 
+    /// The time that the most recently published ledger was closed.
+    std::chrono::time_point<ripple::NetClock> lastCloseTime_;
+
+    mutable std::shared_mutex closeTimeMtx_;
+
+    void
+    setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
+    {
+        std::unique_lock lck(closeTimeMtx_);
+        lastCloseTime_ = lastCloseTime;
+    }
+
     /// Download a ledger with specified sequence in full, via GetLedgerData,
     /// and write the data to the databases. This takes several minutes or
     /// longer.
@@ -208,14 +223,16 @@ private:
     std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
     fetchLedgerDataAndDiff(uint32_t sequence);
 
-    /// Insert all of the extracted transactions into the ledger
+    /// Insert all of the extracted transactions into the ledger, returning
+    /// transactions related to accounts, transactions related to NFTs, and
+    /// NFTs themselves for later processing.
     /// @param ledger ledger to insert transactions into
     /// @param data data extracted from an ETL source
     /// @return struct that contains the necessary info to write to the
-    /// transctions and account_transactions tables in Postgres (mostly
-    /// transaction hashes, corresponding nodestore hashes and affected
-    /// accounts)
-    std::vector<AccountTransactionsData>
+    /// account_transactions/account_tx and nft_token_transactions tables
+    /// (mostly transaction hashes, corresponding nodestore hashes and affected
+    /// accounts)
+    FormattedTransactionsData
     insertTransactions(
         ripple::LedgerInfo const& ledger,
         org::xrpl::rpc::v1::GetLedgerResponse& data);
@@ -308,6 +325,8 @@ public:
 
         if (worker_.joinable())
             worker_.join();
+        if (cacheDownloader_.joinable())
+            cacheDownloader_.join();
 
         BOOST_LOG_TRIVIAL(debug) << "Joined ReportingETL worker thread";
     }
@@ -322,13 +341,38 @@ public:
         result["read_only"] = readOnly_;
         auto last = getLastPublish();
         if (last.time_since_epoch().count() != 0)
-            result["last_publish_age_seconds"] = std::to_string(
-                std::chrono::duration_cast<std::chrono::seconds>(
-                    std::chrono::system_clock::now() - getLastPublish())
-                    .count());
+            result["last_publish_age_seconds"] =
+                std::to_string(lastPublishAgeSeconds());
         return result;
     }
 
+    std::chrono::time_point<std::chrono::system_clock>
+    getLastPublish() const
+    {
+        std::shared_lock lck(publishTimeMtx_);
+        return lastPublish_;
+    }
+
+    std::uint32_t
+    lastPublishAgeSeconds() const
+    {
+        return std::chrono::duration_cast<std::chrono::seconds>(
+                   std::chrono::system_clock::now() - getLastPublish())
+            .count();
+    }
+
+    std::uint32_t
+    lastCloseAgeSeconds() const
+    {
+        std::shared_lock lck(closeTimeMtx_);
+        auto now = std::chrono::duration_cast<std::chrono::seconds>(
+                       std::chrono::system_clock::now().time_since_epoch())
+                       .count();
+        auto closeTime = lastCloseTime_.time_since_epoch().count();
+        if (now < (rippleEpochStart + closeTime))
+            return 0;
+        return now - (rippleEpochStart + closeTime);
+    }
 };
 
 #endif
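The switch from std::mutex to std::shared_mutex lets many concurrent server_info readers fetch the timestamps without serializing on the single writer. A minimal sketch of the reader/writer split, with a toy class rather than clio's ReportingETL:

    // Reader/writer locking as introduced above: readers take shared_lock,
    // the lone writer takes unique_lock.
    #include <chrono>
    #include <mutex>
    #include <shared_mutex>

    class PublishClock
    {
        mutable std::shared_mutex mtx_;
        std::chrono::system_clock::time_point last_{};

    public:
        std::chrono::system_clock::time_point
        get() const
        {
            std::shared_lock lck(mtx_);  // many readers share the lock
            return last_;
        }

        void
        set()
        {
            std::unique_lock lck(mtx_);  // the writer excludes everyone
            last_ = std::chrono::system_clock::now();
        }
    };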
16 src/main/Build.h Normal file
@@ -0,0 +1,16 @@
#ifndef CLIO_BUILD_INFO_H
#define CLIO_BUILD_INFO_H

#include <string>

namespace Build {

std::string const&
getClioVersionString();

std::string const&
getClioFullVersionString();

}  // namespace Build

#endif  // CLIO_BUILD_INFO_H
59 src/main/impl/Build.cpp Normal file
@@ -0,0 +1,59 @@
#include <ripple/beast/core/SemanticVersion.h>
#include <boost/preprocessor/stringize.hpp>
#include <algorithm>
#include <main/Build.h>
#include <optional>
#include <stdexcept>

namespace Build {

//--------------------------------------------------------------------------
//  The build version number. You must edit this for each release
//  and follow the format described at http://semver.org/
//--------------------------------------------------------------------------
// clang-format off
char const* const versionString = "1.0.3"
// clang-format on

#if defined(DEBUG) || defined(SANITIZER)
    "+"
#ifdef CLIO_GIT_COMMIT_HASH
    CLIO_GIT_COMMIT_HASH
    "."
#endif
#ifdef DEBUG
    "DEBUG"
#ifdef SANITIZER
    "."
#endif
#endif

#ifdef SANITIZER
    BOOST_PP_STRINGIZE(SANITIZER)
#endif
#endif

    //--------------------------------------------------------------------------
    ;

std::string const&
getClioVersionString()
{
    static std::string const value = [] {
        std::string const s = versionString;
        beast::SemanticVersion v;
        if (!v.parse(s) || v.print() != s)
            throw std::runtime_error(s + ": Bad server version string");
        return s;
    }();
    return value;
}

std::string const&
getClioFullVersionString()
{
    static std::string const value = "clio-" + getClioVersionString();
    return value;
}

}  // namespace Build
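The preprocessor block composes a semver build suffix out of the active build flags. A self-contained mimic of that composition, with an illustrative hash standing in for CLIO_GIT_COMMIT_HASH, which the real build system supplies:

    // Mimic of the suffix composition above; values are illustrative.
    #include <cstdio>

    #define GIT_HASH "abc1234"  // hypothetical CLIO_GIT_COMMIT_HASH stand-in
    #define IS_DEBUG 1

    char const* const version = "1.0.3"
    #if IS_DEBUG
        "+" GIT_HASH "." "DEBUG"  // adjacent string literals concatenate
    #endif
        ;

    int
    main()
    {
        std::puts(version);  // prints "1.0.3+abc1234.DEBUG"
    }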
@@ -28,6 +28,7 @@
 #include <fstream>
 #include <functional>
 #include <iostream>
+#include <main/Build.h>
 #include <memory>
 #include <sstream>
 #include <string>
@@ -103,45 +104,85 @@ parse_certs(boost::json::object const& config)
 void
 initLogging(boost::json::object const& config)
 {
+    namespace src = boost::log::sources;
+    namespace keywords = boost::log::keywords;
+    namespace sinks = boost::log::sinks;
+    namespace trivial = boost::log::trivial;
     boost::log::add_common_attributes();
     std::string format = "[%TimeStamp%] [%ThreadID%] [%Severity%] %Message%";
-    boost::log::add_console_log(
-        std::cout, boost::log::keywords::format = format);
-    if (config.contains("log_file"))
+    if (!config.contains("log_to_console") ||
+        config.at("log_to_console").as_bool())
     {
-        boost::log::add_file_log(
-            config.at("log_file").as_string().c_str(),
-            boost::log::keywords::format = format,
-            boost::log::keywords::open_mode = std::ios_base::app);
+        boost::log::add_console_log(std::cout, keywords::format = format);
+    }
+    if (config.contains("log_directory"))
+    {
+        if (!config.at("log_directory").is_string())
+            throw std::runtime_error("log directory must be a string");
+        boost::filesystem::path dirPath{
+            config.at("log_directory").as_string().c_str()};
+        if (!boost::filesystem::exists(dirPath))
+            boost::filesystem::create_directories(dirPath);
+        const int64_t rotationSize = config.contains("log_rotation_size")
+            ? config.at("log_rotation_size").as_int64() * 1024 * 1024u
+            : 2 * 1024 * 1024 * 1024u;
+        if (rotationSize <= 0)
+            throw std::runtime_error(
+                "log rotation size must be greater than 0");
+        const int64_t rotationPeriod =
+            config.contains("log_rotation_hour_interval")
+            ? config.at("log_rotation_hour_interval").as_int64()
+            : 12u;
+        if (rotationPeriod <= 0)
+            throw std::runtime_error(
+                "log rotation time interval must be greater than 0");
+        const int64_t dirSize = config.contains("log_directory_max_size")
+            ? config.at("log_directory_max_size").as_int64() * 1024 * 1024u
+            // 64-bit literal: a plain `u` suffix wraps 50 GiB to 2 GiB in
+            // 32-bit unsigned arithmetic
+            : 50LL * 1024 * 1024 * 1024;
+        if (dirSize <= 0)
+            throw std::runtime_error(
+                "log rotation directory max size must be greater than 0");
+        auto fileSink = boost::log::add_file_log(
+            keywords::file_name = dirPath / "clio.log",
+            keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
+            keywords::auto_flush = true,
+            keywords::format = format,
+            keywords::open_mode = std::ios_base::app,
+            keywords::rotation_size = rotationSize,
+            keywords::time_based_rotation =
+                sinks::file::rotation_at_time_interval(
+                    boost::posix_time::hours(rotationPeriod)));
+        fileSink->locked_backend()->set_file_collector(
+            sinks::file::make_collector(
+                keywords::target = dirPath, keywords::max_size = dirSize));
+        fileSink->locked_backend()->scan_for_files();
     }
     auto const logLevel = config.contains("log_level")
         ? config.at("log_level").as_string()
         : "info";
     if (boost::iequals(logLevel, "trace"))
         boost::log::core::get()->set_filter(
-            boost::log::trivial::severity >= boost::log::trivial::trace);
+            trivial::severity >= trivial::trace);
     else if (boost::iequals(logLevel, "debug"))
         boost::log::core::get()->set_filter(
-            boost::log::trivial::severity >= boost::log::trivial::debug);
+            trivial::severity >= trivial::debug);
     else if (boost::iequals(logLevel, "info"))
-        boost::log::core::get()->set_filter(
-            boost::log::trivial::severity >= boost::log::trivial::info);
+        boost::log::core::get()->set_filter(trivial::severity >= trivial::info);
     else if (
         boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn"))
         boost::log::core::get()->set_filter(
-            boost::log::trivial::severity >= boost::log::trivial::warning);
+            trivial::severity >= trivial::warning);
     else if (boost::iequals(logLevel, "error"))
         boost::log::core::get()->set_filter(
-            boost::log::trivial::severity >= boost::log::trivial::error);
+            trivial::severity >= trivial::error);
     else if (boost::iequals(logLevel, "fatal"))
         boost::log::core::get()->set_filter(
-            boost::log::trivial::severity >= boost::log::trivial::fatal);
+            trivial::severity >= trivial::fatal);
     else
     {
         BOOST_LOG_TRIVIAL(warning) << "Unrecognized log level: " << logLevel
                                    << ". Setting log level to info";
-        boost::log::core::get()->set_filter(
-            boost::log::trivial::severity >= boost::log::trivial::info);
+        boost::log::core::get()->set_filter(trivial::severity >= trivial::info);
     }
     BOOST_LOG_TRIVIAL(info) << "Log level = " << logLevel;
 }
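The directory-size default is exactly the kind of constant that silently wraps: with only a `u` suffix, the multiplication happens in 32-bit unsigned arithmetic. A compile-time demonstration of why the 64-bit literal matters:

    // Why 50 GiB needs a 64-bit literal: unsigned-int math wraps mod 2^32.
    #include <cstdint>

    constexpr auto wrapped = 50 * 1024 * 1024 * 1024u;   // unsigned int math
    constexpr auto correct = 50LL * 1024 * 1024 * 1024;  // 64-bit math

    static_assert(wrapped == 2147483648u);   // 2 GiB, not 50 GiB
    static_assert(correct == 53687091200LL); // the intended 50 GiB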
@@ -170,6 +211,12 @@ main(int argc, char* argv[])
         return EXIT_FAILURE;
     }
 
+    if (std::string{argv[1]} == "-v" || std::string{argv[1]} == "--version")
+    {
+        std::cout << Build::getClioFullVersionString() << std::endl;
+        return EXIT_SUCCESS;
+    }
+
     auto const config = parse_config(argv[1]);
     if (!config)
     {
@@ -179,21 +226,25 @@ main(int argc, char* argv[])
 
     initLogging(*config);
 
+    // Announce Clio version
+    BOOST_LOG_TRIVIAL(info)
+        << "Clio version: " << Build::getClioFullVersionString();
+
     auto ctx = parse_certs(*config);
     auto ctxRef = ctx
         ? std::optional<std::reference_wrapper<ssl::context>>{ctx.value()}
         : std::nullopt;
 
-    auto const threads = config->contains("workers")
-        ? config->at("workers").as_int64()
-        : std::thread::hardware_concurrency();
+    auto const threads = config->contains("io_threads")
+        ? config->at("io_threads").as_int64()
+        : 2;
 
     if (threads <= 0)
     {
-        BOOST_LOG_TRIVIAL(fatal) << "Workers is less than 0";
+        BOOST_LOG_TRIVIAL(fatal) << "io_threads must be greater than 0";
         return EXIT_FAILURE;
     }
-    BOOST_LOG_TRIVIAL(info) << "Number of workers = " << threads;
+    BOOST_LOG_TRIVIAL(info) << "Number of io threads = " << threads;
 
     // io context to handle all incoming requests, as well as other things
     // This is not the only io context in the application
@@ -221,7 +272,7 @@ main(int argc, char* argv[])
     // The balancer itself publishes to streams (transactions_proposed and
     // accounts_proposed)
     auto balancer = ETLLoadBalancer::make_ETLLoadBalancer(
-        *config, ioc, ctxRef, backend, subscriptions, ledgers);
+        *config, ioc, backend, subscriptions, ledgers);
 
     // ETL is responsible for writing and publishing to streams. In read-only
     // mode, ETL only publishes
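The new io_threads setting sizes the pool of threads that all run the main boost::asio::io_context, so handlers execute on whichever thread is free. A minimal sketch of that pattern, independent of clio's startup code:

    // N threads all running one io_context; a work guard keeps run() alive
    // until we are done.
    #include <boost/asio/executor_work_guard.hpp>
    #include <boost/asio/io_context.hpp>
    #include <thread>
    #include <vector>

    int
    main()
    {
        boost::asio::io_context ioc;
        auto work = boost::asio::make_work_guard(ioc);

        std::vector<std::thread> pool;
        for (int i = 0; i < 2; ++i)  // "io_threads": 2 is the new default
            pool.emplace_back([&ioc] { ioc.run(); });

        // ... post handlers, accept connections, etc. ...

        work.reset();  // let run() drain pending work and return
        for (auto& t : pool)
            t.join();
    }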
@@ -6,6 +6,7 @@
 #include <cstdint>
 #include <shared_mutex>
 #include <string>
+#include <unordered_map>
 
 namespace RPC {
 
@@ -21,6 +21,9 @@ doAccountCurrencies(Context const& context);
 Result
 doAccountLines(Context const& context);
 
+Result
+doAccountNFTs(Context const& context);
+
 Result
 doAccountObjects(Context const& context);
 
@@ -41,10 +44,23 @@ doChannelAuthorize(Context const& context);
 Result
 doChannelVerify(Context const& context);
 
-// offers methods
+// book methods
+Result
+doBookChanges(Context const& context);
+
 Result
 doBookOffers(Context const& context);
 
+// NFT methods
+Result
+doNFTBuyOffers(Context const& context);
+
+Result
+doNFTSellOffers(Context const& context);
+
+Result
+doNFTInfo(Context const& context);
+
 // ledger methods
 Result
 doLedger(Context const& context);
196 src/rpc/RPC.cpp
@@ -1,6 +1,7 @@
 #include <boost/asio/spawn.hpp>
 #include <etl/ETLSource.h>
 #include <rpc/Handlers.h>
+#include <rpc/RPCHelpers.h>
 #include <unordered_map>
 
 namespace RPC {
@@ -90,6 +91,38 @@ make_HttpContext(
         clientIp};
 }
 
+constexpr static WarningInfo warningInfos[]{
+    {warnUNKNOWN, "Unknown warning"},
+    {warnRPC_CLIO,
+     "This is a clio server. clio only serves validated data. If you "
+     "want to talk to rippled, include 'ledger_index':'current' in your "
+     "request"},
+    {warnRPC_OUTDATED, "This server may be out of date"},
+    {warnRPC_RATE_LIMIT, "You are about to be rate limited"}};
+
+WarningInfo const&
+get_warning_info(warning_code code)
+{
+    for (WarningInfo const& info : warningInfos)
+    {
+        if (info.code == code)
+        {
+            return info;
+        }
+    }
+    throw(std::out_of_range("Invalid warning_code"));
+}
+
+boost::json::object
+make_warning(warning_code code)
+{
+    boost::json::object json;
+    WarningInfo const& info(get_warning_info(code));
+    json["id"] = code;
+    json["message"] = static_cast<std::string>(info.message);
+    return json;
+}
+
 boost::json::object
 make_error(Error err)
 {
@@ -106,6 +139,14 @@ make_error(Error err)
 boost::json::object
 make_error(Status const& status)
 {
+    if (status.error == ripple::rpcUNKNOWN)
+    {
+        return {
+            {"error", status.message},
+            {"type", "response"},
+            {"status", "error"}};
+    }
+
     boost::json::object json;
     ripple::RPC::ErrorInfo const& info(
         ripple::RPC::get_error_info(status.error));
@@ -118,31 +159,81 @@ make_error(Status const& status)
     json["type"] = "response";
     return json;
 }
-static std::unordered_map<std::string, std::function<Result(Context const&)>>
-    handlerTable{
-        {"account_channels", &doAccountChannels},
-        {"account_currencies", &doAccountCurrencies},
-        {"account_info", &doAccountInfo},
-        {"account_lines", &doAccountLines},
-        {"account_objects", &doAccountObjects},
-        {"account_offers", &doAccountOffers},
-        {"account_tx", &doAccountTx},
-        {"gateway_balances", &doGatewayBalances},
-        {"noripple_check", &doNoRippleCheck},
-        {"book_offers", &doBookOffers},
-        {"channel_authorize", &doChannelAuthorize},
-        {"channel_verify", &doChannelVerify},
-        {"ledger", &doLedger},
-        {"ledger_data", &doLedgerData},
-        {"ledger_entry", &doLedgerEntry},
-        {"ledger_range", &doLedgerRange},
-        {"ledger_data", &doLedgerData},
-        {"subscribe", &doSubscribe},
-        {"server_info", &doServerInfo},
-        {"unsubscribe", &doUnsubscribe},
-        {"tx", &doTx},
-        {"transaction_entry", &doTransactionEntry},
-        {"random", &doRandom}};
+
+using LimitRange = std::tuple<std::uint32_t, std::uint32_t, std::uint32_t>;
+using HandlerFunction = std::function<Result(Context const&)>;
+
+struct Handler
+{
+    std::string method;
+    std::function<Result(Context const&)> handler;
+    std::optional<LimitRange> limit;
+};
+
+class HandlerTable
+{
+    std::unordered_map<std::string, Handler> handlerMap_;
+
+public:
+    HandlerTable(std::initializer_list<Handler> handlers)
+    {
+        for (auto const& handler : handlers)
+        {
+            // note: copy, not std::move; moving from a const reference
+            // would silently copy anyway
+            handlerMap_[handler.method] = handler;
+        }
+    }
+
+    bool
+    contains(std::string const& method)
+    {
+        return handlerMap_.contains(method);
+    }
+
+    std::optional<LimitRange>
+    getLimitRange(std::string const& command)
+    {
+        if (!handlerMap_.contains(command))
+            return {};
+
+        return handlerMap_[command].limit;
+    }
+
+    std::optional<HandlerFunction>
+    getHandler(std::string const& command)
+    {
+        if (!handlerMap_.contains(command))
+            return {};
+
+        return handlerMap_[command].handler;
+    }
+};
+
+static HandlerTable handlerTable{
+    {"account_channels", &doAccountChannels, LimitRange{10, 50, 256}},
+    {"account_currencies", &doAccountCurrencies, {}},
+    {"account_info", &doAccountInfo, {}},
+    {"account_lines", &doAccountLines, LimitRange{10, 50, 256}},
+    {"account_nfts", &doAccountNFTs, LimitRange{1, 5, 10}},
+    {"account_objects", &doAccountObjects, LimitRange{10, 50, 256}},
+    {"account_offers", &doAccountOffers, LimitRange{10, 50, 256}},
+    {"account_tx", &doAccountTx, LimitRange{1, 50, 100}},
+    {"gateway_balances", &doGatewayBalances, {}},
+    {"noripple_check", &doNoRippleCheck, {}},
+    {"book_changes", &doBookChanges, {}},
+    {"book_offers", &doBookOffers, LimitRange{1, 50, 100}},
+    {"ledger", &doLedger, {}},
+    {"ledger_data", &doLedgerData, LimitRange{1, 100, 2048}},
+    {"nft_buy_offers", &doNFTBuyOffers, LimitRange{1, 50, 100}},
+    {"nft_info", &doNFTInfo, {}},
+    {"nft_sell_offers", &doNFTSellOffers, LimitRange{1, 50, 100}},
+    {"ledger_entry", &doLedgerEntry, {}},
+    {"ledger_range", &doLedgerRange, {}},
+    {"subscribe", &doSubscribe, {}},
+    {"server_info", &doServerInfo, {}},
+    {"unsubscribe", &doUnsubscribe, {}},
+    {"tx", &doTx, {}},
+    {"transaction_entry", &doTransactionEntry, {}},
+    {"random", &doRandom, {}}};
 
static std::unordered_set<std::string> forwardCommands{
|
static std::unordered_set<std::string> forwardCommands{
|
||||||
"submit",
|
"submit",
|
||||||
@@ -151,7 +242,9 @@ static std::unordered_set<std::string> forwardCommands{
     "ledger_closed",
     "ledger_current",
     "ripple_path_find",
-    "manifest"};
+    "manifest",
+    "channel_authorize",
+    "channel_verify"};
 
 bool
 validHandler(std::string const& method)
@@ -159,6 +252,36 @@ validHandler(std::string const& method)
     return handlerTable.contains(method) || forwardCommands.contains(method);
 }
 
+Status
+getLimit(RPC::Context const& context, std::uint32_t& limit)
+{
+    if (!handlerTable.getHandler(context.method))
+        return Status{Error::rpcUNKNOWN_COMMAND};
+
+    if (!handlerTable.getLimitRange(context.method))
+        return Status{Error::rpcINVALID_PARAMS, "rpcDoesNotRequireLimit"};
+
+    auto [lo, def, hi] = *handlerTable.getLimitRange(context.method);
+
+    if (context.params.contains(JS(limit)))
+    {
+        if (!context.params.at(JS(limit)).is_int64())
+            return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
+
+        limit = context.params.at(JS(limit)).as_int64();
+        if (limit <= 0)
+            return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
+
+        limit = std::clamp(limit, lo, hi);
+    }
+    else
+    {
+        limit = def;
+    }
+
+    return {};
+}
+
 bool
 shouldForwardToRippled(Context const& ctx)
 {
@@ -167,15 +290,8 @@ shouldForwardToRippled(Context const& ctx)
     if (forwardCommands.find(ctx.method) != forwardCommands.end())
         return true;
 
-    if (request.contains("ledger_index"))
-    {
-        auto indexValue = request.at("ledger_index");
-        if (indexValue.is_string())
-        {
-            std::string index = indexValue.as_string().c_str();
-            return index == "current" || index == "closed";
-        }
-    }
+    if (specifiesCurrentOrClosedLedger(request))
+        return true;
 
     if (ctx.method == "account_info" && request.contains("queue") &&
         request.at("queue").as_bool())
@@ -209,14 +325,14 @@ buildResponse(Context const& ctx)
     if (ctx.method == "ping")
         return boost::json::object{};
 
-    if (handlerTable.find(ctx.method) == handlerTable.end())
-        return Status{Error::rpcUNKNOWN_COMMAND};
-
-    auto method = handlerTable[ctx.method];
+    auto method = handlerTable.getHandler(ctx.method);
+
+    if (!method)
+        return Status{Error::rpcUNKNOWN_COMMAND};
 
     try
     {
-        auto v = method(ctx);
+        auto v = (*method)(ctx);
 
         if (auto object = std::get_if<boost::json::object>(&v))
             (*object)["validated"] = true;
@@ -235,7 +351,7 @@ buildResponse(Context const& ctx)
     {
         BOOST_LOG_TRIVIAL(error)
            << __func__ << " caught exception : " << err.what();
-        return Status{Error::rpcINTERNAL, err.what()};
+        return Status{Error::rpcINTERNAL};
     }
 }
src/rpc/RPC.h
@@ -103,10 +103,19 @@ struct Status
 
     Status(Error error_) : error(error_){};
 
+    // HACK. Some rippled handlers explicitly specify errors.
+    // This means that we have to be able to duplicate this
+    // functionality.
+    Status(std::string const& message_)
+        : error(ripple::rpcUNKNOWN), message(message_)
+    {
+    }
+
     Status(Error error_, std::string message_)
         : error(error_), message(message_)
     {
     }
 
     Status(Error error_, std::string strCode_, std::string message_)
         : error(error_), strCode(strCode_), message(message_)
     {
@@ -153,6 +162,33 @@ public:
     }
 };
 
+enum warning_code {
+    warnUNKNOWN = -1,
+    warnRPC_CLIO = 2001,
+    warnRPC_OUTDATED = 2002,
+    warnRPC_RATE_LIMIT = 2003
+};
+
+struct WarningInfo
+{
+    constexpr WarningInfo() : code(warnUNKNOWN), message("unknown warning")
+    {
+    }
+
+    constexpr WarningInfo(warning_code code_, char const* message_)
+        : code(code_), message(message_)
+    {
+    }
+    warning_code code;
+    std::string_view const message;
+};
+
+WarningInfo const&
+get_warning_info(warning_code code);
+
+boost::json::object
+make_warning(warning_code code);
+
 boost::json::object
 make_error(Status const& status);
 
@@ -190,6 +226,9 @@ buildResponse(Context const& ctx);
 bool
 validHandler(std::string const& method);
 
+Status
+getLimit(RPC::Context const& context, std::uint32_t& limit);
+
 template <class T>
 void
 logDuration(Context const& ctx, T const& dur)
@@ -205,7 +244,7 @@ logDuration(Context const& ctx, T const& dur)
     else if (seconds > 1)
         BOOST_LOG_TRIVIAL(warning) << ss.str();
     else
-        BOOST_LOG_TRIVIAL(debug) << ss.str();
+        BOOST_LOG_TRIVIAL(info) << ss.str();
 }
 
 } // namespace RPC
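
For context, a caller-side sketch of how the new warning machinery could be used; whether clio attaches warnings under a top-level "warnings" key is an assumption here, only make_warning() and warnRPC_CLIO come from the diff:

#include <boost/json.hpp>
#include <rpc/RPC.h>

// Hedged sketch: attach the clio advisory warning to an outgoing response.
boost::json::object
withClioWarning(boost::json::object response)
{
    boost::json::array warnings;
    warnings.push_back(RPC::make_warning(RPC::warnRPC_CLIO));
    // Produces {"id": 2001, "message": "This is a clio server. ..."}
    response["warnings"] = std::move(warnings);
    return response;
}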
src/rpc/RPCHelpers.cpp
@@ -13,6 +13,7 @@ getBool(boost::json::object const& request, std::string const& field)
     else
         throw InvalidParamsError("Invalid field " + field + ", not bool.");
 }
+
 bool
 getBool(
     boost::json::object const& request,
@@ -24,6 +25,7 @@ getBool(
     else
         return dfault;
 }
+
 bool
 getRequiredBool(boost::json::object const& request, std::string const& field)
 {
@@ -152,6 +154,7 @@ getString(boost::json::object const& request, std::string const& field)
     else
         throw InvalidParamsError("Invalid field " + field + ", not string.");
 }
+
 std::string
 getRequiredString(boost::json::object const& request, std::string const& field)
 {
@@ -160,6 +163,7 @@ getRequiredString(boost::json::object const& request, std::string const& field)
     else
         throw InvalidParamsError("Missing field " + field);
 }
+
 std::string
 getString(
     boost::json::object const& request,
@@ -172,11 +176,128 @@ getString(
         return dfault;
 }
 
+Status
+getHexMarker(boost::json::object const& request, ripple::uint256& marker)
+{
+    if (request.contains(JS(marker)))
+    {
+        if (!request.at(JS(marker)).is_string())
+            return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
+
+        if (!marker.parseHex(request.at(JS(marker)).as_string().c_str()))
+            return Status{Error::rpcINVALID_PARAMS, "malformedMarker"};
+    }
+
+    return {};
+}
+
+Status
+getAccount(
+    boost::json::object const& request,
+    ripple::AccountID& account,
+    boost::string_view const& field,
+    bool required)
+{
+    if (!request.contains(field))
+    {
+        if (required)
+            return Status{
+                Error::rpcINVALID_PARAMS, field.to_string() + "Missing"};
+
+        return {};
+    }
+
+    if (!request.at(field).is_string())
+        return Status{
+            Error::rpcINVALID_PARAMS, field.to_string() + "NotString"};
+
+    if (auto a = accountFromStringStrict(request.at(field).as_string().c_str());
+        a)
+    {
+        account = a.value();
+        return {};
+    }
+
+    return Status{Error::rpcINVALID_PARAMS, field.to_string() + "Malformed"};
+}
+
+Status
+getOptionalAccount(
+    boost::json::object const& request,
+    std::optional<ripple::AccountID>& account,
+    boost::string_view const& field)
+{
+    if (!request.contains(field))
+    {
+        account = {};
+        return {};
+    }
+
+    if (!request.at(field).is_string())
+        return Status{
+            Error::rpcINVALID_PARAMS, field.to_string() + "NotString"};
+
+    if (auto a = accountFromStringStrict(request.at(field).as_string().c_str());
+        a)
+    {
+        account = a.value();
+        return {};
+    }
+
+    return Status{Error::rpcINVALID_PARAMS, field.to_string() + "Malformed"};
+}
+
+Status
+getAccount(boost::json::object const& request, ripple::AccountID& accountId)
+{
+    return getAccount(request, accountId, JS(account), true);
+}
+
+Status
+getAccount(
+    boost::json::object const& request,
+    ripple::AccountID& destAccount,
+    boost::string_view const& field)
+{
+    return getAccount(request, destAccount, field, false);
+}
+
+Status
+getTaker(boost::json::object const& request, ripple::AccountID& takerID)
+{
+    if (request.contains(JS(taker)))
+    {
+        auto parsed = parseTaker(request.at(JS(taker)));
+        if (auto status = std::get_if<Status>(&parsed))
+            return *status;
+        else
+            takerID = std::get<ripple::AccountID>(parsed);
+    }
+
+    return {};
+}
+
+Status
+getChannelId(boost::json::object const& request, ripple::uint256& channelId)
+{
+    if (!request.contains(JS(channel_id)))
+        return Status{Error::rpcINVALID_PARAMS, "missingChannelID"};
+
+    if (!request.at(JS(channel_id)).is_string())
+        return Status{Error::rpcINVALID_PARAMS, "channelIDNotString"};
+
+    if (!channelId.parseHex(request.at(JS(channel_id)).as_string().c_str()))
+        return Status{Error::rpcCHANNEL_MALFORMED, "malformedChannelID"};
+
+    return {};
+}
+
 std::optional<ripple::STAmount>
 getDeliveredAmount(
     std::shared_ptr<ripple::STTx const> const& txn,
     std::shared_ptr<ripple::TxMeta const> const& meta,
-    std::uint32_t const ledgerSequence)
+    std::uint32_t const ledgerSequence,
+    uint32_t date)
 {
     if (meta->hasDeliveredAmount())
         return meta->getDeliveredAmount();
@@ -192,7 +313,7 @@ getDeliveredAmount(
     // then its absence indicates that the amount delivered is listed in the
     // Amount field. DeliveredAmount went live January 24, 2014.
    // 446000000 is in Feb 2014, well after DeliveredAmount went live
-    if (ledgerSequence >= 4594095)
+    if (ledgerSequence >= 4594095 || date > 446000000)
     {
         return txn->getFieldAmount(ripple::sfAmount);
     }
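
Restated as a stand-alone predicate (the helper name is hypothetical, the constants are the ones in the diff): sfAmount can be trusted as the delivered amount once the transaction is provably newer than DeliveredAmount's January 2014 activation, judged by either ledger sequence or close time in Ripple-epoch seconds.

#include <cstdint>

// True when the transaction postdates the DeliveredAmount cutoff, so the
// Amount field itself is the delivered amount.
inline bool
amountFieldIsDeliveredAmount(std::uint32_t ledgerSequence, std::uint32_t date)
{
    return ledgerSequence >= 4594095 || date > 446000000;
}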
@@ -338,7 +459,7 @@ toExpandedJson(Backend::TransactionAndMetadata const& blobs)
     auto [txn, meta] = deserializeTxPlusMeta(blobs, blobs.ledgerSequence);
     auto txnJson = toJson(*txn);
     auto metaJson = toJson(*meta);
-    insertDeliveredAmount(metaJson, txn, meta);
+    insertDeliveredAmount(metaJson, txn, meta, blobs.date);
     return {txnJson, metaJson};
 }
 
@@ -346,11 +467,12 @@ bool
 insertDeliveredAmount(
     boost::json::object& metaJson,
     std::shared_ptr<ripple::STTx const> const& txn,
-    std::shared_ptr<ripple::TxMeta const> const& meta)
+    std::shared_ptr<ripple::TxMeta const> const& meta,
+    uint32_t date)
 {
     if (canHaveDeliveredAmount(txn, meta))
     {
-        if (auto amt = getDeliveredAmount(txn, meta, meta->getLgrSeq()))
+        if (auto amt = getDeliveredAmount(txn, meta, meta->getLgrSeq(), date))
             metaJson["delivered_amount"] =
                 toBoostJson(amt->getJson(ripple::JsonOptions::include_date));
         else
@@ -448,6 +570,11 @@ ledgerInfoFromRequest(Context const& ctx)
            return Status{Error::rpcINVALID_PARAMS, "ledgerHashMalformed"};
 
        auto lgrInfo = ctx.backend->fetchLedgerByHash(ledgerHash, ctx.yield);
+
+       if (!lgrInfo || lgrInfo->seq > ctx.range.maxSequence)
+           return Status{Error::rpcLGR_NOT_FOUND, "ledgerNotFound"};
+
+       return *lgrInfo;
     }
 
     auto indexValue = ctx.params.contains("ledger_index")
@@ -479,7 +606,7 @@ ledgerInfoFromRequest(Context const& ctx)
     auto lgrInfo =
         ctx.backend->fetchLedgerBySequence(*ledgerSequence, ctx.yield);
 
-    if (!lgrInfo)
+    if (!lgrInfo || lgrInfo->seq > ctx.range.maxSequence)
         return Status{Error::rpcLGR_NOT_FOUND, "ledgerNotFound"};
 
     return *lgrInfo;
@@ -537,20 +664,47 @@ traverseOwnedNodes(
     if (!parsedCursor)
         return Status(ripple::rpcINVALID_PARAMS, "Malformed cursor");
 
-    auto cursor = AccountCursor({beast::zero, 0});
-
     auto [hexCursor, startHint] = *parsedCursor;
 
-    auto const rootIndex = ripple::keylet::ownerDir(accountID);
+    return traverseOwnedNodes(
+        backend,
+        ripple::keylet::ownerDir(accountID),
+        hexCursor,
+        startHint,
+        sequence,
+        limit,
+        jsonCursor,
+        yield,
+        atOwnedNode);
+}
+
+std::variant<Status, AccountCursor>
+traverseOwnedNodes(
+    BackendInterface const& backend,
+    ripple::Keylet const& owner,
+    ripple::uint256 const& hexMarker,
+    std::uint32_t const startHint,
+    std::uint32_t sequence,
+    std::uint32_t limit,
+    std::optional<std::string> jsonCursor,
+    boost::asio::yield_context& yield,
+    std::function<void(ripple::SLE)> atOwnedNode)
+{
+    auto cursor = AccountCursor({beast::zero, 0});
+
+    auto const rootIndex = owner;
     auto currentIndex = rootIndex;
 
     std::vector<ripple::uint256> keys;
-    keys.reserve(limit);
+    // Only reserve 2048 nodes when fetching all owned ledger objects. If there
+    // are more, then keys will allocate more memory, which is suboptimal, but
+    // should only occur occasionally.
+    keys.reserve(std::min(std::uint32_t{2048}, limit));
 
     auto start = std::chrono::system_clock::now();
 
     // If startAfter is not zero try jumping to that page using the hint
-    if (hexCursor.isNonZero())
+    if (hexMarker.isNonZero())
     {
         auto const hintIndex = ripple::keylet::page(rootIndex, startHint);
         auto hintDir =
@@ -563,7 +717,7 @@ traverseOwnedNodes(
 
         for (auto const& key : sle.getFieldV256(ripple::sfIndexes))
         {
-            if (key == hexCursor)
+            if (key == hexMarker)
             {
                 // We found the hint, we can start here
                 currentIndex = hintIndex;
@@ -589,7 +743,7 @@ traverseOwnedNodes(
     {
         if (!found)
         {
-            if (key == hexCursor)
+            if (key == hexMarker)
                 found = true;
         }
         else
@@ -625,7 +779,7 @@ traverseOwnedNodes(
             backend.fetchLedgerObject(currentIndex.key, sequence, yield);
 
         if (!ownerDir)
-            return Status(ripple::rpcACT_NOT_FOUND);
+            break;
 
         ripple::SerialIter it{ownerDir->data(), ownerDir->size()};
         ripple::SLE sle{it, currentIndex.key};
@@ -678,6 +832,23 @@ traverseOwnedNodes(
     return AccountCursor({beast::zero, 0});
 }
 
+std::shared_ptr<ripple::SLE const>
+read(
+    ripple::Keylet const& keylet,
+    ripple::LedgerInfo const& lgrInfo,
+    Context const& context)
+{
+    if (auto const blob = context.backend->fetchLedgerObject(
+            keylet.key, lgrInfo.seq, context.yield);
+        blob)
+    {
+        return std::make_shared<ripple::SLE const>(
+            ripple::SerialIter{blob->data(), blob->size()}, keylet.key);
+    }
+
+    return nullptr;
+}
+
 std::optional<ripple::Seed>
 parseRippleLibSeed(boost::json::value const& value)
 {
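
A hedged usage sketch for the new read() helper: load an account root SLE at the ledger in scope and pull one field from it. The wrapper function and its name are illustrative; only read(), Context, and the keylet/field calls come from code shown in this diff or from rippled.

#include <rpc/RPCHelpers.h>

// Returns the account's OwnerCount, or nullopt if the account does not exist
// at this ledger.
std::optional<std::uint32_t>
ownerCountOf(
    ripple::AccountID const& accountID,
    ripple::LedgerInfo const& lgrInfo,
    RPC::Context const& context)
{
    auto const sle =
        RPC::read(ripple::keylet::account(accountID), lgrInfo, context);
    if (!sle)
        return std::nullopt;
    return (*sle)[ripple::sfOwnerCount];
}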
@@ -1280,6 +1451,7 @@ parseBook(boost::json::object const& request)
 
     return ripple::Book{{pay_currency, pay_issuer}, {get_currency, get_issuer}};
 }
+
 std::variant<Status, ripple::AccountID>
 parseTaker(boost::json::value const& taker)
 {
@@ -1293,5 +1465,19 @@ parseTaker(boost::json::value const& taker)
         return Status{Error::rpcINVALID_PARAMS, "invalidTakerAccount"};
     return *takerID;
 }
+bool
+specifiesCurrentOrClosedLedger(boost::json::object const& request)
+{
+    if (request.contains("ledger_index"))
+    {
+        auto indexValue = request.at("ledger_index");
+        if (indexValue.is_string())
+        {
+            std::string index = indexValue.as_string().c_str();
+            return index == "current" || index == "closed";
+        }
+    }
+    return false;
+}
 
 } // namespace RPC
src/rpc/RPCHelpers.h
@@ -14,6 +14,13 @@
 #include <backend/BackendInterface.h>
 #include <rpc/RPC.h>
 
+// Useful macro for borrowing from ripple::jss
+// static strings. (J)son (S)trings
+#define JS(x) ripple::jss::x.c_str()
+
+// Access (SF)ield name (S)trings
+#define SFS(x) ripple::x.jsonName.c_str()
+
 namespace RPC {
 std::optional<ripple::AccountID>
 accountFromStringStrict(std::string const& account);
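
Concretely, JS(account) expands to ripple::jss::account.c_str() (the string "account") and SFS(sfFlags) expands to ripple::sfFlags.jsonName.c_str() (the string "Flags"). A small sketch; the function and the address value are arbitrary examples:

#include <boost/json.hpp>
#include <rpc/RPCHelpers.h>

boost::json::object
exampleUsage()
{
    boost::json::object obj;
    obj[JS(account)] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh";  // obj["account"]
    obj[SFS(sfFlags)] = 0;                                    // obj["Flags"]
    return obj;
}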
@@ -55,7 +62,8 @@ bool
 insertDeliveredAmount(
     boost::json::object& metaJson,
     std::shared_ptr<ripple::STTx const> const& txn,
-    std::shared_ptr<ripple::TxMeta const> const& meta);
+    std::shared_ptr<ripple::TxMeta const> const& meta,
+    uint32_t date);
 
 boost::json::object
 toJson(ripple::STBase const& obj);
@@ -93,6 +101,24 @@ traverseOwnedNodes(
     boost::asio::yield_context& yield,
     std::function<void(ripple::SLE)> atOwnedNode);
 
+std::variant<Status, AccountCursor>
+traverseOwnedNodes(
+    BackendInterface const& backend,
+    ripple::Keylet const& owner,
+    ripple::uint256 const& hexMarker,
+    std::uint32_t const startHint,
+    std::uint32_t sequence,
+    std::uint32_t limit,
+    std::optional<std::string> jsonCursor,
+    boost::asio::yield_context& yield,
+    std::function<void(ripple::SLE)> atOwnedNode);
+
+std::shared_ptr<ripple::SLE const>
+read(
+    ripple::Keylet const& keylet,
+    ripple::LedgerInfo const& lgrInfo,
+    Context const& context);
+
 std::variant<Status, std::pair<ripple::PublicKey, ripple::SecretKey>>
 keypairFromRequst(boost::json::object const& request);
 
@@ -200,5 +226,33 @@ getString(
     boost::json::object const& request,
     std::string const& field,
     std::string dfault);
+
+Status
+getHexMarker(boost::json::object const& request, ripple::uint256& marker);
+
+Status
+getAccount(boost::json::object const& request, ripple::AccountID& accountId);
+
+Status
+getAccount(
+    boost::json::object const& request,
+    ripple::AccountID& destAccount,
+    boost::string_view const& field);
+
+Status
+getOptionalAccount(
+    boost::json::object const& request,
+    std::optional<ripple::AccountID>& account,
+    boost::string_view const& field);
+
+Status
+getTaker(boost::json::object const& request, ripple::AccountID& takerID);
+
+Status
+getChannelId(boost::json::object const& request, ripple::uint256& channelId);
+
+bool
+specifiesCurrentOrClosedLedger(boost::json::object const& request);
+
 } // namespace RPC
 #endif
11
src/rpc/WorkQueue.cpp
Normal file
@@ -0,0 +1,11 @@
+#include <rpc/WorkQueue.h>
+
+WorkQueue::WorkQueue(std::uint32_t numWorkers, uint32_t maxSize)
+{
+    if (maxSize != 0)
+        maxSize_ = maxSize;
+    while (--numWorkers)
+    {
+        threads_.emplace_back([this] { ioc_.run(); });
+    }
+}
82
src/rpc/WorkQueue.h
Normal file
@@ -0,0 +1,82 @@
+#ifndef CLIO_WORK_QUEUE_H
+#define CLIO_WORK_QUEUE_H
+
+#include <boost/asio.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/json.hpp>
+#include <boost/log/trivial.hpp>
+
+#include <memory>
+#include <optional>
+#include <queue>
+#include <shared_mutex>
+#include <thread>
+
+class WorkQueue
+{
+    // these are cumulative for the lifetime of the process
+    std::atomic_uint64_t queued_ = 0;
+    std::atomic_uint64_t durationUs_ = 0;
+
+    std::atomic_uint64_t curSize_ = 0;
+    uint32_t maxSize_ = std::numeric_limits<uint32_t>::max();
+
+public:
+    WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0);
+
+    template <typename F>
+    bool
+    postCoro(F&& f, bool isWhiteListed)
+    {
+        if (curSize_ >= maxSize_ && !isWhiteListed)
+        {
+            BOOST_LOG_TRIVIAL(warning)
+                << __func__
+                << " queue is full. rejecting job. current size = " << curSize_
+                << " max size = " << maxSize_;
+            return false;
+        }
+        ++curSize_;
+        auto start = std::chrono::system_clock::now();
+        // Each time we enqueue a job, we want to post a symmetrical job that
+        // will dequeue and run the job at the front of the job queue.
+        boost::asio::spawn(
+            ioc_,
+            [this, f = std::move(f), start](boost::asio::yield_context yield) {
+                auto run = std::chrono::system_clock::now();
+                auto wait =
+                    std::chrono::duration_cast<std::chrono::microseconds>(
+                        run - start)
+                        .count();
+                // increment queued_ here, in the same place we implement
+                // durationUs_
+                ++queued_;
+                durationUs_ += wait;
+                BOOST_LOG_TRIVIAL(debug) << "WorkQueue wait time = " << wait
+                                         << " queue size = " << curSize_;
+                f(yield);
+                --curSize_;
+            });
+        return true;
+    }
+
+    // TODO: this is not actually being called. Wait for application refactor
+    boost::json::object
+    report()
+    {
+        boost::json::object obj;
+        obj["queued"] = queued_;
+        obj["queued_duration_us"] = durationUs_;
+        obj["current_queue_size"] = curSize_;
+        obj["max_queue_size"] = maxSize_;
+        return obj;
+    }
+
+private:
+    std::vector<std::thread> threads_ = {};
+
+    boost::asio::io_context ioc_ = {};
+    std::optional<boost::asio::io_context::work> work_{ioc_};
+};
+
+#endif  // CLIO_WORK_QUEUE_H
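
A hedged usage sketch for the new WorkQueue (the wrapper function and the false whitelist flag are illustrative; postCoro and its back-pressure behavior are exactly as defined above): enqueue one coroutine job and surface rejection to the caller when the queue is full.

#include <rpc/WorkQueue.h>

bool
enqueueExample(WorkQueue& queue)
{
    return queue.postCoro(
        [](boost::asio::yield_context yield) {
            // run a coroutine-based RPC job here; `yield` can be passed
            // through to backend fetch* calls
        },
        /* isWhiteListed = */ false);  // false: subject to the maxSize_ cap
}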
src/rpc/handlers/AccountChannels.cpp
@@ -17,27 +17,27 @@ void
 addChannel(boost::json::array& jsonLines, ripple::SLE const& line)
 {
     boost::json::object jDst;
-    jDst["channel_id"] = ripple::to_string(line.key());
-    jDst["account"] = ripple::to_string(line.getAccountID(ripple::sfAccount));
-    jDst["destination_account"] =
+    jDst[JS(channel_id)] = ripple::to_string(line.key());
+    jDst[JS(account)] = ripple::to_string(line.getAccountID(ripple::sfAccount));
+    jDst[JS(destination_account)] =
         ripple::to_string(line.getAccountID(ripple::sfDestination));
-    jDst["amount"] = line[ripple::sfAmount].getText();
-    jDst["balance"] = line[ripple::sfBalance].getText();
+    jDst[JS(amount)] = line[ripple::sfAmount].getText();
+    jDst[JS(balance)] = line[ripple::sfBalance].getText();
     if (publicKeyType(line[ripple::sfPublicKey]))
     {
         ripple::PublicKey const pk(line[ripple::sfPublicKey]);
-        jDst["public_key"] = toBase58(ripple::TokenType::AccountPublic, pk);
-        jDst["public_key_hex"] = strHex(pk);
+        jDst[JS(public_key)] = toBase58(ripple::TokenType::AccountPublic, pk);
+        jDst[JS(public_key_hex)] = strHex(pk);
     }
-    jDst["settle_delay"] = line[ripple::sfSettleDelay];
+    jDst[JS(settle_delay)] = line[ripple::sfSettleDelay];
     if (auto const& v = line[~ripple::sfExpiration])
-        jDst["expiration"] = *v;
+        jDst[JS(expiration)] = *v;
     if (auto const& v = line[~ripple::sfCancelAfter])
-        jDst["cancel_after"] = *v;
+        jDst[JS(cancel_after)] = *v;
     if (auto const& v = line[~ripple::sfSourceTag])
-        jDst["source_tag"] = *v;
+        jDst[JS(source_tag)] = *v;
     if (auto const& v = line[~ripple::sfDestinationTag])
-        jDst["destination_tag"] = *v;
+        jDst[JS(destination_tag)] = *v;
 
     jsonLines.push_back(jDst);
 }
@@ -54,66 +54,45 @@ doAccountChannels(Context const& context)
 
     auto lgrInfo = std::get<ripple::LedgerInfo>(v);
 
-    if (!request.contains("account"))
-        return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
-
-    if (!request.at("account").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
-
-    auto accountID =
-        accountFromStringStrict(request.at("account").as_string().c_str());
-
-    if (!accountID)
-        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
-
-    std::optional<ripple::AccountID> destAccount = {};
-    if (request.contains("destination_account"))
-    {
-        if (!request.at("destination_account").is_string())
-            return Status{Error::rpcINVALID_PARAMS, "destinationNotString"};
-
-        destAccount = accountFromStringStrict(
-            request.at("destination_account").as_string().c_str());
-
-        if (!destAccount)
-            return Status{Error::rpcINVALID_PARAMS, "destinationMalformed"};
-    }
-
-    std::uint32_t limit = 200;
-    if (request.contains("limit"))
-    {
-        if (!request.at("limit").is_int64())
-            return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
-
-        limit = request.at("limit").as_int64();
-        if (limit <= 0)
-            return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
-    }
-
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;
+
+    auto rawAcct = context.backend->fetchLedgerObject(
+        ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
+
+    if (!rawAcct)
+        return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
+
+    ripple::AccountID destAccount;
+    if (auto const status =
+            getAccount(request, destAccount, JS(destination_account));
+        status)
+        return status;
+
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;
+
     std::optional<std::string> marker = {};
-    if (request.contains("marker"))
+    if (request.contains(JS(marker)))
     {
-        if (!request.at("marker").is_string())
+        if (!request.at(JS(marker)).is_string())
             return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
 
-        marker = request.at("marker").as_string().c_str();
+        marker = request.at(JS(marker)).as_string().c_str();
     }
 
-    response["account"] = ripple::to_string(*accountID);
-    response["channels"] = boost::json::value(boost::json::array_kind);
-    boost::json::array& jsonChannels = response.at("channels").as_array();
+    response[JS(account)] = ripple::to_string(accountID);
+    response[JS(channels)] = boost::json::value(boost::json::array_kind);
+    boost::json::array& jsonChannels = response.at(JS(channels)).as_array();
 
     auto const addToResponse = [&](ripple::SLE const& sle) {
         if (sle.getType() == ripple::ltPAYCHAN &&
-            sle.getAccountID(ripple::sfAccount) == *accountID &&
+            sle.getAccountID(ripple::sfAccount) == accountID &&
             (!destAccount ||
-             *destAccount == sle.getAccountID(ripple::sfDestination)))
+             destAccount == sle.getAccountID(ripple::sfDestination)))
         {
-            if (limit-- == 0)
-            {
-                return false;
-            }
-
             addChannel(jsonChannels, sle);
         }
 
@@ -122,23 +101,23 @@ doAccountChannels(Context const& context)
 
     auto next = traverseOwnedNodes(
         *context.backend,
-        *accountID,
+        accountID,
         lgrInfo.seq,
         limit,
         marker,
         context.yield,
         addToResponse);
 
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;
 
     if (auto status = std::get_if<RPC::Status>(&next))
         return *status;
 
-    auto nextCursor = std::get<RPC::AccountCursor>(next);
+    auto nextMarker = std::get<RPC::AccountCursor>(next);
 
-    if (nextCursor.isNonZero())
-        response["marker"] = nextCursor.toString();
+    if (nextMarker.isNonZero())
+        response[JS(marker)] = nextMarker.toString();
 
     return response;
 }
src/rpc/handlers/AccountCurrencies.cpp
@@ -24,17 +24,15 @@ doAccountCurrencies(Context const& context)
 
     auto lgrInfo = std::get<ripple::LedgerInfo>(v);
 
-    if (!request.contains("account"))
-        return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
-
-    if (!request.at("account").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
-
-    auto accountID =
-        accountFromStringStrict(request.at("account").as_string().c_str());
-
-    if (!accountID)
-        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;
+
+    auto rawAcct = context.backend->fetchLedgerObject(
+        ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
+
+    if (!rawAcct)
+        return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
 
     std::set<std::string> send, receive;
     auto const addToResponse = [&](ripple::SLE const& sle) {
@@ -61,26 +59,26 @@ doAccountCurrencies(Context const& context)
 
     traverseOwnedNodes(
         *context.backend,
-        *accountID,
+        accountID,
         lgrInfo.seq,
         std::numeric_limits<std::uint32_t>::max(),
         {},
         context.yield,
         addToResponse);
 
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;
 
-    response["receive_currencies"] =
+    response[JS(receive_currencies)] =
         boost::json::value(boost::json::array_kind);
     boost::json::array& jsonReceive =
-        response.at("receive_currencies").as_array();
+        response.at(JS(receive_currencies)).as_array();
 
     for (auto const& currency : receive)
         jsonReceive.push_back(currency.c_str());
 
-    response["send_currencies"] = boost::json::value(boost::json::array_kind);
-    boost::json::array& jsonSend = response.at("send_currencies").as_array();
+    response[JS(send_currencies)] = boost::json::value(boost::json::array_kind);
+    boost::json::array& jsonSend = response.at(JS(send_currencies)).as_array();
 
     for (auto const& currency : send)
         jsonSend.push_back(currency.c_str());
src/rpc/handlers/AccountInfo.cpp
@@ -29,10 +29,10 @@ doAccountInfo(Context const& context)
     boost::json::object response = {};
 
     std::string strIdent;
-    if (request.contains("account"))
-        strIdent = request.at("account").as_string().c_str();
-    else if (request.contains("ident"))
-        strIdent = request.at("ident").as_string().c_str();
+    if (request.contains(JS(account)))
+        strIdent = request.at(JS(account)).as_string().c_str();
+    else if (request.contains(JS(ident)))
+        strIdent = request.at(JS(ident)).as_string().c_str();
     else
         return Status{Error::rpcACT_MALFORMED};
 
@@ -54,10 +54,8 @@ doAccountInfo(Context const& context)
 
     auto key = ripple::keylet::account(accountID.value());
 
-    auto start = std::chrono::system_clock::now();
     std::optional<std::vector<unsigned char>> dbResponse =
         context.backend->fetchLedgerObject(key.key, lgrInfo.seq, context.yield);
-    auto end = std::chrono::system_clock::now();
 
     if (!dbResponse)
     {
@@ -71,18 +69,18 @@ doAccountInfo(Context const& context)
         return Status{Error::rpcDB_DESERIALIZATION};
 
     // if (!binary)
-    //     response["account_data"] = getJson(sle);
+    //     response[JS(account_data)] = getJson(sle);
     // else
-    //     response["account_data"] = ripple::strHex(*dbResponse);
-    // response["db_time"] = time;
+    //     response[JS(account_data)] = ripple::strHex(*dbResponse);
+    // response[JS(db_time)] = time;
 
-    response["account_data"] = toJson(sle);
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
+    response[JS(account_data)] = toJson(sle);
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;
 
     // Return SignerList(s) if that is requested.
-    if (request.contains("signer_lists") &&
-        request.at("signer_lists").as_bool())
+    if (request.contains(JS(signer_lists)) &&
+        request.at(JS(signer_lists)).as_bool())
     {
         // We put the SignerList in an array because of an anticipated
         // future when we support multiple signer lists on one account.
@@ -104,7 +102,7 @@ doAccountInfo(Context const& context)
             signerList.push_back(toJson(sleSigners));
         }
 
-        response["account_data"].as_object()["signer_lists"] =
+        response[JS(account_data)].as_object()[JS(signer_lists)] =
             std::move(signerList);
     }
 
src/rpc/handlers/AccountLines.cpp
@@ -39,7 +39,7 @@ addLine(
     auto lineQualityIn = viewLowest ? lowQualityIn : highQualityIn;
     auto lineQualityOut = viewLowest ? lowQualityOut : highQualityOut;
 
-    if (peerAccount and peerAccount != lineAccountIDPeer)
+    if (peerAccount && peerAccount != lineAccountIDPeer)
         return;
 
     if (!viewLowest)
@@ -64,25 +64,25 @@ addLine(
     ripple::STAmount const& saLimitPeer(lineLimitPeer);
 
     boost::json::object jPeer;
-    jPeer["account"] = ripple::to_string(lineAccountIDPeer);
-    jPeer["balance"] = saBalance.getText();
-    jPeer["currency"] = ripple::to_string(saBalance.issue().currency);
-    jPeer["limit"] = saLimit.getText();
-    jPeer["limit_peer"] = saLimitPeer.getText();
-    jPeer["quality_in"] = lineQualityIn;
-    jPeer["quality_out"] = lineQualityOut;
+    jPeer[JS(account)] = ripple::to_string(lineAccountIDPeer);
+    jPeer[JS(balance)] = saBalance.getText();
+    jPeer[JS(currency)] = ripple::to_string(saBalance.issue().currency);
+    jPeer[JS(limit)] = saLimit.getText();
+    jPeer[JS(limit_peer)] = saLimitPeer.getText();
+    jPeer[JS(quality_in)] = lineQualityIn;
+    jPeer[JS(quality_out)] = lineQualityOut;
     if (lineAuth)
-        jPeer["authorized"] = true;
+        jPeer[JS(authorized)] = true;
     if (lineAuthPeer)
-        jPeer["peer_authorized"] = true;
+        jPeer[JS(peer_authorized)] = true;
     if (lineNoRipple || !lineDefaultRipple)
-        jPeer["no_ripple"] = lineNoRipple;
+        jPeer[JS(no_ripple)] = lineNoRipple;
     if (lineNoRipple || !lineDefaultRipple)
-        jPeer["no_ripple_peer"] = lineNoRipplePeer;
+        jPeer[JS(no_ripple_peer)] = lineNoRipplePeer;
     if (lineFreeze)
-        jPeer["freeze"] = true;
+        jPeer[JS(freeze)] = true;
     if (lineFreezePeer)
-        jPeer["freeze_peer"] = true;
+        jPeer[JS(freeze_peer)] = true;
 
     jsonLines.push_back(jPeer);
 }
@@ -99,82 +99,65 @@ doAccountLines(Context const& context)
 
     auto lgrInfo = std::get<ripple::LedgerInfo>(v);
 
-    if (!request.contains("account"))
-        return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
-
-    if (!request.at("account").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
-
-    auto accountID =
-        accountFromStringStrict(request.at("account").as_string().c_str());
-
-    if (!accountID)
-        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
-
-    std::optional<ripple::AccountID> peerAccount;
-    if (request.contains("peer"))
-    {
-        if (!request.at("peer").is_string())
-            return Status{Error::rpcINVALID_PARAMS, "peerNotString"};
-
-        peerAccount =
-            accountFromStringStrict(request.at("peer").as_string().c_str());
-
-        if (!peerAccount)
-            return Status{Error::rpcINVALID_PARAMS, "peerMalformed"};
-    }
-
-    std::uint32_t limit = 200;
-    if (request.contains("limit"))
-    {
-        if (!request.at("limit").is_int64())
-            return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
-
-        limit = request.at("limit").as_int64();
-        if (limit <= 0)
-            return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
-    }
-
-    std::optional<std::string> cursor = {};
-    if (request.contains("marker"))
-    {
-        if (!request.at("marker").is_string())
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;
+
+    auto rawAcct = context.backend->fetchLedgerObject(
+        ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
+
+    if (!rawAcct)
+        return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
+
+    std::optional<ripple::AccountID> peerAccount;
+    if (auto const status = getOptionalAccount(request, peerAccount, JS(peer));
+        status)
+        return status;
+
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;
+
+    std::optional<std::string> marker = {};
+    if (request.contains(JS(marker)))
+    {
+        if (!request.at(JS(marker)).is_string())
             return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
 
-        cursor = request.at("marker").as_string().c_str();
+        marker = request.at(JS(marker)).as_string().c_str();
     }
 
-    response["account"] = ripple::to_string(*accountID);
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
-    response["lines"] = boost::json::value(boost::json::array_kind);
-    boost::json::array& jsonLines = response.at("lines").as_array();
+    response[JS(account)] = ripple::to_string(accountID);
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;
+    response[JS(lines)] = boost::json::value(boost::json::array_kind);
+    boost::json::array& jsonLines = response.at(JS(lines)).as_array();
 
     auto const addToResponse = [&](ripple::SLE const& sle) -> void {
         if (sle.getType() == ripple::ltRIPPLE_STATE)
         {
-            addLine(jsonLines, sle, *accountID, peerAccount);
+            addLine(jsonLines, sle, accountID, peerAccount);
         }
     };
 
     auto next = traverseOwnedNodes(
         *context.backend,
-        *accountID,
+        accountID,
         lgrInfo.seq,
         limit,
-        cursor,
+        marker,
         context.yield,
         addToResponse);
 
     if (auto status = std::get_if<RPC::Status>(&next))
         return *status;
 
-    auto nextCursor = std::get<RPC::AccountCursor>(next);
+    auto nextMarker = std::get<RPC::AccountCursor>(next);
 
-    if (nextCursor.isNonZero())
-        response["marker"] = nextCursor.toString();
+    if (nextMarker.isNonZero())
+        response[JS(marker)] = nextMarker.toString();
 
     return response;
 }
 
 } // namespace RPC
|
|||||||
#include <ripple/app/ledger/Ledger.h>
|
#include <ripple/app/ledger/Ledger.h>
|
||||||
#include <ripple/app/paths/TrustLine.h>
|
#include <ripple/app/paths/TrustLine.h>
|
||||||
|
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
|
||||||
#include <ripple/basics/StringUtilities.h>
|
#include <ripple/basics/StringUtilities.h>
|
||||||
#include <ripple/protocol/ErrorCodes.h>
|
#include <ripple/protocol/ErrorCodes.h>
|
||||||
#include <ripple/protocol/Indexes.h>
|
#include <ripple/protocol/Indexes.h>
|
||||||
#include <ripple/protocol/STLedgerEntry.h>
|
#include <ripple/protocol/STLedgerEntry.h>
|
||||||
#include <ripple/protocol/jss.h>
|
#include <ripple/protocol/jss.h>
|
||||||
|
#include <ripple/protocol/nftPageMask.h>
|
||||||
#include <boost/json.hpp>
|
#include <boost/json.hpp>
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <rpc/RPCHelpers.h>
|
#include <rpc/RPCHelpers.h>
|
||||||
@@ -23,7 +25,112 @@ std::unordered_map<std::string, ripple::LedgerEntryType> types{
     {"escrow", ripple::ltESCROW},
     {"deposit_preauth", ripple::ltDEPOSIT_PREAUTH},
     {"check", ripple::ltCHECK},
-};
+    {"nft_page", ripple::ltNFTOKEN_PAGE},
+    {"nft_offer", ripple::ltNFTOKEN_OFFER}};
+
+Result
+doAccountNFTs(Context const& context)
+{
+    auto request = context.params;
+    boost::json::object response = {};
+
+    auto v = ledgerInfoFromRequest(context);
+    if (auto status = std::get_if<Status>(&v))
+        return *status;
+
+    auto lgrInfo = std::get<ripple::LedgerInfo>(v);
+
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;
+
+    if (!accountID)
+        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
+
+    auto rawAcct = context.backend->fetchLedgerObject(
+        ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
+
+    if (!rawAcct)
+        return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
+
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;
+
+    ripple::uint256 marker;
+    if (auto const status = getHexMarker(request, marker); status)
+        return status;
+
+    response[JS(account)] = ripple::toBase58(accountID);
+    response[JS(validated)] = true;
+
+    std::uint32_t numPages = 0;
+    response[JS(account_nfts)] = boost::json::value(boost::json::array_kind);
+    auto& nfts = response.at(JS(account_nfts)).as_array();
+
+    // if a marker was passed, start at the page specified in marker. Else,
+    // start at the max page
+    auto const pageKey =
+        marker.isZero() ? ripple::keylet::nftpage_max(accountID).key : marker;
+
+    auto const blob =
+        context.backend->fetchLedgerObject(pageKey, lgrInfo.seq, context.yield);
+    if (!blob)
+        return response;
+    std::optional<ripple::SLE const> page{
+        ripple::SLE{ripple::SerialIter{blob->data(), blob->size()}, pageKey}};
+
+    // Continue iteration from the current page
+    while (page)
+    {
+        auto arr = page->getFieldArray(ripple::sfNFTokens);
+
+        for (auto const& o : arr)
+        {
+            ripple::uint256 const nftokenID = o[ripple::sfNFTokenID];
+
+            {
+                nfts.push_back(
+                    toBoostJson(o.getJson(ripple::JsonOptions::none)));
+                auto& obj = nfts.back().as_object();
+
+                // Pull out the components of the nft ID.
+                obj[SFS(sfFlags)] = ripple::nft::getFlags(nftokenID);
+                obj[SFS(sfIssuer)] =
+                    to_string(ripple::nft::getIssuer(nftokenID));
+                obj[SFS(sfNFTokenTaxon)] =
+                    ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID));
+                obj[JS(nft_serial)] = ripple::nft::getSerial(nftokenID);
+
+                if (std::uint16_t xferFee = {
+                        ripple::nft::getTransferFee(nftokenID)})
+                    obj[SFS(sfTransferFee)] = xferFee;
+            }
+        }
+
+        ++numPages;
+        if (auto npm = (*page)[~ripple::sfPreviousPageMin])
+        {
+            auto const nextKey = ripple::Keylet(ripple::ltNFTOKEN_PAGE, *npm);
+            if (numPages == limit)
+            {
+                response[JS(marker)] = to_string(nextKey.key);
+                response[JS(limit)] = numPages;
+                return response;
+            }
+            auto const nextBlob = context.backend->fetchLedgerObject(
+                nextKey.key, lgrInfo.seq, context.yield);
+
+            page.emplace(ripple::SLE{
+                ripple::SerialIter{nextBlob->data(), nextBlob->size()},
+                nextKey.key});
+        }
+        else
+            page.reset();
+    }
+
+    return response;
+}
+
 Result
 doAccountObjects(Context const& context)
@@ -37,54 +144,40 @@ doAccountObjects(Context const& context)

     auto lgrInfo = std::get<ripple::LedgerInfo>(v);

-    if (!request.contains("account"))
-        return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
-
-    if (!request.at("account").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
-
-    auto accountID =
-        accountFromStringStrict(request.at("account").as_string().c_str());
-
-    if (!accountID)
-        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
-
-    std::uint32_t limit = 200;
-    if (request.contains("limit"))
-    {
-        if (!request.at("limit").is_int64())
-            return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
-
-        limit = request.at("limit").as_int64();
-        if (limit <= 0)
-            return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
-    }
-
-    std::optional<std::string> cursor = {};
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;
+
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;
+
+    std::optional<std::string> marker = {};
     if (request.contains("marker"))
     {
         if (!request.at("marker").is_string())
             return Status{Error::rpcINVALID_PARAMS, "markerNotString"};

-        cursor = request.at("marker").as_string().c_str();
+        marker = request.at("marker").as_string().c_str();
     }

     std::optional<ripple::LedgerEntryType> objectType = {};
-    if (request.contains("type"))
+    if (request.contains(JS(type)))
     {
-        if (!request.at("type").is_string())
+        if (!request.at(JS(type)).is_string())
             return Status{Error::rpcINVALID_PARAMS, "typeNotString"};

-        std::string typeAsString = request.at("type").as_string().c_str();
+        std::string typeAsString = request.at(JS(type)).as_string().c_str();
         if (types.find(typeAsString) == types.end())
             return Status{Error::rpcINVALID_PARAMS, "typeInvalid"};

         objectType = types[typeAsString];
     }

-    response["account"] = ripple::to_string(*accountID);
-    response["account_objects"] = boost::json::value(boost::json::array_kind);
-    boost::json::array& jsonObjects = response.at("account_objects").as_array();
+    response[JS(account)] = ripple::to_string(accountID);
+    response[JS(account_objects)] = boost::json::value(boost::json::array_kind);
+    boost::json::array& jsonObjects =
+        response.at(JS(account_objects)).as_array();

     auto const addToResponse = [&](ripple::SLE const& sle) {
         if (!objectType || objectType == sle.getType())
@@ -95,23 +188,23 @@ doAccountObjects(Context const& context)

     auto next = traverseOwnedNodes(
         *context.backend,
-        *accountID,
+        accountID,
         lgrInfo.seq,
         limit,
-        cursor,
+        marker,
         context.yield,
         addToResponse);

-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;

     if (auto status = std::get_if<RPC::Status>(&next))
         return *status;

-    auto nextCursor = std::get<RPC::AccountCursor>(next);
+    auto nextMarker = std::get<RPC::AccountCursor>(next);

-    if (nextCursor.isNonZero())
-        response["marker"] = nextCursor.toString();
+    if (nextMarker.isNonZero())
+        response[JS(marker)] = nextMarker.toString();

     return response;
 }
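A recurring theme in these hunks is that per-handler validation boilerplate is replaced by shared getters (getAccount, getLimit, getHexMarker, getBool, getTaker, getChannelId), presumably declared in rpc/RPCHelpers.h; their definitions are not part of this diff. A minimal self-contained sketch of the pattern, with illustrative types and defaults:

```cpp
// Sketch only: the real helpers live in Clio's RPCHelpers and operate on
// boost::json and RPC::Status; the minimal types below are stand-ins.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct Status
{
    std::string error;  // empty string means "OK"
    explicit operator bool() const { return !error.empty(); }  // truthy on failure
};

// Validate-and-extract in one step, like getLimit(context, limit) above.
Status
getLimit(std::map<std::string, std::int64_t> const& request, std::uint32_t& limit)
{
    auto it = request.find("limit");
    if (it == request.end())
    {
        limit = 200;  // illustrative default
        return {};
    }
    if (it->second <= 0)
        return {"limitNotPositive"};
    limit = static_cast<std::uint32_t>(it->second);
    return {};
}

int
main()
{
    std::map<std::string, std::int64_t> request{{"limit", 50}};
    std::uint32_t limit = 0;
    if (auto const status = getLimit(request, limit); status)  // early return on error
        std::cout << "error: " << status.error << '\n';
    else
        std::cout << "limit = " << limit << '\n';  // prints: limit = 50
}
```

The key design choice is that Status converts to true on failure, so every call site collapses to a single `if (auto const status = ...; status) return status;` line.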
@@ -27,37 +27,39 @@ addOffer(boost::json::array& offersJson, ripple::SLE const& offer)

     if (!takerPays.native())
     {
-        obj["taker_pays"] = boost::json::value(boost::json::object_kind);
-        boost::json::object& takerPaysJson = obj.at("taker_pays").as_object();
+        obj[JS(taker_pays)] = boost::json::value(boost::json::object_kind);
+        boost::json::object& takerPaysJson = obj.at(JS(taker_pays)).as_object();

-        takerPaysJson["value"] = takerPays.getText();
-        takerPaysJson["currency"] = ripple::to_string(takerPays.getCurrency());
-        takerPaysJson["issuer"] = ripple::to_string(takerPays.getIssuer());
+        takerPaysJson[JS(value)] = takerPays.getText();
+        takerPaysJson[JS(currency)] =
+            ripple::to_string(takerPays.getCurrency());
+        takerPaysJson[JS(issuer)] = ripple::to_string(takerPays.getIssuer());
     }
     else
     {
-        obj["taker_pays"] = takerPays.getText();
+        obj[JS(taker_pays)] = takerPays.getText();
     }

     if (!takerGets.native())
     {
-        obj["taker_gets"] = boost::json::value(boost::json::object_kind);
-        boost::json::object& takerGetsJson = obj.at("taker_gets").as_object();
+        obj[JS(taker_gets)] = boost::json::value(boost::json::object_kind);
+        boost::json::object& takerGetsJson = obj.at(JS(taker_gets)).as_object();

-        takerGetsJson["value"] = takerGets.getText();
-        takerGetsJson["currency"] = ripple::to_string(takerGets.getCurrency());
-        takerGetsJson["issuer"] = ripple::to_string(takerGets.getIssuer());
+        takerGetsJson[JS(value)] = takerGets.getText();
+        takerGetsJson[JS(currency)] =
+            ripple::to_string(takerGets.getCurrency());
+        takerGetsJson[JS(issuer)] = ripple::to_string(takerGets.getIssuer());
     }
     else
     {
-        obj["taker_gets"] = takerGets.getText();
+        obj[JS(taker_gets)] = takerGets.getText();
     }

-    obj["seq"] = offer.getFieldU32(ripple::sfSequence);
-    obj["flags"] = offer.getFieldU32(ripple::sfFlags);
-    obj["quality"] = rate.getText();
+    obj[JS(seq)] = offer.getFieldU32(ripple::sfSequence);
+    obj[JS(flags)] = offer.getFieldU32(ripple::sfFlags);
+    obj[JS(quality)] = rate.getText();
     if (offer.isFieldPresent(ripple::sfExpiration))
-        obj["expiration"] = offer.getFieldU32(ripple::sfExpiration);
+        obj[JS(expiration)] = offer.getFieldU32(ripple::sfExpiration);

     offersJson.push_back(obj);
 };
@@ -74,52 +76,38 @@ doAccountOffers(Context const& context)

     auto lgrInfo = std::get<ripple::LedgerInfo>(v);

-    if (!request.contains("account"))
-        return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
-
-    if (!request.at("account").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
-
-    auto accountID =
-        accountFromStringStrict(request.at("account").as_string().c_str());
-
-    if (!accountID)
-        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
-
-    std::uint32_t limit = 200;
-    if (request.contains("limit"))
-    {
-        if (!request.at("limit").is_int64())
-            return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
-
-        limit = request.at("limit").as_int64();
-        if (limit <= 0)
-            return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
-    }
-
-    std::optional<std::string> cursor = {};
-    if (request.contains("marker"))
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;
+
+    auto rawAcct = context.backend->fetchLedgerObject(
+        ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
+
+    if (!rawAcct)
+        return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
+
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;
+
+    std::optional<std::string> marker = {};
+    if (request.contains(JS(marker)))
     {
-        if (!request.at("marker").is_string())
+        if (!request.at(JS(marker)).is_string())
             return Status{Error::rpcINVALID_PARAMS, "markerNotString"};

-        cursor = request.at("marker").as_string().c_str();
+        marker = request.at(JS(marker)).as_string().c_str();
     }

-    response["account"] = ripple::to_string(*accountID);
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
-    response["offers"] = boost::json::value(boost::json::array_kind);
-    boost::json::array& jsonLines = response.at("offers").as_array();
+    response[JS(account)] = ripple::to_string(accountID);
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;
+    response[JS(offers)] = boost::json::value(boost::json::array_kind);
+    boost::json::array& jsonLines = response.at(JS(offers)).as_array();

     auto const addToResponse = [&](ripple::SLE const& sle) {
         if (sle.getType() == ripple::ltOFFER)
         {
-            if (limit-- == 0)
-            {
-                return false;
-            }
-
             addOffer(jsonLines, sle);
         }

@@ -128,22 +116,22 @@ doAccountOffers(Context const& context)

     auto next = traverseOwnedNodes(
         *context.backend,
-        *accountID,
+        accountID,
         lgrInfo.seq,
         limit,
-        cursor,
+        marker,
         context.yield,
         addToResponse);

     if (auto status = std::get_if<RPC::Status>(&next))
         return *status;

-    auto nextCursor = std::get<RPC::AccountCursor>(next);
+    auto nextMarker = std::get<RPC::AccountCursor>(next);

-    if (nextCursor.isNonZero())
-        response["marker"] = nextCursor.toString();
+    if (nextMarker.isNonZero())
+        response[JS(marker)] = nextMarker.toString();

     return response;
 }

 } // namespace RPC
@@ -12,60 +12,38 @@ doAccountTx(Context const& context)
     auto request = context.params;
     boost::json::object response = {};

-    if (!request.contains("account"))
-        return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
-
-    if (!request.at("account").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
-
-    auto accountID =
-        accountFromStringStrict(request.at("account").as_string().c_str());
-
-    if (!accountID)
-        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
-
-    bool binary = false;
-    if (request.contains("binary"))
-    {
-        if (!request.at("binary").is_bool())
-            return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
-
-        binary = request.at("binary").as_bool();
-    }
-    bool forward = false;
-    if (request.contains("forward"))
-    {
-        if (!request.at("forward").is_bool())
-            return Status{Error::rpcINVALID_PARAMS, "forwardNotBool"};
-
-        forward = request.at("forward").as_bool();
-    }
-
-    std::optional<Backend::AccountTransactionsCursor> cursor;
-
-    if (request.contains("marker"))
-    {
-        auto const& obj = request.at("marker").as_object();
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;
+
+    bool const binary = getBool(request, JS(binary), false);
+    bool const forward = getBool(request, JS(forward), false);
+
+    std::optional<Backend::TransactionsCursor> cursor;
+
+    if (request.contains(JS(marker)))
+    {
+        auto const& obj = request.at(JS(marker)).as_object();

         std::optional<std::uint32_t> transactionIndex = {};
-        if (obj.contains("seq"))
+        if (obj.contains(JS(seq)))
         {
-            if (!obj.at("seq").is_int64())
+            if (!obj.at(JS(seq)).is_int64())
                 return Status{
                     Error::rpcINVALID_PARAMS, "transactionIndexNotInt"};

             transactionIndex =
-                boost::json::value_to<std::uint32_t>(obj.at("seq"));
+                boost::json::value_to<std::uint32_t>(obj.at(JS(seq)));
         }

         std::optional<std::uint32_t> ledgerIndex = {};
-        if (obj.contains("ledger"))
+        if (obj.contains(JS(ledger)))
         {
-            if (!obj.at("ledger").is_int64())
+            if (!obj.at(JS(ledger)).is_int64())
                 return Status{Error::rpcINVALID_PARAMS, "ledgerIndexNotInt"};

             ledgerIndex =
-                boost::json::value_to<std::uint32_t>(obj.at("ledger"));
+                boost::json::value_to<std::uint32_t>(obj.at(JS(ledger)));
         }

         if (!transactionIndex || !ledgerIndex)
@@ -75,9 +53,9 @@ doAccountTx(Context const& context)
     }

     auto minIndex = context.range.minSequence;
-    if (request.contains("ledger_index_min"))
+    if (request.contains(JS(ledger_index_min)))
     {
-        auto& min = request.at("ledger_index_min");
+        auto& min = request.at(JS(ledger_index_min));

         if (!min.is_int64())
             return Status{Error::rpcINVALID_PARAMS, "ledgerSeqMinNotNumber"};
@@ -87,7 +65,7 @@ doAccountTx(Context const& context)
         if (context.range.maxSequence < min.as_int64() ||
             context.range.minSequence > min.as_int64())
             return Status{
-                Error::rpcINVALID_PARAMS, "ledgerSeqMaxOutOfRange"};
+                Error::rpcINVALID_PARAMS, "ledgerSeqMinOutOfRange"};
         else
             minIndex = value_to<std::uint32_t>(min);
     }
@@ -97,9 +75,9 @@ doAccountTx(Context const& context)
     }

     auto maxIndex = context.range.maxSequence;
-    if (request.contains("ledger_index_max"))
+    if (request.contains(JS(ledger_index_max)))
     {
-        auto& max = request.at("ledger_index_max");
+        auto& max = request.at(JS(ledger_index_max));

         if (!max.is_int64())
             return Status{Error::rpcINVALID_PARAMS, "ledgerSeqMaxNotNumber"};
@@ -121,30 +99,18 @@ doAccountTx(Context const& context)
         cursor = {maxIndex, INT32_MAX};
     }

-    if (request.contains("ledger_index"))
+    if (request.contains(JS(ledger_index)) || request.contains(JS(ledger_hash)))
     {
-        if (!request.at("ledger_index").is_int64())
-            return Status{Error::rpcINVALID_PARAMS, "ledgerIndexNotNumber"};
-
-        auto ledgerIndex =
-            boost::json::value_to<std::uint32_t>(request.at("ledger_index"));
-        maxIndex = minIndex = ledgerIndex;
-    }
-
-    if (request.contains("ledger_hash"))
-    {
-        if (!request.at("ledger_hash").is_string())
-            return RPC::Status{
-                RPC::Error::rpcINVALID_PARAMS, "ledgerHashNotString"};
-
-        ripple::uint256 ledgerHash;
-        if (!ledgerHash.parseHex(request.at("ledger_hash").as_string().c_str()))
-            return RPC::Status{
-                RPC::Error::rpcINVALID_PARAMS, "ledgerHashMalformed"};
-
-        auto lgrInfo =
-            context.backend->fetchLedgerByHash(ledgerHash, context.yield);
-        maxIndex = minIndex = lgrInfo->seq;
+        if (request.contains(JS(ledger_index_max)) ||
+            request.contains(JS(ledger_index_min)))
+            return Status{
+                Error::rpcINVALID_PARAMS, "containsLedgerSpecifierAndRange"};
+
+        auto v = ledgerInfoFromRequest(context);
+        if (auto status = std::get_if<Status>(&v))
+            return *status;
+
+        maxIndex = minIndex = std::get<ripple::LedgerInfo>(v).seq;
     }

     if (!cursor)
@@ -155,37 +121,31 @@ doAccountTx(Context const& context)
         cursor = {maxIndex, INT32_MAX};
     }

-    std::uint32_t limit = 200;
-    if (request.contains("limit"))
-    {
-        if (!request.at("limit").is_int64())
-            return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
-
-        limit = request.at("limit").as_int64();
-        if (limit <= 0)
-            return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
-
-        response["limit"] = limit;
-    }
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;
+
+    if (request.contains(JS(limit)))
+        response[JS(limit)] = limit;

     boost::json::array txns;
     auto start = std::chrono::system_clock::now();
     auto [blobs, retCursor] = context.backend->fetchAccountTransactions(
-        *accountID, limit, forward, cursor, context.yield);
+        accountID, limit, forward, cursor, context.yield);

     auto end = std::chrono::system_clock::now();
     BOOST_LOG_TRIVIAL(info) << __func__ << " db fetch took "
                             << ((end - start).count() / 1000000000.0)
                             << " num blobs = " << blobs.size();

-    response["account"] = ripple::to_string(*accountID);
+    response[JS(account)] = ripple::to_string(accountID);

     if (retCursor)
     {
         boost::json::object cursorJson;
-        cursorJson["ledger"] = retCursor->ledgerSequence;
-        cursorJson["seq"] = retCursor->transactionIndex;
-        response["marker"] = cursorJson;
+        cursorJson[JS(ledger)] = retCursor->ledgerSequence;
+        cursorJson[JS(seq)] = retCursor->transactionIndex;
+        response[JS(marker)] = cursorJson;
     }

     std::optional<size_t> maxReturnedIndex;
@@ -206,18 +166,20 @@ doAccountTx(Context const& context)
         if (!binary)
         {
             auto [txn, meta] = toExpandedJson(txnPlusMeta);
-            obj["meta"] = meta;
-            obj["tx"] = txn;
-            obj["tx"].as_object()["ledger_index"] = txnPlusMeta.ledgerSequence;
-            obj["tx"].as_object()["date"] = txnPlusMeta.date;
+            obj[JS(meta)] = meta;
+            obj[JS(tx)] = txn;
+            obj[JS(tx)].as_object()[JS(ledger_index)] =
+                txnPlusMeta.ledgerSequence;
+            obj[JS(tx)].as_object()[JS(date)] = txnPlusMeta.date;
         }
         else
         {
-            obj["meta"] = ripple::strHex(txnPlusMeta.metadata);
-            obj["tx_blob"] = ripple::strHex(txnPlusMeta.transaction);
-            obj["ledger_index"] = txnPlusMeta.ledgerSequence;
-            obj["date"] = txnPlusMeta.date;
+            obj[JS(meta)] = ripple::strHex(txnPlusMeta.metadata);
+            obj[JS(tx_blob)] = ripple::strHex(txnPlusMeta.transaction);
+            obj[JS(ledger_index)] = txnPlusMeta.ledgerSequence;
+            obj[JS(date)] = txnPlusMeta.date;
         }
+        obj[JS(validated)] = true;

         txns.push_back(obj);
         if (!minReturnedIndex || txnPlusMeta.ledgerSequence < *minReturnedIndex)
@@ -227,24 +189,18 @@ doAccountTx(Context const& context)
     }

     assert(cursor);
-    if (forward)
+    if (!forward)
     {
-        response["ledger_index_min"] = cursor->ledgerSequence;
-        if (blobs.size() >= limit)
-            response["ledger_index_max"] = *maxReturnedIndex;
-        else
-            response["ledger_index_max"] = maxIndex;
+        response[JS(ledger_index_min)] = cursor->ledgerSequence;
+        response[JS(ledger_index_max)] = maxIndex;
     }
     else
     {
-        response["ledger_index_max"] = cursor->ledgerSequence;
-        if (blobs.size() >= limit)
-            response["ledger_index_min"] = *minReturnedIndex;
-        else
-            response["ledger_index_min"] = minIndex;
+        response[JS(ledger_index_max)] = cursor->ledgerSequence;
+        response[JS(ledger_index_min)] = minIndex;
     }

-    response["transactions"] = txns;
+    response[JS(transactions)] = txns;

     auto end2 = std::chrono::system_clock::now();
     BOOST_LOG_TRIVIAL(info) << __func__ << " serialization took "
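The marker object written above (cursorJson with `ledger` and `seq`) is designed to be round-tripped by the client. A hedged client-side sketch, not part of Clio, showing how the next page request is formed:

```cpp
// Client-side paging sketch (hypothetical helper name): the {"ledger","seq"}
// marker emitted by doAccountTx is fed back verbatim to fetch the next page.
#include <boost/json.hpp>
#include <iostream>

boost::json::object
nextAccountTxRequest(
    std::string const& account,
    boost::json::object const& previousResponse)
{
    boost::json::object request{
        {"command", "account_tx"},
        {"account", account},
        {"limit", 50}};

    // Resume where the previous page stopped; the first call sends no marker.
    if (previousResponse.contains("marker"))
        request["marker"] = previousResponse.at("marker");

    return request;
}

int
main()
{
    boost::json::object lastResponse{
        {"marker", boost::json::object{{"ledger", 75443}, {"seq", 12}}}};
    std::cout << boost::json::serialize(
                     nextAccountTxRequest("rExampleAddress", lastResponse))
              << '\n';
}
```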
250  src/rpc/handlers/BookChanges.cpp  Normal file
@@ -0,0 +1,250 @@
+#include <ripple/app/ledger/Ledger.h>
+#include <ripple/basics/ToString.h>
+
+#include <backend/BackendInterface.h>
+#include <rpc/RPCHelpers.h>
+
+#include <boost/json.hpp>
+#include <algorithm>
+
+namespace json = boost::json;
+using namespace ripple;
+
+namespace RPC {
+
+struct BookChange
+{
+    STAmount sideAVolume;
+    STAmount sideBVolume;
+    STAmount highRate;
+    STAmount lowRate;
+    STAmount openRate;
+    STAmount closeRate;
+};
+
+class BookChangesHandler
+{
+    std::reference_wrapper<Context const> context_;
+    std::map<std::string, BookChange> tally_ = {};
+    std::optional<uint32_t> offerCancel_ = {};
+
+public:
+    ~BookChangesHandler() = default;
+    explicit BookChangesHandler(Context const& context)
+        : context_{std::cref(context)}
+    {
+    }
+
+    BookChangesHandler(BookChangesHandler const&) = delete;
+    BookChangesHandler(BookChangesHandler&&) = delete;
+    BookChangesHandler&
+    operator=(BookChangesHandler const&) = delete;
+    BookChangesHandler&
+    operator=(BookChangesHandler&&) = delete;
+
+    /**
+     * @brief Handles the `book_change` request for given transactions
+     *
+     * @param transactions The transactions to compute changes for
+     * @return std::vector<BookChange> The changes
+     */
+    std::vector<BookChange>
+    handle(LedgerInfo const& ledger)
+    {
+        reset();
+
+        for (auto const transactions =
+                 context_.get().backend->fetchAllTransactionsInLedger(
+                     ledger.seq, context_.get().yield);
+             auto const& tx : transactions)
+        {
+            handleBookChange(tx);
+        }
+
+        // TODO: rewrite this with std::ranges when compilers catch up
+        std::vector<BookChange> changes;
+        std::transform(
+            std::make_move_iterator(std::begin(tally_)),
+            std::make_move_iterator(std::end(tally_)),
+            std::back_inserter(changes),
+            [](auto obj) { return obj.second; });
+        return changes;
+    }
+
+private:
+    inline void
+    reset() noexcept
+    {
+        tally_.clear();
+        offerCancel_ = std::nullopt;
+    }
+
+    void
+    handleAffectedNode(STObject const& node)
+    {
+        auto const& metaType = node.getFName();
+        auto const nodeType = node.getFieldU16(sfLedgerEntryType);
+
+        // we only care about ltOFFER objects being modified or
+        // deleted
+        if (nodeType != ltOFFER || metaType == sfCreatedNode)
+            return;
+
+        // if either FF or PF are missing we can't compute
+        // but generally these are cancelled rather than crossed
+        // so skipping them is consistent
+        if (!node.isFieldPresent(sfFinalFields) ||
+            !node.isFieldPresent(sfPreviousFields))
+            return;
+
+        auto const& finalFields =
+            node.peekAtField(sfFinalFields).downcast<STObject>();
+        auto const& previousFields =
+            node.peekAtField(sfPreviousFields).downcast<STObject>();
+
+        // defensive case that should never be hit
+        if (!finalFields.isFieldPresent(sfTakerGets) ||
+            !finalFields.isFieldPresent(sfTakerPays) ||
+            !previousFields.isFieldPresent(sfTakerGets) ||
+            !previousFields.isFieldPresent(sfTakerPays))
+            return;
+
+        // filter out any offers deleted by explicit offer cancels
+        if (metaType == sfDeletedNode && offerCancel_ &&
+            finalFields.getFieldU32(sfSequence) == *offerCancel_)
+            return;
+
+        // compute the difference in gets and pays actually
+        // affected onto the offer
+        auto const deltaGets = finalFields.getFieldAmount(sfTakerGets) -
+            previousFields.getFieldAmount(sfTakerGets);
+        auto const deltaPays = finalFields.getFieldAmount(sfTakerPays) -
+            previousFields.getFieldAmount(sfTakerPays);
+
+        auto const g = to_string(deltaGets.issue());
+        auto const p = to_string(deltaPays.issue());
+
+        auto const noswap =
+            isXRP(deltaGets) ? true : (isXRP(deltaPays) ? false : (g < p));
+
+        auto first = noswap ? deltaGets : deltaPays;
+        auto second = noswap ? deltaPays : deltaGets;
+
+        // defensively programmed, should (probably) never happen
+        if (second == beast::zero)
+            return;
+
+        auto const rate = divide(first, second, noIssue());
+
+        if (first < beast::zero)
+            first = -first;
+
+        if (second < beast::zero)
+            second = -second;
+
+        auto const key = noswap ? (g + '|' + p) : (p + '|' + g);
+        if (tally_.contains(key))
+        {
+            auto& entry = tally_.at(key);
+
+            entry.sideAVolume += first;
+            entry.sideBVolume += second;
+
+            if (entry.highRate < rate)
+                entry.highRate = rate;
+
+            if (entry.lowRate > rate)
+                entry.lowRate = rate;
+
+            entry.closeRate = rate;
+        }
+        else
+        {
+            // TODO: use parenthesized initialization when clang catches up
+            tally_[key] = {
+                first,   // sideAVolume
+                second,  // sideBVolume
+                rate,    // highRate
+                rate,    // lowRate
+                rate,    // openRate
+                rate,    // closeRate
+            };
+        }
+    }
+
+    void
+    handleBookChange(Backend::TransactionAndMetadata const& blob)
+    {
+        auto const [tx, meta] = deserializeTxPlusMeta(blob);
+        if (!tx || !meta || !tx->isFieldPresent(sfTransactionType))
+            return;
+
+        offerCancel_ = shouldCancelOffer(tx);
+        for (auto const& node : meta->getFieldArray(sfAffectedNodes))
+            handleAffectedNode(node);
+    }
+
+    std::optional<uint32_t>
+    shouldCancelOffer(std::shared_ptr<ripple::STTx const> const& tx) const
+    {
+        switch (tx->getFieldU16(sfTransactionType))
+        {
+            // in future if any other ways emerge to cancel an offer
+            // this switch makes them easy to add
+            case ttOFFER_CANCEL:
+            case ttOFFER_CREATE:
+                if (tx->isFieldPresent(sfOfferSequence))
+                    return tx->getFieldU32(sfOfferSequence);
+            default:
+                return std::nullopt;
+        }
+    }
+};
+
+void
+tag_invoke(
+    const json::value_from_tag&,
+    json::value& jv,
+    BookChange const& change)
+{
+    auto amountStr = [](STAmount const& amount) -> std::string {
+        return isXRP(amount) ? to_string(amount.xrp())
+                             : to_string(amount.iou());
+    };
+
+    auto currencyStr = [](STAmount const& amount) -> std::string {
+        return isXRP(amount) ? "XRP_drops" : to_string(amount.issue());
+    };
+
+    jv = {
+        {JS(currency_a), currencyStr(change.sideAVolume)},
+        {JS(currency_b), currencyStr(change.sideBVolume)},
+        {JS(volume_a), amountStr(change.sideAVolume)},
+        {JS(volume_b), amountStr(change.sideBVolume)},
+        {JS(high), to_string(change.highRate.iou())},
+        {JS(low), to_string(change.lowRate.iou())},
+        {JS(open), to_string(change.openRate.iou())},
+        {JS(close), to_string(change.closeRate.iou())},
+    };
+}
+
+Result
+doBookChanges(Context const& context)
+{
+    auto const request = context.params;
+    auto const info = ledgerInfoFromRequest(context);
+    if (auto const status = std::get_if<Status>(&info))
+        return *status;
+
+    auto const lgrInfo = std::get<ripple::LedgerInfo>(info);
+    auto const changes = BookChangesHandler{context}.handle(lgrInfo);
+    return json::object{
+        {JS(type), "bookChanges"},
+        {JS(ledger_index), lgrInfo.seq},
+        {JS(ledger_hash), to_string(lgrInfo.hash)},
+        {JS(ledger_time), lgrInfo.closeTime.time_since_epoch().count()},
+        {JS(changes), json::value_from(changes)},
+    };
+}
+
+} // namespace RPC
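For each book key, the handler keeps one BookChange entry and folds every observed execution rate into it. A minimal sketch of that fold using plain doubles (illustrative only; the real code above uses ripple::STAmount and divide):

```cpp
// Illustration, not part of the handler: how the OHLC fields of one tally
// entry evolve as successive rates arrive, mirroring the update logic above.
#include <algorithm>
#include <cassert>
#include <vector>

struct Ohlc
{
    double open, high, low, close;
};

Ohlc
tallyRates(std::vector<double> const& rates)  // rates in ledger order
{
    assert(!rates.empty());
    Ohlc entry{rates.front(), rates.front(), rates.front(), rates.front()};
    for (double rate : rates)
    {
        entry.high = std::max(entry.high, rate);  // highRate
        entry.low = std::min(entry.low, rate);    // lowRate
        entry.close = rate;  // closeRate tracks the latest; openRate stays first
    }
    return entry;
}
```

So for rates {2.0, 1.5, 3.0} the entry ends up as open=2.0, high=3.0, low=1.5, close=3.0, which is exactly what the tally_ update branch computes per book.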
@@ -48,42 +48,21 @@ doBookOffers(Context const& context)
         }
     }

-    std::uint32_t limit = 200;
-    if (request.contains("limit"))
-    {
-        if (!request.at("limit").is_int64())
-            return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
-
-        limit = request.at("limit").as_int64();
-        if (limit <= 0)
-            return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
-    }
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;

     ripple::AccountID takerID = beast::zero;
-    if (request.contains("taker"))
-    {
-        auto parsed = parseTaker(request["taker"]);
-        if (auto status = std::get_if<Status>(&parsed))
-            return *status;
-        else
-        {
-            takerID = std::get<ripple::AccountID>(parsed);
-        }
-    }
+    if (auto const status = getTaker(request, takerID); status)
+        return status;

-    ripple::uint256 cursor = beast::zero;
-    if (request.contains("cursor"))
-    {
-        if (!request.at("cursor").is_string())
-            return Status{Error::rpcINVALID_PARAMS, "cursorNotString"};
-
-        if (!cursor.parseHex(request.at("cursor").as_string().c_str()))
-            return Status{Error::rpcINVALID_PARAMS, "malformedCursor"};
-    }
+    ripple::uint256 marker = beast::zero;
+    if (auto const status = getHexMarker(request, marker); status)
+        return status;

     auto start = std::chrono::system_clock::now();
-    auto [offers, retCursor] = context.backend->fetchBookOffers(
-        bookBase, lgrInfo.seq, limit, cursor, context.yield);
+    auto [offers, retMarker] = context.backend->fetchBookOffers(
+        bookBase, lgrInfo.seq, limit, marker, context.yield);
     auto end = std::chrono::system_clock::now();

     BOOST_LOG_TRIVIAL(warning)
@@ -92,10 +71,10 @@ doBookOffers(Context const& context)
             .count()
         << " milliseconds - request = " << request;

-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;

-    response["offers"] = postProcessOrderBook(
+    response[JS(offers)] = postProcessOrderBook(
         offers, book, takerID, *context.backend, lgrInfo.seq, context.yield);

     auto end2 = std::chrono::system_clock::now();
@@ -106,8 +85,8 @@ doBookOffers(Context const& context)
             .count()
         << " milliseconds - request = " << request;

-    if (retCursor)
-        response["marker"] = ripple::strHex(*retCursor);
+    if (retMarker)
+        response["marker"] = ripple::strHex(*retMarker);

     return response;
 }
@@ -27,19 +27,13 @@ doChannelAuthorize(Context const& context)
     auto request = context.params;
     boost::json::object response = {};

-    if (!request.contains("channel_id"))
-        return Status{Error::rpcINVALID_PARAMS, "missingChannelID"};
-
-    if (!request.at("channel_id").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "channelIDNotString"};
-
-    if (!request.contains("amount"))
+    if (!request.contains(JS(amount)))
         return Status{Error::rpcINVALID_PARAMS, "missingAmount"};

-    if (!request.at("amount").is_string())
+    if (!request.at(JS(amount)).is_string())
         return Status{Error::rpcINVALID_PARAMS, "amountNotString"};

-    if (!request.contains("key_type") && !request.contains("secret"))
+    if (!request.contains(JS(key_type)) && !request.contains(JS(secret)))
        return Status{Error::rpcINVALID_PARAMS, "missingKeyTypeOrSecret"};

     auto v = keypairFromRequst(request);
@@ -50,10 +44,11 @@ doChannelAuthorize(Context const& context)
         std::get<std::pair<ripple::PublicKey, ripple::SecretKey>>(v);

     ripple::uint256 channelId;
-    if (!channelId.parseHex(request.at("channel_id").as_string().c_str()))
-        return Status{Error::rpcCHANNEL_MALFORMED, "malformedChannelID"};
+    if (auto const status = getChannelId(request, channelId); status)
+        return status;

-    auto optDrops = ripple::to_uint64(request.at("amount").as_string().c_str());
+    auto optDrops =
+        ripple::to_uint64(request.at(JS(amount)).as_string().c_str());

     if (!optDrops)
         return Status{Error::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"};
@@ -67,7 +62,7 @@ doChannelAuthorize(Context const& context)
     try
     {
         auto const buf = ripple::sign(pk, sk, msg.slice());
-        response["signature"] = ripple::strHex(buf);
+        response[JS(signature)] = ripple::strHex(buf);
     }
     catch (std::exception&)
     {
@@ -16,33 +16,28 @@ doChannelVerify(Context const& context)
     auto request = context.params;
     boost::json::object response = {};

-    if (!request.contains("channel_id"))
-        return Status{Error::rpcINVALID_PARAMS, "missingChannelID"};
-
-    if (!request.at("channel_id").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "channelIDNotString"};
-
-    if (!request.contains("amount"))
+    if (!request.contains(JS(amount)))
         return Status{Error::rpcINVALID_PARAMS, "missingAmount"};

-    if (!request.at("amount").is_string())
+    if (!request.at(JS(amount)).is_string())
         return Status{Error::rpcINVALID_PARAMS, "amountNotString"};

-    if (!request.contains("signature"))
+    if (!request.contains(JS(signature)))
         return Status{Error::rpcINVALID_PARAMS, "missingSignature"};

-    if (!request.at("signature").is_string())
+    if (!request.at(JS(signature)).is_string())
         return Status{Error::rpcINVALID_PARAMS, "signatureNotString"};

-    if (!request.contains("public_key"))
+    if (!request.contains(JS(public_key)))
         return Status{Error::rpcINVALID_PARAMS, "missingPublicKey"};

-    if (!request.at("public_key").is_string())
+    if (!request.at(JS(public_key)).is_string())
         return Status{Error::rpcINVALID_PARAMS, "publicKeyNotString"};

     std::optional<ripple::PublicKey> pk;
     {
-        std::string const strPk = request.at("public_key").as_string().c_str();
+        std::string const strPk =
+            request.at(JS(public_key)).as_string().c_str();
         pk = ripple::parseBase58<ripple::PublicKey>(
             ripple::TokenType::AccountPublic, strPk);

@@ -62,17 +57,18 @@ doChannelVerify(Context const& context)
     }

     ripple::uint256 channelId;
-    if (!channelId.parseHex(request.at("channel_id").as_string().c_str()))
-        return Status{Error::rpcCHANNEL_MALFORMED, "malformedChannelID"};
+    if (auto const status = getChannelId(request, channelId); status)
+        return status;

-    auto optDrops = ripple::to_uint64(request.at("amount").as_string().c_str());
+    auto optDrops =
+        ripple::to_uint64(request.at(JS(amount)).as_string().c_str());

     if (!optDrops)
         return Status{Error::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"};

     std::uint64_t drops = *optDrops;

-    auto sig = ripple::strUnHex(request.at("signature").as_string().c_str());
+    auto sig = ripple::strUnHex(request.at(JS(signature)).as_string().c_str());

     if (!sig || !sig->size())
         return Status{Error::rpcINVALID_PARAMS, "invalidSignature"};
@@ -81,7 +77,7 @@ doChannelVerify(Context const& context)
     ripple::serializePayChanAuthorization(
         msg, channelId, ripple::XRPAmount(drops));

-    response["signature_verified"] =
+    response[JS(signature_verified)] =
         ripple::verify(*pk, msg.slice(), ripple::makeSlice(*sig), true);

     return response;
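channel_authorize and channel_verify are duals: both serialize the (channelId, drops) claim with serializePayChanAuthorization, one signs it and the other verifies. A round-trip sketch using the same rippled calls as the two handlers; the randomKeyPair setup is an assumption for illustration, not something Clio does:

```cpp
// Round-trip sketch built from the rippled calls used above.
#include <ripple/protocol/KeyType.h>
#include <ripple/protocol/PayChan.h>
#include <ripple/protocol/PublicKey.h>
#include <ripple/protocol/SecretKey.h>
#include <ripple/protocol/Serializer.h>

bool
roundTrip(ripple::uint256 const& channelId, std::uint64_t drops)
{
    // Illustrative keypair; the handlers derive it from the request instead.
    auto const [pk, sk] = ripple::randomKeyPair(ripple::KeyType::secp256k1);

    // channel_authorize: serialize the claim and sign it
    ripple::Serializer msg;
    ripple::serializePayChanAuthorization(
        msg, channelId, ripple::XRPAmount(drops));
    auto const sig = ripple::sign(pk, sk, msg.slice());

    // channel_verify: rebuild the identical serialization and verify
    ripple::Serializer msg2;
    ripple::serializePayChanAuthorization(
        msg2, channelId, ripple::XRPAmount(drops));
    return ripple::verify(
        pk, msg2.slice(), ripple::Slice{sig.data(), sig.size()}, true);
}
```

Any mismatch in channel ID or drop amount changes the serialized message, so verification fails, which is the property channel_verify relies on.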
@@ -9,17 +9,9 @@ doGatewayBalances(Context const& context)
     auto request = context.params;
     boost::json::object response = {};

-    if (!request.contains("account"))
-        return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
-
-    if (!request.at("account").is_string())
-        return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
-
-    auto accountID =
-        accountFromStringStrict(request.at("account").as_string().c_str());
-
-    if (!accountID)
-        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;

     auto v = ledgerInfoFromRequest(context);
     if (auto status = std::get_if<Status>(&v))
@@ -81,7 +73,7 @@ doGatewayBalances(Context const& context)

         if (!valid)
         {
-            response["error"] = "invalidHotWallet";
+            response[JS(error)] = "invalidHotWallet";
             return response;
         }
     }
@@ -148,7 +140,7 @@ doGatewayBalances(Context const& context)

     traverseOwnedNodes(
         *context.backend,
-        *accountID,
+        accountID,
         lgrInfo.seq,
         std::numeric_limits<std::uint32_t>::max(),
         {},
@@ -162,7 +154,7 @@ doGatewayBalances(Context const& context)
         {
             obj[ripple::to_string(k)] = v.getText();
         }
-        response["obligations"] = std::move(obj);
+        response[JS(obligations)] = std::move(obj);
     }

     auto toJson =
@@ -177,9 +169,9 @@ doGatewayBalances(Context const& context)
             for (auto const& balance : accBalances)
             {
                 boost::json::object entry;
-                entry["currency"] =
+                entry[JS(currency)] =
                     ripple::to_string(balance.issue().currency);
-                entry["value"] = balance.getText();
+                entry[JS(value)] = balance.getText();
                 arr.push_back(std::move(entry));
             }
             obj[ripple::to_string(accId)] = std::move(arr);
@@ -189,14 +181,14 @@ doGatewayBalances(Context const& context)
     };

     if (auto balances = toJson(hotBalances); balances.size())
-        response["balances"] = balances;
+        response[JS(balances)] = balances;
     if (auto balances = toJson(frozenBalances); balances.size())
-        response["frozen_balances"] = balances;
+        response[JS(frozen_balances)] = balances;
     if (auto balances = toJson(assets); assets.size())
-        response["assets"] = toJson(assets);
-    response["account"] = request.at("account");
-    response["ledger_index"] = lgrInfo.seq;
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
+        response[JS(assets)] = toJson(assets);
+    response[JS(account)] = request.at(JS(account));
+    response[JS(ledger_index)] = lgrInfo.seq;
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
     return response;
 }
 } // namespace RPC
@@ -10,30 +10,30 @@ doLedger(Context const& context)
     boost::json::object response = {};

     bool binary = false;
-    if (params.contains("binary"))
+    if (params.contains(JS(binary)))
     {
-        if (!params.at("binary").is_bool())
+        if (!params.at(JS(binary)).is_bool())
             return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};

-        binary = params.at("binary").as_bool();
+        binary = params.at(JS(binary)).as_bool();
     }

     bool transactions = false;
-    if (params.contains("transactions"))
+    if (params.contains(JS(transactions)))
     {
-        if (!params.at("transactions").is_bool())
+        if (!params.at(JS(transactions)).is_bool())
             return Status{Error::rpcINVALID_PARAMS, "transactionsFlagNotBool"};

-        transactions = params.at("transactions").as_bool();
+        transactions = params.at(JS(transactions)).as_bool();
     }

     bool expand = false;
-    if (params.contains("expand"))
+    if (params.contains(JS(expand)))
     {
-        if (!params.at("expand").is_bool())
+        if (!params.at(JS(expand)).is_bool())
             return Status{Error::rpcINVALID_PARAMS, "expandFlagNotBool"};

-        expand = params.at("expand").as_bool();
+        expand = params.at(JS(expand)).as_bool();
     }

     bool diff = false;
@@ -54,35 +54,34 @@ doLedger(Context const& context)
     boost::json::object header;
     if (binary)
     {
-        header["ledger_data"] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
+        header[JS(ledger_data)] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
     }
     else
     {
-        header["accepted"] = true;
-        header["account_hash"] = ripple::strHex(lgrInfo.accountHash);
-        header["close_flags"] = lgrInfo.closeFlags;
-        header["close_time"] = lgrInfo.closeTime.time_since_epoch().count();
-        header["close_time_human"] = ripple::to_string(lgrInfo.closeTime);
-        ;
-        header["close_time_resolution"] = lgrInfo.closeTimeResolution.count();
-        header["closed"] = true;
-        header["hash"] = ripple::strHex(lgrInfo.hash);
-        header["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-        header["ledger_index"] = std::to_string(lgrInfo.seq);
-        header["parent_close_time"] =
+        header[JS(accepted)] = true;
+        header[JS(account_hash)] = ripple::strHex(lgrInfo.accountHash);
+        header[JS(close_flags)] = lgrInfo.closeFlags;
+        header[JS(close_time)] = lgrInfo.closeTime.time_since_epoch().count();
+        header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime);
+        header[JS(close_time_resolution)] = lgrInfo.closeTimeResolution.count();
+        header[JS(closed)] = true;
+        header[JS(hash)] = ripple::strHex(lgrInfo.hash);
+        header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+        header[JS(ledger_index)] = std::to_string(lgrInfo.seq);
+        header[JS(parent_close_time)] =
             lgrInfo.parentCloseTime.time_since_epoch().count();
-        header["parent_hash"] = ripple::strHex(lgrInfo.parentHash);
-        header["seqNum"] = std::to_string(lgrInfo.seq);
-        header["totalCoins"] = ripple::to_string(lgrInfo.drops);
-        header["total_coins"] = ripple::to_string(lgrInfo.drops);
-        header["transaction_hash"] = ripple::strHex(lgrInfo.txHash);
+        header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash);
+        header[JS(seqNum)] = std::to_string(lgrInfo.seq);
+        header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops);
+        header[JS(total_coins)] = ripple::to_string(lgrInfo.drops);
+        header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash);
     }
-    header["closed"] = true;
+    header[JS(closed)] = true;

     if (transactions)
     {
-        header["transactions"] = boost::json::value(boost::json::array_kind);
-        boost::json::array& jsonTxs = header.at("transactions").as_array();
+        header[JS(transactions)] = boost::json::value(boost::json::array_kind);
+        boost::json::array& jsonTxs = header.at(JS(transactions)).as_array();
         if (expand)
         {
             auto txns = context.backend->fetchAllTransactionsInLedger(
@@ -98,14 +97,14 @@ doLedger(Context const& context)
                 {
                     auto [txn, meta] = toExpandedJson(obj);
                     entry = txn;
-                    entry["metaData"] = meta;
+                    entry[JS(metaData)] = meta;
                 }
                 else
                 {
-                    entry["tx_blob"] = ripple::strHex(obj.transaction);
-                    entry["meta"] = ripple::strHex(obj.metadata);
+                    entry[JS(tx_blob)] = ripple::strHex(obj.transaction);
+                    entry[JS(meta)] = ripple::strHex(obj.metadata);
                 }
-                // entry["ledger_index"] = obj.ledgerSequence;
+                // entry[JS(ledger_index)] = obj.ledgerSequence;
                 return entry;
             });
         }
@@ -133,7 +132,7 @@ doLedger(Context const& context)
         for (auto const& obj : diff)
         {
             boost::json::object entry;
-            entry["id"] = ripple::strHex(obj.key);
+            entry["object_id"] = ripple::strHex(obj.key);
             if (binary)
                 entry["object"] = ripple::strHex(obj.blob);
             else if (obj.blob.size())
@@ -149,9 +148,9 @@ doLedger(Context const& context)
         }
     }

-    response["ledger"] = header;
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
+    response[JS(ledger)] = header;
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;
     return response;
 }

@@ -28,23 +28,15 @@ doLedgerData(Context const& context)
     auto request = context.params;
     boost::json::object response = {};

-    bool binary = false;
-    if (request.contains("binary"))
-    {
-        if (!request.at("binary").is_bool())
-            return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
-
-        binary = request.at("binary").as_bool();
-    }
+    bool const binary = getBool(request, "binary", false);

-    std::size_t limit = binary ? 2048 : 256;
-    if (request.contains("limit"))
-    {
-        if (!request.at("limit").is_int64())
-            return Status{Error::rpcINVALID_PARAMS, "limitNotInteger"};
-
-        limit = boost::json::value_to<int>(request.at("limit"));
-    }
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;
+
+    if (!binary)
+        limit = std::clamp(limit, {1}, {256});
+
     bool outOfOrder = false;
     if (request.contains("out_of_order"))
     {
@@ -53,18 +45,18 @@ doLedgerData(Context const& context)
         outOfOrder = request.at("out_of_order").as_bool();
     }

-    std::optional<ripple::uint256> cursor;
-    std::optional<uint32_t> diffCursor;
-    if (request.contains("marker"))
+    std::optional<ripple::uint256> marker;
+    std::optional<uint32_t> diffMarker;
+    if (request.contains(JS(marker)))
     {
-        if (!request.at("marker").is_string())
+        if (!request.at(JS(marker)).is_string())
         {
             if (outOfOrder)
             {
-                if (!request.at("marker").is_int64())
+                if (!request.at(JS(marker)).is_int64())
                     return Status{
                         Error::rpcINVALID_PARAMS, "markerNotStringOrInt"};
-                diffCursor = value_to<uint32_t>(request.at("marker"));
+                diffMarker = value_to<uint32_t>(request.at(JS(marker)));
             }
             else
                 return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
@@ -73,8 +65,8 @@ doLedgerData(Context const& context)
         {
             BOOST_LOG_TRIVIAL(debug) << __func__ << " : parsing marker";

-            cursor = ripple::uint256{};
-            if (!cursor->parseHex(request.at("marker").as_string().c_str()))
+            marker = ripple::uint256{};
+            if (!marker->parseHex(request.at(JS(marker)).as_string().c_str()))
                 return Status{Error::rpcINVALID_PARAMS, "markerMalformed"};
         }
     }
@@ -84,49 +76,58 @@ doLedgerData(Context const& context)
         return *status;

     auto lgrInfo = std::get<ripple::LedgerInfo>(v);

     boost::json::object header;
-    // no cursor means this is the first call, so we return header info
-    if (!cursor)
+    // no marker means this is the first call, so we return header info
+    if (!request.contains(JS(marker)))
     {
         if (binary)
         {
-            header["ledger_data"] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
+            header[JS(ledger_data)] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
         }
         else
         {
-            header["accepted"] = true;
-            header["account_hash"] = ripple::strHex(lgrInfo.accountHash);
-            header["close_flags"] = lgrInfo.closeFlags;
-            header["close_time"] = lgrInfo.closeTime.time_since_epoch().count();
-            header["close_time_human"] = ripple::to_string(lgrInfo.closeTime);
-            ;
-            header["close_time_resolution"] =
-                lgrInfo.closeTimeResolution.count();
-            header["closed"] = true;
-            header["hash"] = ripple::strHex(lgrInfo.hash);
-            header["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-            header["ledger_index"] = std::to_string(lgrInfo.seq);
-            header["parent_close_time"] =
+            header[JS(accepted)] = true;
+            header[JS(account_hash)] = ripple::strHex(lgrInfo.accountHash);
+            header[JS(close_flags)] = lgrInfo.closeFlags;
+            header[JS(close_time)] =
+                lgrInfo.closeTime.time_since_epoch().count();
+            header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime);
+            header[JS(close_time_resolution)] =
+                lgrInfo.closeTimeResolution.count();
+            header[JS(closed)] = true;
+            header[JS(hash)] = ripple::strHex(lgrInfo.hash);
+            header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+            header[JS(ledger_index)] = std::to_string(lgrInfo.seq);
+            header[JS(parent_close_time)] =
                 lgrInfo.parentCloseTime.time_since_epoch().count();
-            header["parent_hash"] = ripple::strHex(lgrInfo.parentHash);
-            header["seqNum"] = std::to_string(lgrInfo.seq);
-            header["totalCoins"] = ripple::to_string(lgrInfo.drops);
-            header["total_coins"] = ripple::to_string(lgrInfo.drops);
-            header["transaction_hash"] = ripple::strHex(lgrInfo.txHash);
-
-            response["ledger"] = header;
+            header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash);
+            header[JS(seqNum)] = std::to_string(lgrInfo.seq);
+            header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops);
+            header[JS(total_coins)] = ripple::to_string(lgrInfo.drops);
+            header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash);
         }
+
+        response[JS(ledger)] = header;
|
||||||
}
|
}
|
||||||
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
|
else
|
||||||
response["ledger_index"] = lgrInfo.seq;
|
{
|
||||||
|
if (!outOfOrder &&
|
||||||
|
!context.backend->fetchLedgerObject(
|
||||||
|
*marker, lgrInfo.seq, context.yield))
|
||||||
|
return Status{Error::rpcINVALID_PARAMS, "markerDoesNotExist"};
|
||||||
|
}
|
||||||
|
|
||||||
|
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
|
||||||
|
response[JS(ledger_index)] = lgrInfo.seq;
|
||||||
|
|
||||||
auto start = std::chrono::system_clock::now();
|
auto start = std::chrono::system_clock::now();
|
||||||
std::vector<Backend::LedgerObject> results;
|
std::vector<Backend::LedgerObject> results;
|
||||||
if (diffCursor)
|
if (diffMarker)
|
||||||
{
|
{
|
||||||
assert(outOfOrder);
|
assert(outOfOrder);
|
||||||
auto diff =
|
auto diff =
|
||||||
context.backend->fetchLedgerDiff(*diffCursor, context.yield);
|
context.backend->fetchLedgerDiff(*diffMarker, context.yield);
|
||||||
std::vector<ripple::uint256> keys;
|
std::vector<ripple::uint256> keys;
|
||||||
for (auto&& [key, object] : diff)
|
for (auto&& [key, object] : diff)
|
||||||
{
|
{
|
||||||
@@ -143,13 +144,13 @@ doLedgerData(Context const& context)
|
|||||||
if (obj.size())
|
if (obj.size())
|
||||||
results.push_back({std::move(keys[i]), std::move(obj)});
|
results.push_back({std::move(keys[i]), std::move(obj)});
|
||||||
}
|
}
|
||||||
if (*diffCursor > lgrInfo.seq)
|
if (*diffMarker > lgrInfo.seq)
|
||||||
response["marker"] = *diffCursor - 1;
|
response["marker"] = *diffMarker - 1;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
auto page = context.backend->fetchLedgerPage(
|
auto page = context.backend->fetchLedgerPage(
|
||||||
cursor, lgrInfo.seq, limit, outOfOrder, context.yield);
|
marker, lgrInfo.seq, limit, outOfOrder, context.yield);
|
||||||
results = std::move(page.objects);
|
results = std::move(page.objects);
|
||||||
if (page.cursor)
|
if (page.cursor)
|
||||||
response["marker"] = ripple::strHex(*(page.cursor));
|
response["marker"] = ripple::strHex(*(page.cursor));
|
||||||
@@ -175,14 +176,14 @@ doLedgerData(Context const& context)
|
|||||||
if (binary)
|
if (binary)
|
||||||
{
|
{
|
||||||
boost::json::object entry;
|
boost::json::object entry;
|
||||||
entry["data"] = ripple::serializeHex(sle);
|
entry[JS(data)] = ripple::serializeHex(sle);
|
||||||
entry["index"] = ripple::to_string(sle.key());
|
entry[JS(index)] = ripple::to_string(sle.key());
|
||||||
objects.push_back(std::move(entry));
|
objects.push_back(std::move(entry));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
objects.push_back(toJson(sle));
|
objects.push_back(toJson(sle));
|
||||||
}
|
}
|
||||||
response["state"] = std::move(objects);
|
response[JS(state)] = std::move(objects);
|
||||||
auto end2 = std::chrono::system_clock::now();
|
auto end2 = std::chrono::system_clock::now();
|
||||||
|
|
||||||
time = std::chrono::duration_cast<std::chrono::microseconds>(end2 - end)
|
time = std::chrono::duration_cast<std::chrono::microseconds>(end2 - end)
|
||||||
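doLedgerData now delegates parameter parsing to shared helpers (getBool, getLimit) instead of validating each field inline per handler. The helper signatures are inferred from the call sites above; a minimal sketch of the getBool pattern under that assumption (the real clio helper may instead surface a Status on type mismatch):

#include <boost/json.hpp>
#include <string>

// Hypothetical sketch of a getBool-style helper: return the default
// when the field is absent, otherwise coerce the boost::json value.
bool
getBool(boost::json::object const& request, std::string const& field, bool dflt)
{
    if (!request.contains(field))
        return dflt;
    return request.at(field).as_bool();  // throws on a non-bool value
}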
@@ -20,8 +20,7 @@ doLedgerEntry(Context const& context)
     auto request = context.params;
     boost::json::object response = {};

-    bool binary =
-        request.contains("binary") ? request.at("binary").as_bool() : false;
+    bool const binary = getBool(request, "binary", false);

     auto v = ledgerInfoFromRequest(context);
     if (auto status = std::get_if<Status>(&v))
@@ -30,59 +29,64 @@ doLedgerEntry(Context const& context)
     auto lgrInfo = std::get<ripple::LedgerInfo>(v);

     ripple::uint256 key;
-    if (request.contains("index"))
+    if (request.contains(JS(index)))
     {
-        if (!request.at("index").is_string())
+        if (!request.at(JS(index)).is_string())
             return Status{Error::rpcINVALID_PARAMS, "indexNotString"};

-        if (!key.parseHex(request.at("index").as_string().c_str()))
+        if (!key.parseHex(request.at(JS(index)).as_string().c_str()))
             return Status{Error::rpcINVALID_PARAMS, "malformedIndex"};
     }
-    else if (request.contains("account_root"))
+    else if (request.contains(JS(account_root)))
     {
-        if (!request.at("account_root").is_string())
+        if (!request.at(JS(account_root)).is_string())
             return Status{Error::rpcINVALID_PARAMS, "account_rootNotString"};

         auto const account = ripple::parseBase58<ripple::AccountID>(
-            request.at("account_root").as_string().c_str());
+            request.at(JS(account_root)).as_string().c_str());
         if (!account || account->isZero())
             return Status{Error::rpcINVALID_PARAMS, "malformedAddress"};
         else
             key = ripple::keylet::account(*account).key;
     }
-    else if (request.contains("check"))
+    else if (request.contains(JS(check)))
     {
-        if (!request.at("check").is_string())
+        if (!request.at(JS(check)).is_string())
             return Status{Error::rpcINVALID_PARAMS, "checkNotString"};

-        if (!key.parseHex(request.at("check").as_string().c_str()))
+        if (!key.parseHex(request.at(JS(check)).as_string().c_str()))
         {
             return Status{Error::rpcINVALID_PARAMS, "checkMalformed"};
         }
     }
-    else if (request.contains("deposit_preauth"))
+    else if (request.contains(JS(deposit_preauth)))
     {
-        if (!request.at("deposit_preauth").is_object())
+        if (!request.at(JS(deposit_preauth)).is_object())
         {
-            if (!request.at("deposit_preauth").is_string() ||
+            if (!request.at(JS(deposit_preauth)).is_string() ||
                 !key.parseHex(
-                    request.at("deposit_preauth").as_string().c_str()))
+                    request.at(JS(deposit_preauth)).as_string().c_str()))
             {
                 return Status{
                     Error::rpcINVALID_PARAMS, "deposit_preauthMalformed"};
             }
         }
         else if (
-            !request.at("deposit_preauth").as_object().contains("owner") ||
-            !request.at("deposit_preauth").as_object().at("owner").is_string())
+            !request.at(JS(deposit_preauth)).as_object().contains(JS(owner)) ||
+            !request.at(JS(deposit_preauth))
+                 .as_object()
+                 .at(JS(owner))
+                 .is_string())
         {
             return Status{Error::rpcINVALID_PARAMS, "ownerNotString"};
         }
         else if (
-            !request.at("deposit_preauth").as_object().contains("authorized") ||
-            !request.at("deposit_preauth")
-                .as_object()
-                .at("authorized")
-                .is_string())
+            !request.at(JS(deposit_preauth))
+                 .as_object()
+                 .contains(JS(authorized)) ||
+            !request.at(JS(deposit_preauth))
+                 .as_object()
+                 .at(JS(authorized))
+                 .is_string())
         {
             return Status{Error::rpcINVALID_PARAMS, "authorizedNotString"};
@@ -90,13 +94,13 @@ doLedgerEntry(Context const& context)
         else
         {
             boost::json::object const& deposit_preauth =
-                request.at("deposit_preauth").as_object();
+                request.at(JS(deposit_preauth)).as_object();

             auto const owner = ripple::parseBase58<ripple::AccountID>(
-                deposit_preauth.at("owner").as_string().c_str());
+                deposit_preauth.at(JS(owner)).as_string().c_str());

             auto const authorized = ripple::parseBase58<ripple::AccountID>(
-                deposit_preauth.at("authorized").as_string().c_str());
+                deposit_preauth.at(JS(authorized)).as_string().c_str());

             if (!owner)
                 return Status{Error::rpcINVALID_PARAMS, "malformedOwner"};
@@ -106,37 +110,37 @@ doLedgerEntry(Context const& context)
             key = ripple::keylet::depositPreauth(*owner, *authorized).key;
         }
     }
-    else if (request.contains("directory"))
+    else if (request.contains(JS(directory)))
     {
-        if (!request.at("directory").is_object())
+        if (!request.at(JS(directory)).is_object())
         {
-            if (!request.at("directory").is_string())
+            if (!request.at(JS(directory)).is_string())
                 return Status{Error::rpcINVALID_PARAMS, "directoryNotString"};

-            if (!key.parseHex(request.at("directory").as_string().c_str()))
+            if (!key.parseHex(request.at(JS(directory)).as_string().c_str()))
             {
                 return Status{Error::rpcINVALID_PARAMS, "malformedDirectory"};
             }
         }
         else if (
-            request.at("directory").as_object().contains("sub_index") &&
-            !request.at("directory").as_object().at("sub_index").is_int64())
+            request.at(JS(directory)).as_object().contains(JS(sub_index)) &&
+            !request.at(JS(directory)).as_object().at(JS(sub_index)).is_int64())
         {
             return Status{Error::rpcINVALID_PARAMS, "sub_indexNotInt"};
         }
         else
         {
-            auto directory = request.at("directory").as_object();
-            std::uint64_t subIndex = directory.contains("sub_index")
+            auto directory = request.at(JS(directory)).as_object();
+            std::uint64_t subIndex = directory.contains(JS(sub_index))
                 ? boost::json::value_to<std::uint64_t>(
-                      directory.at("sub_index"))
+                      directory.at(JS(sub_index)))
                 : 0;

-            if (directory.contains("dir_root"))
+            if (directory.contains(JS(dir_root)))
             {
                 ripple::uint256 uDirRoot;

-                if (directory.contains("owner"))
+                if (directory.contains(JS(owner)))
                 {
                     // May not specify both dir_root and owner.
                     return Status{
@@ -144,7 +148,7 @@ doLedgerEntry(Context const& context)
                         "mayNotSpecifyBothDirRootAndOwner"};
                 }
                 else if (!uDirRoot.parseHex(
-                             directory.at("dir_root").as_string().c_str()))
+                             directory.at(JS(dir_root)).as_string().c_str()))
                 {
                     return Status{Error::rpcINVALID_PARAMS, "malformedDirRoot"};
                 }
@@ -153,10 +157,10 @@ doLedgerEntry(Context const& context)
                     key = ripple::keylet::page(uDirRoot, subIndex).key;
                 }
             }
-            else if (directory.contains("owner"))
+            else if (directory.contains(JS(owner)))
             {
                 auto const ownerID = ripple::parseBase58<ripple::AccountID>(
-                    directory.at("owner").as_string().c_str());
+                    directory.at(JS(owner)).as_string().c_str());

                 if (!ownerID)
                 {
@@ -176,31 +180,31 @@ doLedgerEntry(Context const& context)
             }
         }
     }
-    else if (request.contains("escrow"))
+    else if (request.contains(JS(escrow)))
     {
-        if (!request.at("escrow").is_object())
+        if (!request.at(JS(escrow)).is_object())
         {
-            if (!key.parseHex(request.at("escrow").as_string().c_str()))
+            if (!key.parseHex(request.at(JS(escrow)).as_string().c_str()))
                 return Status{Error::rpcINVALID_PARAMS, "malformedEscrow"};
         }
         else if (
-            !request.at("escrow").as_object().contains("owner") ||
-            !request.at("escrow").as_object().at("owner").is_string())
+            !request.at(JS(escrow)).as_object().contains(JS(owner)) ||
+            !request.at(JS(escrow)).as_object().at(JS(owner)).is_string())
         {
             return Status{Error::rpcINVALID_PARAMS, "malformedOwner"};
         }
         else if (
-            !request.at("escrow").as_object().contains("seq") ||
-            !request.at("escrow").as_object().at("seq").is_int64())
+            !request.at(JS(escrow)).as_object().contains(JS(seq)) ||
+            !request.at(JS(escrow)).as_object().at(JS(seq)).is_int64())
         {
             return Status{Error::rpcINVALID_PARAMS, "malformedSeq"};
         }
         else
         {
             auto const id =
-                ripple::parseBase58<ripple::AccountID>(request.at("escrow")
+                ripple::parseBase58<ripple::AccountID>(request.at(JS(escrow))
                                                            .as_object()
-                                                           .at("owner")
+                                                           .at(JS(owner))
                                                            .as_string()
                                                            .c_str());

@@ -209,120 +213,122 @@ doLedgerEntry(Context const& context)
             else
             {
                 std::uint32_t seq =
-                    request.at("escrow").as_object().at("seq").as_int64();
+                    request.at(JS(escrow)).as_object().at(JS(seq)).as_int64();
                 key = ripple::keylet::escrow(*id, seq).key;
             }
         }
     }
-    else if (request.contains("offer"))
+    else if (request.contains(JS(offer)))
     {
-        if (!request.at("offer").is_object())
+        if (!request.at(JS(offer)).is_object())
         {
-            if (!key.parseHex(request.at("offer").as_string().c_str()))
+            if (!key.parseHex(request.at(JS(offer)).as_string().c_str()))
                 return Status{Error::rpcINVALID_PARAMS, "malformedOffer"};
         }
         else if (
-            !request.at("offer").as_object().contains("account") ||
-            !request.at("offer").as_object().at("account").is_string())
+            !request.at(JS(offer)).as_object().contains(JS(account)) ||
+            !request.at(JS(offer)).as_object().at(JS(account)).is_string())
         {
             return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
         }
         else if (
-            !request.at("offer").as_object().contains("seq") ||
-            !request.at("offer").as_object().at("seq").is_int64())
+            !request.at(JS(offer)).as_object().contains(JS(seq)) ||
+            !request.at(JS(offer)).as_object().at(JS(seq)).is_int64())
         {
             return Status{Error::rpcINVALID_PARAMS, "malformedSeq"};
         }
         else
         {
-            auto offer = request.at("offer").as_object();
+            auto offer = request.at(JS(offer)).as_object();
             auto const id = ripple::parseBase58<ripple::AccountID>(
-                offer.at("account").as_string().c_str());
+                offer.at(JS(account)).as_string().c_str());

             if (!id)
                 return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
             else
             {
                 std::uint32_t seq =
-                    boost::json::value_to<std::uint32_t>(offer.at("seq"));
+                    boost::json::value_to<std::uint32_t>(offer.at(JS(seq)));
                 key = ripple::keylet::offer(*id, seq).key;
             }
         }
     }
-    else if (request.contains("payment_channel"))
+    else if (request.contains(JS(payment_channel)))
    {
-        if (!request.at("payment_channel").is_string())
+        if (!request.at(JS(payment_channel)).is_string())
             return Status{Error::rpcINVALID_PARAMS, "paymentChannelNotString"};

-        if (!key.parseHex(request.at("payment_channel").as_string().c_str()))
+        if (!key.parseHex(request.at(JS(payment_channel)).as_string().c_str()))
             return Status{Error::rpcINVALID_PARAMS, "malformedPaymentChannel"};
     }
-    else if (request.contains("ripple_state"))
+    else if (request.contains(JS(ripple_state)))
     {
-        if (!request.at("ripple_state").is_object())
+        if (!request.at(JS(ripple_state)).is_object())
             return Status{Error::rpcINVALID_PARAMS, "rippleStateNotObject"};

         ripple::Currency currency;
         boost::json::object const& state =
-            request.at("ripple_state").as_object();
+            request.at(JS(ripple_state)).as_object();

-        if (!state.contains("currency") || !state.at("currency").is_string())
+        if (!state.contains(JS(currency)) ||
+            !state.at(JS(currency)).is_string())
         {
             return Status{Error::rpcINVALID_PARAMS, "malformedCurrency"};
         }

-        if (!state.contains("accounts") || !state.at("accounts").is_array() ||
-            2 != state.at("accounts").as_array().size() ||
-            !state.at("accounts").as_array().at(0).is_string() ||
-            !state.at("accounts").as_array().at(1).is_string() ||
-            (state.at("accounts").as_array().at(0).as_string() ==
-             state.at("accounts").as_array().at(1).as_string()))
+        if (!state.contains(JS(accounts)) ||
+            !state.at(JS(accounts)).is_array() ||
+            2 != state.at(JS(accounts)).as_array().size() ||
+            !state.at(JS(accounts)).as_array().at(0).is_string() ||
+            !state.at(JS(accounts)).as_array().at(1).is_string() ||
+            (state.at(JS(accounts)).as_array().at(0).as_string() ==
+             state.at(JS(accounts)).as_array().at(1).as_string()))
         {
             return Status{Error::rpcINVALID_PARAMS, "malformedAccounts"};
         }

         auto const id1 = ripple::parseBase58<ripple::AccountID>(
-            state.at("accounts").as_array().at(0).as_string().c_str());
+            state.at(JS(accounts)).as_array().at(0).as_string().c_str());
         auto const id2 = ripple::parseBase58<ripple::AccountID>(
-            state.at("accounts").as_array().at(1).as_string().c_str());
+            state.at(JS(accounts)).as_array().at(1).as_string().c_str());

         if (!id1 || !id2)
             return Status{Error::rpcINVALID_PARAMS, "malformedAccounts"};

         else if (!ripple::to_currency(
-                     currency, state.at("currency").as_string().c_str()))
+                     currency, state.at(JS(currency)).as_string().c_str()))
             return Status{Error::rpcINVALID_PARAMS, "malformedCurrency"};

         key = ripple::keylet::line(*id1, *id2, currency).key;
     }
-    else if (request.contains("ticket"))
+    else if (request.contains(JS(ticket)))
     {
-        if (!request.at("ticket").is_object())
+        if (!request.at(JS(ticket)).is_object())
         {
-            if (!request.at("ticket").is_string())
+            if (!request.at(JS(ticket)).is_string())
                 return Status{Error::rpcINVALID_PARAMS, "ticketNotString"};

-            if (!key.parseHex(request.at("ticket").as_string().c_str()))
+            if (!key.parseHex(request.at(JS(ticket)).as_string().c_str()))
                 return Status{Error::rpcINVALID_PARAMS, "malformedTicket"};
         }
         else if (
-            !request.at("ticket").as_object().contains("account") ||
-            !request.at("ticket").as_object().at("account").is_string())
+            !request.at(JS(ticket)).as_object().contains(JS(account)) ||
+            !request.at(JS(ticket)).as_object().at(JS(account)).is_string())
         {
             return Status{Error::rpcINVALID_PARAMS, "malformedTicketAccount"};
         }
         else if (
-            !request.at("ticket").as_object().contains("ticket_seq") ||
-            !request.at("ticket").as_object().at("ticket_seq").is_int64())
+            !request.at(JS(ticket)).as_object().contains(JS(ticket_seq)) ||
+            !request.at(JS(ticket)).as_object().at(JS(ticket_seq)).is_int64())
         {
             return Status{Error::rpcINVALID_PARAMS, "malformedTicketSeq"};
         }
         else
         {
             auto const id =
-                ripple::parseBase58<ripple::AccountID>(request.at("ticket")
+                ripple::parseBase58<ripple::AccountID>(request.at(JS(ticket))
                                                            .as_object()
-                                                           .at("account")
+                                                           .at(JS(account))
                                                            .as_string()
                                                            .c_str());

@@ -331,8 +337,10 @@ doLedgerEntry(Context const& context)
                     Error::rpcINVALID_PARAMS, "malformedTicketAccount"};
             else
             {
-                std::uint32_t seq =
-                    request.at("offer").as_object().at("ticket_seq").as_int64();
+                std::uint32_t seq = request.at(JS(offer))
+                                        .as_object()
+                                        .at(JS(ticket_seq))
+                                        .as_int64();

                 key = ripple::getTicketIndex(*id, seq);
             }
@@ -343,27 +351,25 @@ doLedgerEntry(Context const& context)
         return Status{Error::rpcINVALID_PARAMS, "unknownOption"};
     }

-    auto start = std::chrono::system_clock::now();
     auto dbResponse =
         context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield);
-    auto end = std::chrono::system_clock::now();

     if (!dbResponse or dbResponse->size() == 0)
-        return Status{Error::rpcOBJECT_NOT_FOUND, "entryNotFound"};
+        return Status{"entryNotFound"};

-    response["index"] = ripple::strHex(key);
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
-    response["ledger_index"] = lgrInfo.seq;
+    response[JS(index)] = ripple::strHex(key);
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;

     if (binary)
     {
-        response["node_binary"] = ripple::strHex(*dbResponse);
+        response[JS(node_binary)] = ripple::strHex(*dbResponse);
     }
     else
     {
         ripple::STLedgerEntry sle{
             ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key};
-        response["node"] = toJson(sle);
+        response[JS(node)] = toJson(sle);
     }

     return response;
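Every branch of doLedgerEntry reduces its sub-object to a single ripple::uint256 key and then performs one fetchLedgerObject call. A minimal sketch of that shape for the account_root case, using the same rippled keylet helpers as the diff (the function name and wrapper are hypothetical, for illustration only):

#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/Indexes.h>
#include <optional>
#include <string>

// Hypothetical sketch: derive the ledger object key for an AccountRoot
// entry from a classic address, as the account_root branch above does.
std::optional<ripple::uint256>
accountRootKey(std::string const& address)
{
    auto const account = ripple::parseBase58<ripple::AccountID>(address);
    if (!account || account->isZero())
        return std::nullopt;  // the handler reports malformedAddress here
    return ripple::keylet::account(*account).key;
}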
@@ -16,11 +16,11 @@ doLedgerRange(Context const& context)
     }
     else
     {
-        response["ledger_index_min"] = range->minSequence;
-        response["ledger_index_max"] = range->maxSequence;
+        response[JS(ledger_index_min)] = range->minSequence;
+        response[JS(ledger_index_max)] = range->maxSequence;
     }

     return response;
 }

 } // namespace RPC
src/rpc/handlers/NFTInfo.cpp (new file, 146 lines)
@@ -0,0 +1,146 @@
+#include <ripple/app/tx/impl/details/NFTokenUtils.h>
+#include <ripple/protocol/Indexes.h>
+#include <boost/json.hpp>
+
+#include <backend/BackendInterface.h>
+#include <rpc/RPCHelpers.h>
+
+// {
+//   nft_id: <ident>
+//   ledger_hash: <ledger>
+//   ledger_index: <ledger_index>
+// }
+
+namespace RPC {
+
+std::variant<std::monostate, std::string, Status>
+getURI(Backend::NFT const& dbResponse, Context const& context)
+{
+    // Fetch URI from ledger
+    // The correct page will be > bookmark and <= last. We need to calculate
+    // the first possible page however, since bookmark is not guaranteed to
+    // exist.
+    auto const bookmark = ripple::keylet::nftpage(
+        ripple::keylet::nftpage_min(dbResponse.owner), dbResponse.tokenID);
+    auto const last = ripple::keylet::nftpage_max(dbResponse.owner);
+
+    ripple::uint256 nextKey = last.key;
+    std::optional<ripple::STLedgerEntry> sle;
+
+    // when this loop terminates, `sle` will contain the correct page for
+    // this NFT.
+    //
+    // 1) We start at the last NFTokenPage, which is guaranteed to exist,
+    // grab the object from the DB and deserialize it.
+    //
+    // 2) If that NFTokenPage has a PreviousPageMin value and the
+    // PreviousPageMin value is > bookmark, restart loop. Otherwise
+    // terminate and use the `sle` from this iteration.
+    do
+    {
+        auto const blob = context.backend->fetchLedgerObject(
+            ripple::Keylet(ripple::ltNFTOKEN_PAGE, nextKey).key,
+            dbResponse.ledgerSequence,
+            context.yield);
+
+        if (!blob || blob->size() == 0)
+            return Status{
+                Error::rpcINTERNAL, "Cannot find NFTokenPage for this NFT"};
+
+        sle = ripple::STLedgerEntry(
+            ripple::SerialIter{blob->data(), blob->size()}, nextKey);
+
+        if (sle->isFieldPresent(ripple::sfPreviousPageMin))
+            nextKey = sle->getFieldH256(ripple::sfPreviousPageMin);
+
+    } while (sle && sle->key() != nextKey && nextKey > bookmark.key);
+
+    if (!sle)
+        return Status{
+            Error::rpcINTERNAL, "Cannot find NFTokenPage for this NFT"};
+
+    auto const nfts = sle->getFieldArray(ripple::sfNFTokens);
+    auto const nft = std::find_if(
+        nfts.begin(),
+        nfts.end(),
+        [&dbResponse](ripple::STObject const& candidate) {
+            return candidate.getFieldH256(ripple::sfNFTokenID) ==
+                dbResponse.tokenID;
+        });
+
+    if (nft == nfts.end())
+        return Status{
+            Error::rpcINTERNAL, "Cannot find NFTokenPage for this NFT"};
+
+    ripple::Blob const uriField = nft->getFieldVL(ripple::sfURI);
+
+    // NOTE this cannot use a ternary or value_or because then the
+    // expression's type is unclear. We want to explicitly set the `uri`
+    // field to null when not present to avoid any confusion.
+    if (std::string const uri = std::string(uriField.begin(), uriField.end());
+        uri.size() > 0)
+        return uri;
+    return std::monostate{};
+}
+
+Result
+doNFTInfo(Context const& context)
+{
+    auto request = context.params;
+    boost::json::object response = {};
+
+    if (!request.contains("nft_id"))
+        return Status{Error::rpcINVALID_PARAMS, "Missing nft_id"};
+
+    auto const& jsonTokenID = request.at("nft_id");
+    if (!jsonTokenID.is_string())
+        return Status{Error::rpcINVALID_PARAMS, "nft_id is not a string"};
+
+    ripple::uint256 tokenID;
+    if (!tokenID.parseHex(jsonTokenID.as_string().c_str()))
+        return Status{Error::rpcINVALID_PARAMS, "Malformed nft_id"};
+
+    // We only need to fetch the ledger header because the ledger hash is
+    // supposed to be included in the response. The ledger sequence is specified
+    // in the request
+    auto v = ledgerInfoFromRequest(context);
+    if (auto status = std::get_if<Status>(&v))
+        return *status;
+    ripple::LedgerInfo lgrInfo = std::get<ripple::LedgerInfo>(v);
+
+    std::optional<Backend::NFT> dbResponse =
+        context.backend->fetchNFT(tokenID, lgrInfo.seq, context.yield);
+    if (!dbResponse)
+        return Status{Error::rpcOBJECT_NOT_FOUND, "NFT not found"};
+
+    response["nft_id"] = ripple::strHex(dbResponse->tokenID);
+    response["ledger_index"] = dbResponse->ledgerSequence;
+    response["owner"] = ripple::toBase58(dbResponse->owner);
+    response["is_burned"] = dbResponse->isBurned;
+
+    response["flags"] = ripple::nft::getFlags(dbResponse->tokenID);
+    response["transfer_fee"] = ripple::nft::getTransferFee(dbResponse->tokenID);
+    response["issuer"] =
+        ripple::toBase58(ripple::nft::getIssuer(dbResponse->tokenID));
+    response["nft_taxon"] =
+        ripple::nft::toUInt32(ripple::nft::getTaxon(dbResponse->tokenID));
+    response["nft_sequence"] = ripple::nft::getSerial(dbResponse->tokenID);
+
+    if (!dbResponse->isBurned)
+    {
+        auto const maybeURI = getURI(*dbResponse, context);
+        // An error occurred
+        if (Status const* status = std::get_if<Status>(&maybeURI))
+            return *status;
+        // A URI was found
+        if (std::string const* uri = std::get_if<std::string>(&maybeURI))
+            response["uri"] = *uri;
+        // A URI was not found, explicitly set to null
+        else
+            response["uri"] = nullptr;
+    }
+
+    return response;
+}
+
+} // namespace RPC
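For reference, a minimal sketch of a client request for this new handler, built with boost::json. The field names are taken from the handler above; the command name is an assumption based on the handler name, and the ledger selectors are optional:

#include <boost/json.hpp>
#include <string>

// Hypothetical example request for the handler added above, assuming it
// is registered under the method name "nft_info".
boost::json::object
makeNftInfoRequest(std::string const& tokenIDHex)
{
    boost::json::object request;
    request["command"] = "nft_info";
    request["nft_id"] = tokenIDHex;  // required: 64-char hex uint256
    // optional: request["ledger_index"] = ...; request["ledger_hash"] = ...;
    return request;
}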
src/rpc/handlers/NFTOffers.cpp (new file, 178 lines)
@@ -0,0 +1,178 @@
+#include <ripple/app/ledger/Ledger.h>
+#include <ripple/basics/StringUtilities.h>
+#include <ripple/protocol/ErrorCodes.h>
+#include <ripple/protocol/Indexes.h>
+#include <ripple/protocol/STLedgerEntry.h>
+#include <ripple/protocol/jss.h>
+#include <boost/json.hpp>
+#include <algorithm>
+#include <rpc/RPCHelpers.h>
+
+namespace RPC {
+
+static void
+appendNftOfferJson(ripple::SLE const& offer, boost::json::array& offers)
+{
+    offers.push_back(boost::json::object_kind);
+    boost::json::object& obj(offers.back().as_object());
+
+    obj[JS(index)] = ripple::to_string(offer.key());
+    obj[JS(flags)] = (offer)[ripple::sfFlags];
+    obj[JS(owner)] = ripple::toBase58(offer.getAccountID(ripple::sfOwner));
+
+    if (offer.isFieldPresent(ripple::sfDestination))
+        obj[JS(destination)] =
+            ripple::toBase58(offer.getAccountID(ripple::sfDestination));
+
+    if (offer.isFieldPresent(ripple::sfExpiration))
+        obj[JS(expiration)] = offer.getFieldU32(ripple::sfExpiration);
+
+    obj[JS(amount)] = toBoostJson(offer.getFieldAmount(ripple::sfAmount)
+                                      .getJson(ripple::JsonOptions::none));
+}
+
+static Result
+enumerateNFTOffers(
+    Context const& context,
+    ripple::uint256 const& tokenid,
+    ripple::Keylet const& directory)
+{
+    auto const& request = context.params;
+
+    auto v = ledgerInfoFromRequest(context);
+    if (auto status = std::get_if<Status>(&v))
+        return *status;
+
+    auto lgrInfo = std::get<ripple::LedgerInfo>(v);
+
+    // TODO: just check for existence without pulling
+    if (!context.backend->fetchLedgerObject(
+            directory.key, lgrInfo.seq, context.yield))
+        return Status{Error::rpcOBJECT_NOT_FOUND, "notFound"};
+
+    std::uint32_t limit;
+    if (auto const status = getLimit(context, limit); status)
+        return status;
+
+    boost::json::object response = {};
+    response[JS(nft_id)] = ripple::to_string(tokenid);
+    response[JS(offers)] = boost::json::value(boost::json::array_kind);
+
+    auto& jsonOffers = response[JS(offers)].as_array();
+
+    std::vector<ripple::SLE> offers;
+    std::uint64_t reserve(limit);
+    ripple::uint256 cursor;
+
+    if (request.contains(JS(marker)))
+    {
+        // We have a start point. Use limit - 1 from the result and use the
+        // very last one for the resume.
+        auto const& marker(request.at(JS(marker)));
+
+        if (!marker.is_string())
+            return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
+
+        if (!cursor.parseHex(marker.as_string().c_str()))
+            return Status{Error::rpcINVALID_PARAMS, "malformedCursor"};
+
+        auto const sle =
+            read(ripple::keylet::nftoffer(cursor), lgrInfo, context);
+
+        if (!sle || tokenid != sle->getFieldH256(ripple::sfNFTokenID))
+            return Status{Error::rpcOBJECT_NOT_FOUND, "notFound"};
+
+        if (tokenid != sle->getFieldH256(ripple::sfNFTokenID))
+            return Status{Error::rpcINVALID_PARAMS, "invalidTokenid"};
+
+        appendNftOfferJson(*sle, jsonOffers);
+        offers.reserve(reserve);
+    }
+    else
+    {
+        // We have no start point, limit should be one higher than requested.
+        offers.reserve(++reserve);
+    }
+
+    auto result = traverseOwnedNodes(
+        *context.backend,
+        directory,
+        cursor,
+        0,
+        lgrInfo.seq,
+        limit,
+        {},
+        context.yield,
+        [&offers](ripple::SLE const& offer) {
+            if (offer.getType() == ripple::ltNFTOKEN_OFFER)
+            {
+                offers.emplace_back(offer);
+                return true;
+            }
+
+            return false;
+        });
+
+    if (auto status = std::get_if<RPC::Status>(&result))
+        return *status;
+
+    if (offers.size() == reserve)
+    {
+        response[JS(limit)] = limit;
+        response[JS(marker)] = to_string(offers.back().key());
+        offers.pop_back();
+    }
+
+    for (auto const& offer : offers)
+        appendNftOfferJson(offer, jsonOffers);
+
+    return response;
+}
+
+std::variant<ripple::uint256, Status>
+getTokenid(boost::json::object const& request)
+{
+    if (!request.contains(JS(nft_id)))
+        return Status{Error::rpcINVALID_PARAMS, "missingTokenid"};
+
+    if (!request.at(JS(nft_id)).is_string())
+        return Status{Error::rpcINVALID_PARAMS, "tokenidNotString"};
+
+    ripple::uint256 tokenid;
+    if (!tokenid.parseHex(request.at(JS(nft_id)).as_string().c_str()))
+        return Status{Error::rpcINVALID_PARAMS, "malformedCursor"};
+
+    return tokenid;
+}
+
+Result
+doNFTOffers(Context const& context, bool sells)
+{
+    auto const v = getTokenid(context.params);
+    if (auto const status = std::get_if<Status>(&v))
+        return *status;
+
+    auto const getKeylet = [sells, &v]() {
+        if (sells)
+            return ripple::keylet::nft_sells(std::get<ripple::uint256>(v));
+
+        return ripple::keylet::nft_buys(std::get<ripple::uint256>(v));
+    };
+
+    return enumerateNFTOffers(
+        context, std::get<ripple::uint256>(v), getKeylet());
+}
+
+Result
+doNFTSellOffers(Context const& context)
+{
+    return doNFTOffers(context, true);
+}
+
+Result
+doNFTBuyOffers(Context const& context)
+{
+    return doNFTOffers(context, false);
+}
+
+} // namespace RPC
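enumerateNFTOffers paginates with the common fetch-one-extra idiom: when there is no marker it reserves limit + 1 results, and a full batch means the last entry becomes the resume marker for the next page rather than part of this one. A generic sketch of that idiom, detached from the clio types (all names here are illustrative):

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Hypothetical sketch of fetch-one-extra pagination: ask for limit + 1
// items; a full batch means more pages exist, so the extra item's key is
// handed back to the client as the resume marker and dropped from the page.
template <typename Item, typename Fetch>
std::pair<std::vector<Item>, std::string>
paginate(Fetch fetch, std::size_t limit)
{
    std::vector<Item> items = fetch(limit + 1);
    std::string marker;  // empty marker means this was the last page
    if (items.size() == limit + 1)
    {
        marker = items.back().key();  // resume point for the next call
        items.pop_back();             // not part of this page
    }
    return {std::move(items), std::move(marker)};
}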
@@ -10,9 +10,9 @@ getBaseTx(
     ripple::Fees const& fees)
 {
     boost::json::object tx;
-    tx["Sequence"] = accountSeq;
-    tx["Account"] = ripple::toBase58(accountID);
-    tx["Fee"] = RPC::toBoostJson(fees.units.jsonClipped());
+    tx[JS(Sequence)] = accountSeq;
+    tx[JS(Account)] = ripple::toBase58(accountID);
+    tx[JS(Fee)] = RPC::toBoostJson(fees.units.jsonClipped());
     return tx;
 }

@@ -21,11 +21,9 @@ doNoRippleCheck(Context const& context)
 {
     auto const& request = context.params;

-    auto accountID =
-        accountFromStringStrict(getRequiredString(request, "account"));
-    if (!accountID)
-        return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
+    ripple::AccountID accountID;
+    if (auto const status = getAccount(request, accountID); status)
+        return status;

     std::string role = getRequiredString(request, "role");
     bool roleGateway = false;
@@ -36,7 +34,9 @@ doNoRippleCheck(Context const& context)
         return Status{Error::rpcINVALID_PARAMS, "role field is invalid"};
     }

-    size_t limit = getUInt(request, "limit", 300);
+    std::uint32_t limit = 300;
+    if (auto const status = getLimit(context, limit); status)
+        return status;

     bool includeTxs = getBool(request, "transactions", false);

@@ -51,11 +51,11 @@ doNoRippleCheck(Context const& context)

     boost::json::array transactions;

-    auto keylet = ripple::keylet::account(*accountID);
+    auto keylet = ripple::keylet::account(accountID);
     auto accountObj = context.backend->fetchLedgerObject(
         keylet.key, lgrInfo.seq, context.yield);
     if (!accountObj)
-        throw AccountNotFoundError(ripple::toBase58(*accountID));
+        throw AccountNotFoundError(ripple::toBase58(accountID));

     ripple::SerialIter it{accountObj->data(), accountObj->size()};
     ripple::SLE sle{it, keylet.key};
@@ -79,16 +79,16 @@ doNoRippleCheck(Context const& context)
                 "You should immediately set your default ripple flag");
             if (includeTxs)
             {
-                auto tx = getBaseTx(*accountID, accountSeq++, *fees);
-                tx["TransactionType"] = "AccountSet";
-                tx["SetFlag"] = 8;
+                auto tx = getBaseTx(accountID, accountSeq++, *fees);
+                tx[JS(TransactionType)] = JS(AccountSet);
+                tx[JS(SetFlag)] = 8;
                 transactions.push_back(tx);
             }
         }

         traverseOwnedNodes(
             *context.backend,
-            *accountID,
+            accountID,
             lgrInfo.seq,
             std::numeric_limits<std::uint32_t>::max(),
             {},
@@ -141,12 +141,12 @@ doNoRippleCheck(Context const& context)
                     ripple::STAmount limitAmount(ownedItem.getFieldAmount(
                         bLow ? ripple::sfLowLimit : ripple::sfHighLimit));
                     limitAmount.setIssuer(peer);
-                    auto tx = getBaseTx(*accountID, accountSeq++, *fees);
-                    tx["TransactionType"] = "TrustSet";
-                    tx["LimitAmount"] = RPC::toBoostJson(
+                    auto tx = getBaseTx(accountID, accountSeq++, *fees);
+                    tx[JS(TransactionType)] = JS(TrustSet);
+                    tx[JS(LimitAmount)] = RPC::toBoostJson(
                         limitAmount.getJson(ripple::JsonOptions::none));
-                    tx["Flags"] = bNoRipple ? ripple::tfClearNoRipple
+                    tx[JS(Flags)] = bNoRipple ? ripple::tfClearNoRipple
                                             : ripple::tfSetNoRipple;
                     transactions.push_back(tx);
                 }

@@ -158,11 +158,11 @@ doNoRippleCheck(Context const& context)
         });

     boost::json::object response;
-    response["ledger_index"] = lgrInfo.seq;
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
+    response[JS(ledger_index)] = lgrInfo.seq;
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
     response["problems"] = std::move(problems);
     if (includeTxs)
-        response["transactions"] = std::move(transactions);
+        response[JS(transactions)] = std::move(transactions);

     return response;
 }
@@ -1,6 +1,10 @@
+// rngfill.h doesn't compile without this include
+#include <cassert>
+
 #include <ripple/beast/utility/rngfill.h>
 #include <ripple/crypto/csprng.h>
 #include <rpc/RPCHelpers.h>

 namespace RPC {

 Result
@@ -10,7 +14,8 @@ doRandom(Context const& context)

     beast::rngfill(rand.begin(), rand.size(), ripple::crypto_prng());
     boost::json::object result;
-    result["random"] = ripple::strHex(rand);
+    result[JS(random)] = ripple::strHex(rand);
     return result;
 }

 } // namespace RPC
@@ -2,6 +2,7 @@
 #include <backend/BackendInterface.h>
 #include <etl/ETLSource.h>
 #include <etl/ReportingETL.h>
+#include <main/Build.h>
 #include <rpc/RPCHelpers.h>

 namespace RPC {
@@ -36,56 +37,68 @@ doServerInfo(Context const& context)
     if (age < 0)
         age = 0;

-    response["info"] = boost::json::object{};
-    boost::json::object& info = response["info"].as_object();
+    response[JS(info)] = boost::json::object{};
+    boost::json::object& info = response[JS(info)].as_object();

-    info["complete_ledgers"] = std::to_string(range->minSequence) + "-" +
+    info[JS(complete_ledgers)] = std::to_string(range->minSequence) + "-" +
         std::to_string(range->maxSequence);

-    info["counters"] = boost::json::object{};
-    info["counters"].as_object()["rpc"] = context.counters.report();
+    bool admin = context.clientIp == "127.0.0.1";
+
+    if (admin)
+    {
+        info[JS(counters)] = boost::json::object{};
+        info[JS(counters)].as_object()[JS(rpc)] = context.counters.report();
+        info[JS(counters)].as_object()["subscriptions"] =
+            context.subscriptions->report();
+    }

     auto serverInfoRippled = context.balancer->forwardToRippled(
         {{"command", "server_info"}}, context.clientIp, context.yield);

-    info["load_factor"] = 1;
-    if (serverInfoRippled && !serverInfoRippled->contains("error"))
+    info[JS(load_factor)] = 1;
+    info["clio_version"] = Build::getClioVersionString();
+    if (serverInfoRippled && !serverInfoRippled->contains(JS(error)))
     {
         try
         {
-            auto& rippledResult = serverInfoRippled->at("result").as_object();
-            auto& rippledInfo = rippledResult.at("info").as_object();
-            info["load_factor"] = rippledInfo["load_factor"];
-            info["validation_quorum"] = rippledInfo["validation_quorum"];
+            auto& rippledResult = serverInfoRippled->at(JS(result)).as_object();
+            auto& rippledInfo = rippledResult.at(JS(info)).as_object();
+            info[JS(load_factor)] = rippledInfo[JS(load_factor)];
+            info[JS(validation_quorum)] = rippledInfo[JS(validation_quorum)];
+            info["rippled_version"] = rippledInfo[JS(build_version)];
         }
         catch (std::exception const&)
         {
         }
     }

-    info["validated_ledger"] = boost::json::object{};
-    boost::json::object& validated = info["validated_ledger"].as_object();
+    info[JS(validated_ledger)] = boost::json::object{};
+    boost::json::object& validated = info[JS(validated_ledger)].as_object();

-    validated["age"] = age;
-    validated["hash"] = ripple::strHex(lgrInfo->hash);
-    validated["seq"] = lgrInfo->seq;
-    validated["base_fee_xrp"] = fees->base.decimalXRP();
-    validated["reserve_base_xrp"] = fees->reserve.decimalXRP();
-    validated["reserve_inc_xrp"] = fees->increment.decimalXRP();
+    validated[JS(age)] = age;
+    validated[JS(hash)] = ripple::strHex(lgrInfo->hash);
+    validated[JS(seq)] = lgrInfo->seq;
+    validated[JS(base_fee_xrp)] = fees->base.decimalXRP();
+    validated[JS(reserve_base_xrp)] = fees->reserve.decimalXRP();
+    validated[JS(reserve_inc_xrp)] = fees->increment.decimalXRP();

-    response["cache"] = boost::json::object{};
-    auto& cache = response["cache"].as_object();
+    info["cache"] = boost::json::object{};
+    auto& cache = info["cache"].as_object();

     cache["size"] = context.backend->cache().size();
     cache["is_full"] = context.backend->cache().isFull();
     cache["latest_ledger_seq"] =
         context.backend->cache().latestLedgerSequence();
+    cache["object_hit_rate"] = context.backend->cache().getObjectHitRate();
+    cache["successor_hit_rate"] =
+        context.backend->cache().getSuccessorHitRate();

-    response["etl"] = context.etl->getInfo();
+    if (admin)
+    {
+        info["etl"] = context.etl->getInfo();
+    }

-    response["note"] =
-        "This is a clio server. If you want to talk to rippled, include "
-        "\"ledger_index\":\"current\" in your request";
     return response;
 }
 } // namespace RPC
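doServerInfo now gates the operational sections (counters, subscriptions, etl) behind a local-client check instead of returning them to every caller; the check in the diff is a literal loopback comparison on the request's client IP. A sketch of that gating shape, with the helper name and report parameters assumed for illustration:

#include <boost/json.hpp>
#include <string>
#include <utility>

// Hypothetical sketch of the admin gating added above: operational
// sections are attached to the info object only when the request
// originated from loopback.
void
attachAdminSections(
    boost::json::object& info,
    std::string const& clientIp,
    boost::json::object countersReport,
    boost::json::object etlReport)
{
    bool const admin = clientIp == "127.0.0.1";
    if (!admin)
        return;
    info["counters"] = std::move(countersReport);
    info["etl"] = std::move(etlReport);
}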
@@ -17,7 +17,7 @@ static std::unordered_set<std::string> validCommonStreams{
 Status
 validateStreams(boost::json::object const& request)
 {
-    boost::json::array const& streams = request.at("streams").as_array();
+    boost::json::array const& streams = request.at(JS(streams)).as_array();

     for (auto const& stream : streams)
     {
@@ -40,7 +40,7 @@ subscribeToStreams(
     std::shared_ptr<WsBase> session,
     SubscriptionManager& manager)
 {
-    boost::json::array const& streams = request.at("streams").as_array();
+    boost::json::array const& streams = request.at(JS(streams)).as_array();

     boost::json::object response;
     for (auto const& stream : streams)
@@ -69,7 +69,7 @@ unsubscribeToStreams(
     std::shared_ptr<WsBase> session,
     SubscriptionManager& manager)
 {
-    boost::json::array const& streams = request.at("streams").as_array();
+    boost::json::array const& streams = request.at(JS(streams)).as_array();

     for (auto const& stream : streams)
     {
@@ -114,7 +114,7 @@ subscribeToAccounts(
     std::shared_ptr<WsBase> session,
     SubscriptionManager& manager)
 {
-    boost::json::array const& accounts = request.at("accounts").as_array();
+    boost::json::array const& accounts = request.at(JS(accounts)).as_array();

     for (auto const& account : accounts)
     {
@@ -138,7 +138,7 @@ unsubscribeToAccounts(
     std::shared_ptr<WsBase> session,
     SubscriptionManager& manager)
 {
-    boost::json::array const& accounts = request.at("accounts").as_array();
+    boost::json::array const& accounts = request.at(JS(accounts)).as_array();

     for (auto const& account : accounts)
     {
@@ -163,7 +163,7 @@ subscribeToAccountsProposed(
     SubscriptionManager& manager)
 {
     boost::json::array const& accounts =
-        request.at("accounts_proposed").as_array();
+        request.at(JS(accounts_proposed)).as_array();

     for (auto const& account : accounts)
     {
@@ -188,7 +188,7 @@ unsubscribeToAccountsProposed(
     SubscriptionManager& manager)
 {
     boost::json::array const& accounts =
-        request.at("accounts_proposed").as_array();
+        request.at(JS(accounts_proposed)).as_array();

     for (auto const& account : accounts)
     {
@@ -212,68 +212,57 @@ validateAndGetBooks(
     boost::json::object const& request,
     std::shared_ptr<Backend::BackendInterface const> const& backend)
 {
-    if (!request.at("books").is_array())
+    if (!request.at(JS(books)).is_array())
         return Status{Error::rpcINVALID_PARAMS, "booksNotArray"};
-    boost::json::array const& books = request.at("books").as_array();
+    boost::json::array const& books = request.at(JS(books)).as_array();

     std::vector<ripple::Book> booksToSub;
     std::optional<Backend::LedgerRange> rng;
     boost::json::array snapshot;
     for (auto const& book : books)
     {
-        auto parsed = parseBook(book.as_object());
-        if (auto status = std::get_if<Status>(&parsed))
+        auto parsedBook = parseBook(book.as_object());
+        if (auto status = std::get_if<Status>(&parsedBook))
             return *status;
-        else
+
+        auto b = std::get<ripple::Book>(parsedBook);
+        booksToSub.push_back(b);
+        bool both = book.as_object().contains(JS(both));
+        if (both)
+            booksToSub.push_back(ripple::reversed(b));
+
+        if (book.as_object().contains(JS(snapshot)))
         {
-            auto b = std::get<ripple::Book>(parsed);
-            booksToSub.push_back(b);
-            bool both = book.as_object().contains("both");
+            if (!rng)
+                rng = backend->fetchLedgerRange();
+            ripple::AccountID takerID = beast::zero;
+            if (book.as_object().contains(JS(taker)))
+                if (auto const status = getTaker(book.as_object(), takerID);
+                    status)
+                    return status;
+
+            auto getOrderBook = [&snapshot, &backend, &rng, &takerID](
+                                    auto book,
+                                    boost::asio::yield_context& yield) {
+                auto bookBase = getBookBase(book);
+                auto [offers, retMarker] = backend->fetchBookOffers(
+                    bookBase, rng->maxSequence, 200, {}, yield);
+
+                auto orderBook = postProcessOrderBook(
+                    offers, book, takerID, *backend, rng->maxSequence, yield);
+                std::copy(
+                    orderBook.begin(),
+                    orderBook.end(),
+                    std::back_inserter(snapshot));
+            };
+            getOrderBook(b, yield);
             if (both)
-                booksToSub.push_back(ripple::reversed(b));
+                getOrderBook(ripple::reversed(b), yield);

-            if (book.as_object().contains("snapshot"))
-            {
-                if (!rng)
-                    rng = backend->fetchLedgerRange();
-                ripple::AccountID takerID = beast::zero;
-                if (book.as_object().contains("taker"))
-                {
-                    auto parsed = parseTaker(request.at("taker"));
-                    if (auto status = std::get_if<Status>(&parsed))
-                        return *status;
-                    else
-                    {
-                        takerID = std::get<ripple::AccountID>(parsed);
-                    }
-                }
-                auto getOrderBook = [&snapshot, &backend, &rng, &takerID](
-                                        auto book,
-                                        boost::asio::yield_context& yield) {
-                    auto bookBase = getBookBase(book);
-                    auto [offers, retCursor] = backend->fetchBookOffers(
-                        bookBase, rng->maxSequence, 200, {}, yield);
-
-                    auto orderBook = postProcessOrderBook(
-                        offers,
-                        book,
-                        takerID,
-                        *backend,
-                        rng->maxSequence,
-                        yield);
-                    std::copy(
-                        orderBook.begin(),
-                        orderBook.end(),
-                        std::back_inserter(snapshot));
-                };
-                getOrderBook(b, yield);
-                if (both)
getOrderBook(ripple::reversed(b), yield);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return std::make_pair(booksToSub, snapshot);
|
return std::make_pair(booksToSub, snapshot);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
subscribeToBooks(
|
subscribeToBooks(
|
||||||
std::vector<ripple::Book> const& books,
|
std::vector<ripple::Book> const& books,
|
||||||
@@ -285,14 +274,33 @@ subscribeToBooks(
|
|||||||
manager.subBook(book, session);
|
manager.subBook(book, session);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
unsubscribeToBooks(
|
||||||
|
std::vector<ripple::Book> const& books,
|
||||||
|
std::shared_ptr<WsBase> session,
|
||||||
|
SubscriptionManager& manager)
|
||||||
|
{
|
||||||
|
for (auto const& book : books)
|
||||||
|
{
|
||||||
|
manager.unsubBook(book, session);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Result
|
Result
|
||||||
doSubscribe(Context const& context)
|
doSubscribe(Context const& context)
|
||||||
{
|
{
|
||||||
auto request = context.params;
|
auto request = context.params;
|
||||||
|
|
||||||
if (request.contains("streams"))
|
if (!request.contains(JS(streams)) && !request.contains(JS(accounts)) &&
|
||||||
|
!request.contains(JS(accounts_proposed)) &&
|
||||||
|
!request.contains(JS(books)))
|
||||||
|
return Status{
|
||||||
|
Error::rpcINVALID_PARAMS, "does not contain valid subscription"};
|
||||||
|
|
||||||
|
if (request.contains(JS(streams)))
|
||||||
{
|
{
|
||||||
if (!request.at("streams").is_array())
|
if (!request.at(JS(streams)).is_array())
|
||||||
return Status{Error::rpcINVALID_PARAMS, "streamsNotArray"};
|
return Status{Error::rpcINVALID_PARAMS, "streamsNotArray"};
|
||||||
|
|
||||||
auto status = validateStreams(request);
|
auto status = validateStreams(request);
|
||||||
@@ -301,33 +309,34 @@ doSubscribe(Context const& context)
|
|||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (request.contains("accounts"))
|
if (request.contains(JS(accounts)))
|
||||||
{
|
{
|
||||||
if (!request.at("accounts").is_array())
|
if (!request.at(JS(accounts)).is_array())
|
||||||
return Status{Error::rpcINVALID_PARAMS, "accountsNotArray"};
|
return Status{Error::rpcINVALID_PARAMS, "accountsNotArray"};
|
||||||
|
|
||||||
boost::json::array accounts = request.at("accounts").as_array();
|
boost::json::array accounts = request.at(JS(accounts)).as_array();
|
||||||
auto status = validateAccounts(accounts);
|
auto status = validateAccounts(accounts);
|
||||||
|
|
||||||
if (status)
|
if (status)
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (request.contains("accounts_proposed"))
|
if (request.contains(JS(accounts_proposed)))
|
||||||
{
|
{
|
||||||
if (!request.at("accounts_proposed").is_array())
|
if (!request.at(JS(accounts_proposed)).is_array())
|
||||||
return Status{Error::rpcINVALID_PARAMS, "accountsProposedNotArray"};
|
return Status{Error::rpcINVALID_PARAMS, "accountsProposedNotArray"};
|
||||||
|
|
||||||
boost::json::array accounts =
|
boost::json::array accounts =
|
||||||
request.at("accounts_proposed").as_array();
|
request.at(JS(accounts_proposed)).as_array();
|
||||||
auto status = validateAccounts(accounts);
|
auto status = validateAccounts(accounts);
|
||||||
|
|
||||||
if (status)
|
if (status)
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<ripple::Book> books;
|
std::vector<ripple::Book> books;
|
||||||
boost::json::array snapshot;
|
boost::json::array snapshot;
|
||||||
if (request.contains("books"))
|
if (request.contains(JS(books)))
|
||||||
{
|
{
|
||||||
auto parsed =
|
auto parsed =
|
||||||
validateAndGetBooks(context.yield, request, context.backend);
|
validateAndGetBooks(context.yield, request, context.backend);
|
||||||
@@ -341,22 +350,22 @@ doSubscribe(Context const& context)
|
|||||||
}
|
}
|
||||||
|
|
||||||
boost::json::object response;
|
boost::json::object response;
|
||||||
if (request.contains("streams"))
|
if (request.contains(JS(streams)))
|
||||||
response = subscribeToStreams(
|
response = subscribeToStreams(
|
||||||
context.yield, request, context.session, *context.subscriptions);
|
context.yield, request, context.session, *context.subscriptions);
|
||||||
|
|
||||||
if (request.contains("accounts"))
|
if (request.contains(JS(accounts)))
|
||||||
subscribeToAccounts(request, context.session, *context.subscriptions);
|
subscribeToAccounts(request, context.session, *context.subscriptions);
|
||||||
|
|
||||||
if (request.contains("accounts_proposed"))
|
if (request.contains(JS(accounts_proposed)))
|
||||||
subscribeToAccountsProposed(
|
subscribeToAccountsProposed(
|
||||||
request, context.session, *context.subscriptions);
|
request, context.session, *context.subscriptions);
|
||||||
|
|
||||||
if (request.contains("books"))
|
if (request.contains(JS(books)))
|
||||||
subscribeToBooks(books, context.session, *context.subscriptions);
|
subscribeToBooks(books, context.session, *context.subscriptions);
|
||||||
|
|
||||||
if (snapshot.size())
|
if (snapshot.size())
|
||||||
response["offers"] = snapshot;
|
response[JS(offers)] = snapshot;
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -365,9 +374,15 @@ doUnsubscribe(Context const& context)
|
|||||||
{
|
{
|
||||||
auto request = context.params;
|
auto request = context.params;
|
||||||
|
|
||||||
if (request.contains("streams"))
|
if (!request.contains(JS(streams)) && !request.contains(JS(accounts)) &&
|
||||||
|
!request.contains(JS(accounts_proposed)) &&
|
||||||
|
!request.contains(JS(books)))
|
||||||
|
return Status{
|
||||||
|
Error::rpcINVALID_PARAMS, "does not contain valid subscription"};
|
||||||
|
|
||||||
|
if (request.contains(JS(streams)))
|
||||||
{
|
{
|
||||||
if (!request.at("streams").is_array())
|
if (!request.at(JS(streams)).is_array())
|
||||||
return Status{Error::rpcINVALID_PARAMS, "streamsNotArray"};
|
return Status{Error::rpcINVALID_PARAMS, "streamsNotArray"};
|
||||||
|
|
||||||
auto status = validateStreams(request);
|
auto status = validateStreams(request);
|
||||||
@@ -376,41 +391,60 @@ doUnsubscribe(Context const& context)
|
|||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (request.contains("accounts"))
|
if (request.contains(JS(accounts)))
|
||||||
{
|
{
|
||||||
if (!request.at("accounts").is_array())
|
if (!request.at(JS(accounts)).is_array())
|
||||||
return Status{Error::rpcINVALID_PARAMS, "accountsNotArray"};
|
return Status{Error::rpcINVALID_PARAMS, "accountsNotArray"};
|
||||||
|
|
||||||
boost::json::array accounts = request.at("accounts").as_array();
|
boost::json::array accounts = request.at(JS(accounts)).as_array();
|
||||||
auto status = validateAccounts(accounts);
|
auto status = validateAccounts(accounts);
|
||||||
|
|
||||||
if (status)
|
if (status)
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (request.contains("accounts_proposed"))
|
if (request.contains(JS(accounts_proposed)))
|
||||||
{
|
{
|
||||||
if (!request.at("accounts_proposed").is_array())
|
if (!request.at(JS(accounts_proposed)).is_array())
|
||||||
return Status{Error::rpcINVALID_PARAMS, "accountsProposedNotArray"};
|
return Status{Error::rpcINVALID_PARAMS, "accountsProposedNotArray"};
|
||||||
|
|
||||||
boost::json::array accounts =
|
boost::json::array accounts =
|
||||||
request.at("accounts_proposed").as_array();
|
request.at(JS(accounts_proposed)).as_array();
|
||||||
auto status = validateAccounts(accounts);
|
auto status = validateAccounts(accounts);
|
||||||
|
|
||||||
if (status)
|
if (status)
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (request.contains("streams"))
|
std::vector<ripple::Book> books;
|
||||||
|
if (request.contains(JS(books)))
|
||||||
|
{
|
||||||
|
auto parsed =
|
||||||
|
validateAndGetBooks(context.yield, request, context.backend);
|
||||||
|
|
||||||
|
if (auto status = std::get_if<Status>(&parsed))
|
||||||
|
return *status;
|
||||||
|
|
||||||
|
auto [bks, snap] =
|
||||||
|
std::get<std::pair<std::vector<ripple::Book>, boost::json::array>>(
|
||||||
|
parsed);
|
||||||
|
|
||||||
|
books = std::move(bks);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (request.contains(JS(streams)))
|
||||||
unsubscribeToStreams(request, context.session, *context.subscriptions);
|
unsubscribeToStreams(request, context.session, *context.subscriptions);
|
||||||
|
|
||||||
if (request.contains("accounts"))
|
if (request.contains(JS(accounts)))
|
||||||
unsubscribeToAccounts(request, context.session, *context.subscriptions);
|
unsubscribeToAccounts(request, context.session, *context.subscriptions);
|
||||||
|
|
||||||
if (request.contains("accounts_proposed"))
|
if (request.contains(JS(accounts_proposed)))
|
||||||
unsubscribeToAccountsProposed(
|
unsubscribeToAccountsProposed(
|
||||||
request, context.session, *context.subscriptions);
|
request, context.session, *context.subscriptions);
|
||||||
|
|
||||||
|
if (request.contains("books"))
|
||||||
|
unsubscribeToBooks(books, context.session, *context.subscriptions);
|
||||||
|
|
||||||
boost::json::object response = {{"status", "success"}};
|
boost::json::object response = {{"status", "success"}};
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ doTransactionEntry(Context const& context)
     auto lgrInfo = std::get<ripple::LedgerInfo>(v);

     ripple::uint256 hash;
-    if (!hash.parseHex(getRequiredString(context.params, "tx_hash")))
+    if (!hash.parseHex(getRequiredString(context.params, JS(tx_hash))))
         return Status{Error::rpcINVALID_PARAMS, "malformedTransaction"};

     auto dbResponse = context.backend->fetchTransaction(hash, context.yield);
@@ -33,10 +33,10 @@ doTransactionEntry(Context const& context)
             "Transaction not found."};

     auto [txn, meta] = toExpandedJson(*dbResponse);
-    response["tx_json"] = std::move(txn);
-    response["metadata"] = std::move(meta);
-    response["ledger_index"] = lgrInfo.seq;
-    response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
+    response[JS(tx_json)] = std::move(txn);
+    response[JS(metadata)] = std::move(meta);
+    response[JS(ledger_index)] = lgrInfo.seq;
+    response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
     return response;
 }

@@ -14,23 +14,23 @@ doTx(Context const& context)
     auto request = context.params;
     boost::json::object response = {};

-    if (!request.contains("transaction"))
+    if (!request.contains(JS(transaction)))
         return Status{Error::rpcINVALID_PARAMS, "specifyTransaction"};

-    if (!request.at("transaction").is_string())
+    if (!request.at(JS(transaction)).is_string())
         return Status{Error::rpcINVALID_PARAMS, "transactionNotString"};

     ripple::uint256 hash;
-    if (!hash.parseHex(request.at("transaction").as_string().c_str()))
+    if (!hash.parseHex(request.at(JS(transaction)).as_string().c_str()))
         return Status{Error::rpcINVALID_PARAMS, "malformedTransaction"};

     bool binary = false;
-    if (request.contains("binary"))
+    if (request.contains(JS(binary)))
     {
-        if (!request.at("binary").is_bool())
+        if (!request.at(JS(binary)).is_bool())
             return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};

-        binary = request.at("binary").as_bool();
+        binary = request.at(JS(binary)).as_bool();
     }

     auto range = context.backend->fetchLedgerRange();
@@ -45,16 +45,16 @@ doTx(Context const& context)
     {
         auto [txn, meta] = toExpandedJson(*dbResponse);
         response = txn;
-        response["meta"] = meta;
+        response[JS(meta)] = meta;
     }
     else
     {
-        response["tx"] = ripple::strHex(dbResponse->transaction);
-        response["meta"] = ripple::strHex(dbResponse->metadata);
-        response["hash"] = std::move(request.at("transaction").as_string());
+        response[JS(tx)] = ripple::strHex(dbResponse->transaction);
+        response[JS(meta)] = ripple::strHex(dbResponse->metadata);
+        response[JS(hash)] = std::move(request.at(JS(transaction)).as_string());
     }
-    response["date"] = dbResponse->date;
-    response["ledger_index"] = dbResponse->ledgerSequence;
+    response[JS(date)] = dbResponse->date;
+    response[JS(ledger_index)] = dbResponse->ledgerSequence;

     return response;
 }

40  src/subscriptions/Message.h  Normal file
@@ -0,0 +1,40 @@
+#ifndef CLIO_SUBSCRIPTION_MESSAGE_H
+#define CLIO_SUBSCRIPTION_MESSAGE_H
+
+#include <string>
+
+// This class should only be constructed once, then it can
+// be read from in parallel by many websocket senders
+class Message
+{
+    std::string message_;
+
+public:
+    Message() = delete;
+    Message(std::string&& message) : message_(std::move(message))
+    {
+    }
+
+    Message(Message const&) = delete;
+    Message(Message&&) = delete;
+    Message&
+    operator=(Message const&) = delete;
+    Message&
+    operator=(Message&&) = delete;
+
+    ~Message() = default;
+
+    char*
+    data()
+    {
+        return message_.data();
+    }
+
+    std::size_t
+    size()
+    {
+        return message_.size();
+    }
+};
+
+#endif  // CLIO_SUBSCRIPTION_MESSAGE_H
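The point of the new type is to serialize a published payload once and then share the immutable buffer across every subscriber instead of copying a std::string per session. A minimal sketch of the intended usage follows; Session here is a hypothetical stand-in for WsBase, whose send(std::shared_ptr<Message>) overload is implied by this changeset but not shown in this hunk.

#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-in for WsBase (illustration only).
struct Session
{
    void
    send(std::shared_ptr<Message> const& msg);
};

// Serialize once, then hand the same shared buffer to all subscribers.
void
broadcast(std::vector<std::shared_ptr<Session>>& sessions, std::string&& json)
{
    auto message = std::make_shared<Message>(std::move(json));
    for (auto& session : sessions)
        session->send(message);  // no per-subscriber copy of the payload
}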
@@ -5,25 +5,24 @@
 template <class T>
 inline void
 sendToSubscribers(
-    std::string const& message,
+    std::shared_ptr<Message> const& message,
     T& subscribers,
-    boost::asio::io_context::strand& strand)
+    std::atomic_uint64_t& counter)
 {
-    boost::asio::post(strand, [&subscribers, message]() {
-        for (auto it = subscribers.begin(); it != subscribers.end();)
-        {
-            auto& session = *it;
-            if (session->dead())
-            {
-                it = subscribers.erase(it);
-            }
-            else
-            {
-                session->send(message);
-                ++it;
-            }
-        }
-    });
+    for (auto it = subscribers.begin(); it != subscribers.end();)
+    {
+        auto& session = *it;
+        if (session->dead())
+        {
+            it = subscribers.erase(it);
+            --counter;
+        }
+        else
+        {
+            session->send(message);
+            ++it;
+        }
+    }
 }

 template <class T>
@@ -31,11 +30,13 @@ inline void
 addSession(
     std::shared_ptr<WsBase> session,
     T& subscribers,
-    boost::asio::io_context::strand& strand)
+    std::atomic_uint64_t& counter)
 {
-    boost::asio::post(strand, [&subscribers, s = std::move(session)]() {
-        subscribers.emplace(s);
-    });
+    if (!subscribers.contains(session))
+    {
+        subscribers.insert(session);
+        ++counter;
+    }
 }

 template <class T>
@@ -43,29 +44,37 @@ inline void
 removeSession(
     std::shared_ptr<WsBase> session,
     T& subscribers,
-    boost::asio::io_context::strand& strand)
+    std::atomic_uint64_t& counter)
 {
-    boost::asio::post(strand, [&subscribers, s = std::move(session)]() {
-        subscribers.erase(s);
-    });
+    if (subscribers.contains(session))
+    {
+        subscribers.erase(session);
+        --counter;
+    }
 }

 void
 Subscription::subscribe(std::shared_ptr<WsBase> const& session)
 {
-    addSession(session, subscribers_, strand_);
+    boost::asio::post(strand_, [this, session]() {
+        addSession(session, subscribers_, subCount_);
+    });
 }

 void
 Subscription::unsubscribe(std::shared_ptr<WsBase> const& session)
 {
-    removeSession(session, subscribers_, strand_);
+    boost::asio::post(strand_, [this, session]() {
+        removeSession(session, subscribers_, subCount_);
+    });
 }

 void
-Subscription::publish(std::string const& message)
+Subscription::publish(std::shared_ptr<Message>& message)
 {
-    sendToSubscribers(message, subscribers_, strand_);
+    boost::asio::post(strand_, [this, message]() {
+        sendToSubscribers(message, subscribers_, subCount_);
+    });
 }

 template <class Key>
@@ -74,7 +83,9 @@ SubscriptionMap<Key>::subscribe(
     std::shared_ptr<WsBase> const& session,
     Key const& account)
 {
-    addSession(session, subscribers_[account], strand_);
+    boost::asio::post(strand_, [this, session, account]() {
+        addSession(session, subscribers_[account], subCount_);
+    });
 }

 template <class Key>
@@ -83,14 +94,36 @@ SubscriptionMap<Key>::unsubscribe(
     std::shared_ptr<WsBase> const& session,
     Key const& account)
 {
-    removeSession(session, subscribers_[account], strand_);
+    boost::asio::post(strand_, [this, account, session]() {
+        if (!subscribers_.contains(account))
+            return;
+
+        if (!subscribers_[account].contains(session))
+            return;
+
+        --subCount_;
+
+        subscribers_[account].erase(session);
+
+        if (subscribers_[account].size() == 0)
+        {
+            subscribers_.erase(account);
+        }
+    });
 }

 template <class Key>
 void
-SubscriptionMap<Key>::publish(std::string const& message, Key const& account)
+SubscriptionMap<Key>::publish(
+    std::shared_ptr<Message>& message,
+    Key const& account)
 {
-    sendToSubscribers(message, subscribers_[account], strand_);
+    boost::asio::post(strand_, [this, account, message]() {
+        if (!subscribers_.contains(account))
+            return;
+
+        sendToSubscribers(message, subscribers_[account], subCount_);
+    });
 }

 boost::json::object
@@ -120,7 +153,7 @@ getLedgerPubMessage(
 boost::json::object
 SubscriptionManager::subLedger(
     boost::asio::yield_context& yield,
-    std::shared_ptr<WsBase>& session)
+    std::shared_ptr<WsBase> session)
 {
     ledgerSubscribers_.subscribe(session);

@@ -144,19 +177,19 @@ SubscriptionManager::subLedger(
 }

 void
-SubscriptionManager::unsubLedger(std::shared_ptr<WsBase>& session)
+SubscriptionManager::unsubLedger(std::shared_ptr<WsBase> session)
 {
     ledgerSubscribers_.unsubscribe(session);
 }

 void
-SubscriptionManager::subTransactions(std::shared_ptr<WsBase>& session)
+SubscriptionManager::subTransactions(std::shared_ptr<WsBase> session)
 {
     txSubscribers_.subscribe(session);
 }

 void
-SubscriptionManager::unsubTransactions(std::shared_ptr<WsBase>& session)
+SubscriptionManager::unsubTransactions(std::shared_ptr<WsBase> session)
 {
     txSubscribers_.unsubscribe(session);
 }
@@ -167,6 +200,11 @@ SubscriptionManager::subAccount(
     std::shared_ptr<WsBase>& session)
 {
     accountSubscribers_.subscribe(session, account);
+
+    std::unique_lock lk(cleanupMtx_);
+    cleanupFuncs_[session].emplace_back([this, account](session_ptr session) {
+        unsubAccount(account, session);
+    });
 }

 void
@@ -180,15 +218,19 @@ SubscriptionManager::unsubAccount(
 void
 SubscriptionManager::subBook(
     ripple::Book const& book,
-    std::shared_ptr<WsBase>& session)
+    std::shared_ptr<WsBase> session)
 {
     bookSubscribers_.subscribe(session, book);
+
+    std::unique_lock lk(cleanupMtx_);
+    cleanupFuncs_[session].emplace_back(
+        [this, book](session_ptr session) { unsubBook(book, session); });
 }

 void
 SubscriptionManager::unsubBook(
     ripple::Book const& book,
-    std::shared_ptr<WsBase>& session)
+    std::shared_ptr<WsBase> session)
 {
     bookSubscribers_.unsubscribe(session, book);
 }
@@ -200,8 +242,10 @@ SubscriptionManager::pubLedger(
     std::string const& ledgerRange,
     std::uint32_t txnCount)
 {
-    ledgerSubscribers_.publish(boost::json::serialize(
+    auto message = std::make_shared<Message>(boost::json::serialize(
         getLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount)));
+
+    ledgerSubscribers_.publish(message);
 }

 void
@@ -213,7 +257,8 @@ SubscriptionManager::pubTransaction(
     boost::json::object pubObj;
     pubObj["transaction"] = RPC::toJson(*tx);
     pubObj["meta"] = RPC::toJson(*meta);
-    RPC::insertDeliveredAmount(pubObj["meta"].as_object(), tx, meta);
+    RPC::insertDeliveredAmount(
+        pubObj["meta"].as_object(), tx, meta, blobs.date);
     pubObj["type"] = "transaction";
     pubObj["validated"] = true;
     pubObj["status"] = "closed";
@@ -250,10 +295,9 @@ SubscriptionManager::pubTransaction(
         }
     }

-    std::string pubMsg{boost::json::serialize(pubObj)};
+    auto pubMsg = std::make_shared<Message>(boost::json::serialize(pubObj));
     txSubscribers_.publish(pubMsg);

-    auto journal = ripple::debugLog();
     auto accounts = meta->getAffectedAccounts();

     for (auto const& account : accounts)
@@ -305,7 +349,7 @@ void
 SubscriptionManager::forwardProposedTransaction(
     boost::json::object const& response)
 {
-    std::string pubMsg{boost::json::serialize(response)};
+    auto pubMsg = std::make_shared<Message>(boost::json::serialize(response));
     txProposedSubscribers_.publish(pubMsg);

     auto transaction = response.at("transaction").as_object();
@@ -318,45 +362,45 @@ SubscriptionManager::forwardProposedTransaction(
 void
 SubscriptionManager::forwardManifest(boost::json::object const& response)
 {
-    std::string pubMsg{boost::json::serialize(response)};
+    auto pubMsg = std::make_shared<Message>(boost::json::serialize(response));
     manifestSubscribers_.publish(pubMsg);
 }

 void
 SubscriptionManager::forwardValidation(boost::json::object const& response)
 {
-    std::string pubMsg{boost::json::serialize(response)};
-    validationsSubscribers_.publish(std::move(pubMsg));
+    auto pubMsg = std::make_shared<Message>(boost::json::serialize(response));
+    validationsSubscribers_.publish(pubMsg);
 }

 void
 SubscriptionManager::subProposedAccount(
     ripple::AccountID const& account,
-    std::shared_ptr<WsBase>& session)
+    std::shared_ptr<WsBase> session)
 {
     accountProposedSubscribers_.subscribe(session, account);
 }

 void
-SubscriptionManager::subManifest(std::shared_ptr<WsBase>& session)
+SubscriptionManager::subManifest(std::shared_ptr<WsBase> session)
 {
     manifestSubscribers_.subscribe(session);
 }

 void
-SubscriptionManager::unsubManifest(std::shared_ptr<WsBase>& session)
+SubscriptionManager::unsubManifest(std::shared_ptr<WsBase> session)
 {
     manifestSubscribers_.unsubscribe(session);
 }

 void
-SubscriptionManager::subValidation(std::shared_ptr<WsBase>& session)
+SubscriptionManager::subValidation(std::shared_ptr<WsBase> session)
 {
     validationsSubscribers_.subscribe(session);
 }

 void
-SubscriptionManager::unsubValidation(std::shared_ptr<WsBase>& session)
+SubscriptionManager::unsubValidation(std::shared_ptr<WsBase> session)
 {
     validationsSubscribers_.unsubscribe(session);
 }
@@ -364,19 +408,34 @@ SubscriptionManager::unsubValidation(std::shared_ptr<WsBase>& session)
 void
 SubscriptionManager::unsubProposedAccount(
     ripple::AccountID const& account,
-    std::shared_ptr<WsBase>& session)
+    std::shared_ptr<WsBase> session)
 {
     accountProposedSubscribers_.unsubscribe(session, account);
 }

 void
-SubscriptionManager::subProposedTransactions(std::shared_ptr<WsBase>& session)
+SubscriptionManager::subProposedTransactions(std::shared_ptr<WsBase> session)
 {
     txProposedSubscribers_.subscribe(session);
 }

 void
-SubscriptionManager::unsubProposedTransactions(std::shared_ptr<WsBase>& session)
+SubscriptionManager::unsubProposedTransactions(std::shared_ptr<WsBase> session)
 {
     txProposedSubscribers_.unsubscribe(session);
 }
+
+void
+SubscriptionManager::cleanup(std::shared_ptr<WsBase> session)
+{
+    std::unique_lock lk(cleanupMtx_);
+    if (!cleanupFuncs_.contains(session))
+        return;
+
+    for (auto f : cleanupFuncs_[session])
+    {
+        f(session);
+    }
+
+    cleanupFuncs_.erase(session);
+}

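The refactor above moves the boost::asio::post from inside the helpers to each call site: addSession, removeSession, and sendToSubscribers now assume they already run on the strand, so the subscriber containers never need a lock, while the atomic subCount_ can be read cheaply from any thread. A minimal sketch of that confinement pattern, written independently of the Clio types (names here are illustrative):

#include <atomic>
#include <boost/asio.hpp>
#include <cstdint>
#include <set>

// All mutation of items_ is posted to one strand, so the container itself
// needs no mutex; the atomic counter gives other threads a cheap size().
class StrandConfinedSet
{
    boost::asio::io_context::strand strand_;
    std::set<int> items_;             // only touched on the strand
    std::atomic_uint64_t count_ = 0;  // readable from any thread

public:
    explicit StrandConfinedSet(boost::asio::io_context& ioc) : strand_(ioc)
    {
    }

    void
    insert(int v)
    {
        boost::asio::post(strand_, [this, v]() {
            if (items_.insert(v).second)
                ++count_;
        });
    }

    std::uint64_t
    size() const
    {
        return count_.load();
    }
};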
@@ -3,6 +3,7 @@

 #include <backend/BackendInterface.h>
 #include <memory>
+#include <subscriptions/Message.h>

 class WsBase;

@@ -10,6 +11,7 @@ class Subscription
 {
     boost::asio::io_context::strand strand_;
     std::unordered_set<std::shared_ptr<WsBase>> subscribers_ = {};
+    std::atomic_uint64_t subCount_ = 0;

 public:
     Subscription() = delete;
@@ -29,16 +31,24 @@ public:
     unsubscribe(std::shared_ptr<WsBase> const& session);

     void
-    publish(std::string const& message);
+    publish(std::shared_ptr<Message>& message);
+
+    std::uint64_t
+    count()
+    {
+        return subCount_.load();
+    }
 };

 template <class Key>
 class SubscriptionMap
 {
-    using subscribers = std::unordered_set<std::shared_ptr<WsBase>>;
+    using ptr = std::shared_ptr<WsBase>;
+    using subscribers = std::set<ptr>;

     boost::asio::io_context::strand strand_;
     std::unordered_map<Key, subscribers> subscribers_ = {};
+    std::atomic_uint64_t subCount_ = 0;

 public:
     SubscriptionMap() = delete;
@@ -58,11 +68,19 @@ public:
     unsubscribe(std::shared_ptr<WsBase> const& session, Key const& key);

     void
-    publish(std::string const& message, Key const& key);
+    publish(std::shared_ptr<Message>& message, Key const& key);
+
+    std::uint64_t
+    count()
+    {
+        return subCount_.load();
+    }
 };

 class SubscriptionManager
 {
+    using session_ptr = std::shared_ptr<WsBase>;
+
     std::vector<std::thread> workers_;
     boost::asio::io_context ioc_;
     std::optional<boost::asio::io_context::work> work_;
@@ -132,9 +150,7 @@ public:
     }

     boost::json::object
-    subLedger(
-        boost::asio::yield_context& yield,
-        std::shared_ptr<WsBase>& session);
+    subLedger(boost::asio::yield_context& yield, session_ptr session);

     void
     pubLedger(
@@ -144,13 +160,13 @@ public:
         std::uint32_t txnCount);

     void
-    unsubLedger(std::shared_ptr<WsBase>& session);
+    unsubLedger(session_ptr session);

     void
-    subTransactions(std::shared_ptr<WsBase>& session);
+    subTransactions(session_ptr session);

     void
-    unsubTransactions(std::shared_ptr<WsBase>& session);
+    unsubTransactions(session_ptr session);

     void
     pubTransaction(
@@ -158,32 +174,28 @@ public:
         ripple::LedgerInfo const& lgrInfo);

     void
-    subAccount(
-        ripple::AccountID const& account,
-        std::shared_ptr<WsBase>& session);
+    subAccount(ripple::AccountID const& account, session_ptr& session);

     void
-    unsubAccount(
-        ripple::AccountID const& account,
-        std::shared_ptr<WsBase>& session);
+    unsubAccount(ripple::AccountID const& account, session_ptr& session);

     void
-    subBook(ripple::Book const& book, std::shared_ptr<WsBase>& session);
+    subBook(ripple::Book const& book, session_ptr session);

     void
-    unsubBook(ripple::Book const& book, std::shared_ptr<WsBase>& session);
+    unsubBook(ripple::Book const& book, session_ptr session);

     void
-    subManifest(std::shared_ptr<WsBase>& session);
+    subManifest(session_ptr session);

     void
-    unsubManifest(std::shared_ptr<WsBase>& session);
+    unsubManifest(session_ptr session);

     void
-    subValidation(std::shared_ptr<WsBase>& session);
+    subValidation(session_ptr session);

     void
-    unsubValidation(std::shared_ptr<WsBase>& session);
+    unsubValidation(session_ptr session);

     void
     forwardProposedTransaction(boost::json::object const& response);
@@ -195,26 +207,51 @@ public:
     forwardValidation(boost::json::object const& response);

     void
-    subProposedAccount(
-        ripple::AccountID const& account,
-        std::shared_ptr<WsBase>& session);
+    subProposedAccount(ripple::AccountID const& account, session_ptr session);

     void
-    unsubProposedAccount(
-        ripple::AccountID const& account,
-        std::shared_ptr<WsBase>& session);
+    unsubProposedAccount(ripple::AccountID const& account, session_ptr session);

     void
-    subProposedTransactions(std::shared_ptr<WsBase>& session);
+    subProposedTransactions(session_ptr session);

     void
-    unsubProposedTransactions(std::shared_ptr<WsBase>& session);
+    unsubProposedTransactions(session_ptr session);
+
+    void
+    cleanup(session_ptr session);
+
+    boost::json::object
+    report()
+    {
+        boost::json::object counts = {};
+
+        counts["ledger"] = ledgerSubscribers_.count();
+        counts["transactions"] = txSubscribers_.count();
+        counts["transactions_proposed"] = txProposedSubscribers_.count();
+        counts["manifests"] = manifestSubscribers_.count();
+        counts["validations"] = validationsSubscribers_.count();
+        counts["account"] = accountSubscribers_.count();
+        counts["accounts_proposed"] = accountProposedSubscribers_.count();
+        counts["books"] = bookSubscribers_.count();
+
+        return counts;
+    }

 private:
     void
-    sendAll(
-        std::string const& pubMsg,
-        std::unordered_set<std::shared_ptr<WsBase>>& subs);
+    sendAll(std::string const& pubMsg, std::unordered_set<session_ptr>& subs);
+
+    /**
+     * This is how we chose to cleanup subscriptions that have been closed.
+     * Each time we add a subscriber, we add the opposite lambda that
+     * unsubscribes that subscriber when cleanup is called with the session that
+     * closed.
+     */
+    using CleanupFunction = std::function<void(session_ptr)>;
+    std::mutex cleanupMtx_;
+    std::unordered_map<session_ptr, std::vector<CleanupFunction>>
+        cleanupFuncs_ = {};
 };

 #endif  // SUBSCRIPTION_MANAGER_H

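The cleanup mechanism documented in the comment above registers, at subscribe time, the inverse operation keyed by session; when a connection dies, cleanup(session) simply replays those lambdas. A minimal sketch of the idea in isolation (the names here are illustrative, not Clio's):

#include <functional>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

struct Conn;  // stand-in for WsBase
using conn_ptr = std::shared_ptr<Conn>;

class CleanupRegistry
{
    std::mutex mtx_;
    std::unordered_map<conn_ptr, std::vector<std::function<void(conn_ptr)>>>
        funcs_;

public:
    // Pair every "subscribe" with the lambda that undoes it.
    void
    onSubscribe(conn_ptr const& conn, std::function<void(conn_ptr)> undo)
    {
        std::unique_lock lk(mtx_);
        funcs_[conn].push_back(std::move(undo));
    }

    // Called once when the connection closes: replay all undo actions.
    void
    cleanup(conn_ptr const& conn)
    {
        std::unique_lock lk(mtx_);
        auto it = funcs_.find(conn);
        if (it == funcs_.end())
            return;
        for (auto& f : it->second)
            f(conn);
        funcs_.erase(it);
    }
};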
@@ -15,6 +15,7 @@ class DOSGuard
     std::uint32_t const maxFetches_;
     std::uint32_t const sweepInterval_;

+    // Load config setting for DOSGuard
     std::optional<boost::json::object>
     getConfig(boost::json::object const& config) const
     {
@@ -92,6 +93,12 @@ public:
         });
     }

+    bool
+    isWhiteListed(std::string const& ip)
+    {
+        return whitelist_.contains(ip);
+    }
+
     bool
     isOk(std::string const& ip)
     {

@@ -20,6 +20,7 @@

 #include <rpc/Counters.h>
 #include <rpc/RPC.h>
+#include <rpc/WorkQueue.h>
 #include <vector>
 #include <webserver/DOSGuard.h>

@@ -92,6 +93,7 @@ class HttpBase
     std::shared_ptr<ReportingETL const> etl_;
     DOSGuard& dosGuard_;
     RPC::Counters& counters_;
+    WorkQueue& workQueue_;
     send_lambda lambda_;

 protected:
@@ -146,6 +148,7 @@ public:
         std::shared_ptr<ReportingETL const> etl,
         DOSGuard& dosGuard,
         RPC::Counters& counters,
+        WorkQueue& queue,
         boost::beast::flat_buffer buffer)
         : ioc_(ioc)
         , backend_(backend)
@@ -154,6 +157,7 @@ public:
         , etl_(etl)
         , dosGuard_(dosGuard)
         , counters_(counters)
+        , workQueue_(queue)
         , lambda_(*this)
         , buffer_(std::move(buffer))
     {
@@ -208,7 +212,8 @@ public:
                 balancer_,
                 etl_,
                 dosGuard_,
-                counters_);
+                counters_,
+                workQueue_);
         }

         auto ip = derived().ip();
@@ -220,21 +225,36 @@ public:

         // Requests are handed using coroutines. Here we spawn a coroutine
         // which will asynchronously handle a request.
-        boost::asio::spawn(
-            derived().stream().get_executor(),
-            [this, ip, session](boost::asio::yield_context yield) {
-                handle_request(
-                    yield,
-                    std::move(req_),
-                    lambda_,
-                    backend_,
-                    balancer_,
-                    etl_,
-                    dosGuard_,
-                    counters_,
-                    *ip,
-                    session);
-            });
+        if (!workQueue_.postCoro(
+                [this, ip, session](boost::asio::yield_context yield) {
+                    handle_request(
+                        yield,
+                        std::move(req_),
+                        lambda_,
+                        backend_,
+                        subscriptions_,
+                        balancer_,
+                        etl_,
+                        dosGuard_,
+                        counters_,
+                        *ip,
+                        session);
+                },
+                dosGuard_.isWhiteListed(*ip)))
+        {
+            // Non-whitelist connection rejected due to full connection queue
+            http::response<http::string_body> res{
+                http::status::ok, req_.version()};
+            res.set(
+                http::field::server,
+                "clio-server-" + Build::getClioVersionString());
+            res.set(http::field::content_type, "application/json");
+            res.keep_alive(req_.keep_alive());
+            res.body() = boost::json::serialize(
+                RPC::make_error(RPC::Error::rpcTOO_BUSY));
+            res.prepare_payload();
+            lambda_(std::move(res));
+        }
     }

     void
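WorkQueue itself is not part of this changeset; the call above only relies on postCoro(fn, isWhiteListed) returning false when the queue is saturated and the client is not whitelisted. A hedged sketch of that assumed contract follows (this is not the real implementation in rpc/WorkQueue.h, just the semantics the caller depends on):

#include <atomic>
#include <cstdint>

// Assumed contract only: maxSize == 0 means "no limit", matching the
// max_queue_size default parsed further below in this changeset.
template <typename Fn>
bool
postCoroSketch(
    Fn&& fn,
    bool isWhiteListed,
    std::atomic_uint64_t& queued,
    std::uint64_t maxSize)
{
    if (maxSize != 0 && queued.load() >= maxSize && !isWhiteListed)
        return false;  // caller replies with rpcTOO_BUSY

    ++queued;
    // A real queue would spawn a coroutine on a worker io_context here
    // and decrement 'queued' once fn(yield) completes.
    static_cast<void>(fn);
    return true;
}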
@@ -275,6 +295,7 @@ handle_request(
     request<Body, boost::beast::http::basic_fields<Allocator>>&& req,
     Send&& send,
     std::shared_ptr<BackendInterface const> backend,
+    std::shared_ptr<SubscriptionManager> subscriptions,
     std::shared_ptr<ETLLoadBalancer> balancer,
     std::shared_ptr<ReportingETL const> etl,
     DOSGuard& dosGuard,
@@ -287,7 +308,9 @@ handle_request(
         std::string content_type,
         std::string message) {
         http::response<http::string_body> res{status, req.version()};
-        res.set(http::field::server, "xrpl-reporting-server-v0.0.0");
+        res.set(
+            http::field::server,
+            "clio-server-" + Build::getClioVersionString());
         res.set(http::field::content_type, content_type);
         res.keep_alive(req.keep_alive());
         res.body() = std::string(message);
@@ -307,13 +330,13 @@ handle_request(

     if (!dosGuard.isOk(ip))
         return send(httpResponse(
-            http::status::ok,
-            "application/json",
-            boost::json::serialize(RPC::make_error(RPC::Error::rpcSLOW_DOWN))));
+            http::status::service_unavailable,
+            "text/plain",
+            "Server is overloaded"));

     try
     {
-        BOOST_LOG_TRIVIAL(info) << "Received request: " << req.body();
+        BOOST_LOG_TRIVIAL(debug) << "Received request: " << req.body();

         boost::json::object request;
         std::string responseStr = "";
@@ -333,13 +356,6 @@ handle_request(
                     RPC::make_error(RPC::Error::rpcBAD_SYNTAX))));
         }

-        if (!dosGuard.isOk(ip))
-            return send(httpResponse(
-                http::status::ok,
-                "application/json",
-                boost::json::serialize(
-                    RPC::make_error(RPC::Error::rpcSLOW_DOWN))));
-
         auto range = backend->fetchLedgerRange();
         if (!range)
             return send(httpResponse(
@@ -349,7 +365,15 @@ handle_request(
                 RPC::make_error(RPC::Error::rpcNOT_READY))));

         std::optional<RPC::Context> context = RPC::make_HttpContext(
-            yc, request, backend, nullptr, balancer, etl, *range, counters, ip);
+            yc,
+            request,
+            backend,
+            subscriptions,
+            balancer,
+            etl,
+            *range,
+            counters,
+            ip);

         if (!context)
             return send(httpResponse(
@@ -377,7 +401,6 @@ handle_request(

             result = error;

-            responseStr = boost::json::serialize(response);
             BOOST_LOG_TRIVIAL(debug)
                 << __func__ << " Encountered error: " << responseStr;
         }
@@ -391,13 +414,23 @@ handle_request(

             if (!result.contains("error"))
                 result["status"] = "success";
-
-            responseStr = boost::json::serialize(response);
         }
+
+        boost::json::array warnings;
+        warnings.emplace_back(RPC::make_warning(RPC::warnRPC_CLIO));
+        auto lastCloseAge = context->etl->lastCloseAgeSeconds();
+        if (lastCloseAge >= 60)
+            warnings.emplace_back(RPC::make_warning(RPC::warnRPC_OUTDATED));
+        response["warnings"] = warnings;
+        responseStr = boost::json::serialize(response);
         if (!dosGuard.add(ip, responseStr.size()))
-            result["warning"] = "Too many requests";
+        {
+            response["warning"] = "load";
+            warnings.emplace_back(RPC::make_warning(RPC::warnRPC_RATE_LIMIT));
+            response["warnings"] = warnings;
+            // reserialize when we need to include this warning
+            responseStr = boost::json::serialize(response);
+        }
         return send(
             httpResponse(http::status::ok, "application/json", responseStr));
     }

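After this change every HTTP response carries a warnings array: warnRPC_CLIO is always attached, warnRPC_OUTDATED is added when the last ledger close is 60 seconds or older, and warnRPC_RATE_LIMIT forces exactly one re-serialization when the DOSGuard budget is exceeded. A small sketch of that decision order; RPC::make_warning's exact JSON shape is not shown in this diff, so plain placeholder objects stand in for it here:

#include <boost/json.hpp>

// Illustration of the warning policy above; the "id" values are
// placeholders, not the real output of RPC::make_warning.
boost::json::array
buildWarnings(long lastCloseAgeSeconds, bool rateLimited)
{
    boost::json::array warnings;
    warnings.emplace_back(boost::json::object{{"id", "clio"}});
    if (lastCloseAgeSeconds >= 60)
        warnings.emplace_back(boost::json::object{{"id", "outdated"}});
    if (rateLimited)  // triggers the one extra serialize of the response
        warnings.emplace_back(boost::json::object{{"id", "rate_limit"}});
    return warnings;
}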
@@ -25,6 +25,7 @@ public:
         std::shared_ptr<ReportingETL const> etl,
         DOSGuard& dosGuard,
         RPC::Counters& counters,
+        WorkQueue& queue,
         boost::beast::flat_buffer buffer)
         : HttpBase<HttpSession>(
               ioc,
@@ -34,6 +35,7 @@ public:
               etl,
               dosGuard,
               counters,
+              queue,
               std::move(buffer))
         , stream_(std::move(socket))
     {

@@ -30,6 +30,7 @@ class Detector
     std::shared_ptr<ReportingETL const> etl_;
     DOSGuard& dosGuard_;
     RPC::Counters& counters_;
+    WorkQueue& queue_;
     boost::beast::flat_buffer buffer_;

 public:
@@ -42,7 +43,8 @@ public:
         std::shared_ptr<ETLLoadBalancer> balancer,
         std::shared_ptr<ReportingETL const> etl,
         DOSGuard& dosGuard,
-        RPC::Counters& counters)
+        RPC::Counters& counters,
+        WorkQueue& queue)
         : ioc_(ioc)
         , stream_(std::move(socket))
         , ctx_(ctx)
@@ -52,6 +54,7 @@ public:
         , etl_(etl)
         , dosGuard_(dosGuard)
         , counters_(counters)
+        , queue_(queue)
     {
     }

@@ -101,6 +104,7 @@ public:
                 etl_,
                 dosGuard_,
                 counters_,
+                queue_,
                 std::move(buffer_))
                 ->run();
             return;
@@ -116,6 +120,7 @@ public:
                 etl_,
                 dosGuard_,
                 counters_,
+                queue_,
                 std::move(buffer_))
                 ->run();
     }
@@ -132,7 +137,8 @@ make_websocket_session(
     std::shared_ptr<ETLLoadBalancer> balancer,
     std::shared_ptr<ReportingETL const> etl,
     DOSGuard& dosGuard,
-    RPC::Counters& counters)
+    RPC::Counters& counters,
+    WorkQueue& queue)
 {
     std::make_shared<WsUpgrader>(
         ioc,
@@ -143,6 +149,7 @@ make_websocket_session(
         etl,
         dosGuard,
         counters,
+        queue,
         std::move(buffer),
         std::move(req))
         ->run();
@@ -159,7 +166,8 @@ make_websocket_session(
     std::shared_ptr<ETLLoadBalancer> balancer,
     std::shared_ptr<ReportingETL const> etl,
     DOSGuard& dosGuard,
-    RPC::Counters& counters)
+    RPC::Counters& counters,
+    WorkQueue& queue)
 {
     std::make_shared<SslWsUpgrader>(
         ioc,
@@ -170,6 +178,7 @@ make_websocket_session(
         etl,
         dosGuard,
         counters,
+        queue,
         std::move(buffer),
         std::move(req))
         ->run();
@@ -190,11 +199,14 @@ class Listener
     std::shared_ptr<ETLLoadBalancer> balancer_;
     std::shared_ptr<ReportingETL const> etl_;
     DOSGuard& dosGuard_;
+    WorkQueue queue_;
     RPC::Counters counters_;

 public:
     Listener(
         boost::asio::io_context& ioc,
+        uint32_t numWorkerThreads,
+        uint32_t maxQueueSize,
         std::optional<std::reference_wrapper<ssl::context>> ctx,
         tcp::endpoint endpoint,
         std::shared_ptr<BackendInterface const> backend,
@@ -210,6 +222,7 @@ public:
         , balancer_(balancer)
         , etl_(etl)
         , dosGuard_(dosGuard)
+        , queue_(numWorkerThreads, maxQueueSize)
     {
         boost::beast::error_code ec;

@@ -271,7 +284,8 @@ private:
             balancer_,
             etl_,
             dosGuard_,
-            counters_)
+            counters_,
+            queue_)
             ->run();
     }

@@ -306,8 +320,19 @@ make_HttpServer(
     auto const port =
         static_cast<unsigned short>(serverConfig.at("port").as_int64());

+    uint32_t numThreads = std::thread::hardware_concurrency();
+    if (config.contains("workers"))
+        numThreads = config.at("workers").as_int64();
+    uint32_t maxQueueSize = 0;  // no max
+    if (serverConfig.contains("max_queue_size"))
+        maxQueueSize = serverConfig.at("max_queue_size").as_int64();
+    BOOST_LOG_TRIVIAL(info) << __func__ << " Number of workers = " << numThreads
+                            << ". Max queue size = " << maxQueueSize;
+
     auto server = std::make_shared<HttpServer>(
         ioc,
+        numThreads,
+        maxQueueSize,
         sslCtx,
         boost::asio::ip::tcp::endpoint{address, port},
         backend,

@@ -38,6 +38,7 @@ public:
|
|||||||
std::shared_ptr<ReportingETL const> etl,
|
std::shared_ptr<ReportingETL const> etl,
|
||||||
DOSGuard& dosGuard,
|
DOSGuard& dosGuard,
|
||||||
RPC::Counters& counters,
|
RPC::Counters& counters,
|
||||||
|
WorkQueue& queue,
|
||||||
boost::beast::flat_buffer&& buffer)
|
boost::beast::flat_buffer&& buffer)
|
||||||
: WsSession(
|
: WsSession(
|
||||||
ioc,
|
ioc,
|
||||||
@@ -47,6 +48,7 @@ public:
|
|||||||
etl,
|
etl,
|
||||||
dosGuard,
|
dosGuard,
|
||||||
counters,
|
counters,
|
||||||
|
queue,
|
||||||
std::move(buffer))
|
std::move(buffer))
|
||||||
, ws_(std::move(socket))
|
, ws_(std::move(socket))
|
||||||
{
|
{
|
||||||
@@ -91,6 +93,7 @@ class WsUpgrader : public std::enable_shared_from_this<WsUpgrader>
|
|||||||
std::shared_ptr<ReportingETL const> etl_;
|
std::shared_ptr<ReportingETL const> etl_;
|
||||||
DOSGuard& dosGuard_;
|
DOSGuard& dosGuard_;
|
||||||
RPC::Counters& counters_;
|
RPC::Counters& counters_;
|
||||||
|
WorkQueue& queue_;
|
||||||
http::request<http::string_body> req_;
|
http::request<http::string_body> req_;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
@@ -103,6 +106,7 @@ public:
|
|||||||
std::shared_ptr<ReportingETL const> etl,
|
std::shared_ptr<ReportingETL const> etl,
|
||||||
DOSGuard& dosGuard,
|
DOSGuard& dosGuard,
|
||||||
RPC::Counters& counters,
|
RPC::Counters& counters,
|
||||||
|
WorkQueue& queue,
|
||||||
boost::beast::flat_buffer&& b)
|
boost::beast::flat_buffer&& b)
|
||||||
: ioc_(ioc)
|
: ioc_(ioc)
|
||||||
, http_(std::move(socket))
|
, http_(std::move(socket))
|
||||||
@@ -113,6 +117,7 @@ public:
|
|||||||
, etl_(etl)
|
, etl_(etl)
|
||||||
, dosGuard_(dosGuard)
|
, dosGuard_(dosGuard)
|
||||||
, counters_(counters)
|
, counters_(counters)
|
||||||
|
, queue_(queue)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
WsUpgrader(
|
WsUpgrader(
|
||||||
@@ -124,6 +129,7 @@ public:
|
|||||||
std::shared_ptr<ReportingETL const> etl,
|
std::shared_ptr<ReportingETL const> etl,
|
||||||
DOSGuard& dosGuard,
|
DOSGuard& dosGuard,
|
||||||
RPC::Counters& counters,
|
RPC::Counters& counters,
|
||||||
|
WorkQueue& queue,
|
||||||
boost::beast::flat_buffer&& b,
|
boost::beast::flat_buffer&& b,
|
||||||
http::request<http::string_body> req)
|
http::request<http::string_body> req)
|
||||||
: ioc_(ioc)
|
: ioc_(ioc)
|
||||||
@@ -135,6 +141,7 @@ public:
|
|||||||
, etl_(etl)
|
, etl_(etl)
|
||||||
, dosGuard_(dosGuard)
|
, dosGuard_(dosGuard)
|
||||||
, counters_(counters)
|
, counters_(counters)
|
||||||
|
, queue_(queue)
|
||||||
, req_(std::move(req))
|
, req_(std::move(req))
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
@@ -190,6 +197,7 @@ private:
|
|||||||
etl_,
|
etl_,
|
||||||
dosGuard_,
|
dosGuard_,
|
||||||
counters_,
|
counters_,
|
||||||
|
queue_,
|
||||||
std::move(buffer_))
|
std::move(buffer_))
|
||||||
->run(std::move(req_));
|
->run(std::move(req_));
|
||||||
}
|
}
|
||||||

@@ -26,6 +26,7 @@ public:
         std::shared_ptr<ReportingETL const> etl,
         DOSGuard& dosGuard,
         RPC::Counters& counters,
+        WorkQueue& queue,
         boost::beast::flat_buffer buffer)
         : HttpBase<SslHttpSession>(
               ioc,
@@ -35,6 +36,7 @@ public:
               etl,
               dosGuard,
               counters,
+              queue,
               std::move(buffer))
         , stream_(std::move(socket), ctx)
     {

@@ -36,6 +36,7 @@ public:
         std::shared_ptr<ReportingETL const> etl,
         DOSGuard& dosGuard,
         RPC::Counters& counters,
+        WorkQueue& queue,
         boost::beast::flat_buffer&& b)
         : WsSession(
               ioc,
@@ -45,6 +46,7 @@ public:
               etl,
               dosGuard,
               counters,
+              queue,
               std::move(b))
         , ws_(std::move(stream))
     {
@@ -88,6 +90,7 @@ class SslWsUpgrader : public std::enable_shared_from_this<SslWsUpgrader>
     std::shared_ptr<ReportingETL const> etl_;
     DOSGuard& dosGuard_;
     RPC::Counters& counters_;
+    WorkQueue& queue_;
     http::request<http::string_body> req_;

 public:
@@ -101,6 +104,7 @@ public:
         std::shared_ptr<ReportingETL const> etl,
         DOSGuard& dosGuard,
         RPC::Counters& counters,
+        WorkQueue& queue,
         boost::beast::flat_buffer&& b)
         : ioc_(ioc)
         , https_(std::move(socket), ctx)
@@ -111,6 +115,7 @@ public:
         , etl_(etl)
         , dosGuard_(dosGuard)
         , counters_(counters)
+        , queue_(queue)
     {
     }
     SslWsUpgrader(
@@ -122,6 +127,7 @@ public:
         std::shared_ptr<ReportingETL const> etl,
         DOSGuard& dosGuard,
         RPC::Counters& counters,
+        WorkQueue& queue,
         boost::beast::flat_buffer&& b,
         http::request<http::string_body> req)
         : ioc_(ioc)
@@ -133,6 +139,7 @@ public:
         , etl_(etl)
         , dosGuard_(dosGuard)
         , counters_(counters)
+        , queue_(queue)
         , req_(std::move(req))
     {
     }
@@ -203,6 +210,7 @@ private:
             etl_,
             dosGuard_,
             counters_,
+            queue_,
             std::move(buffer_))
             ->run(std::move(req_));
     }

@@ -9,8 +9,11 @@

 #include <backend/BackendInterface.h>
 #include <etl/ETLSource.h>
+#include <etl/ReportingETL.h>
 #include <rpc/Counters.h>
 #include <rpc/RPC.h>
+#include <rpc/WorkQueue.h>
+#include <subscriptions/Message.h>
 #include <subscriptions/SubscriptionManager.h>
 #include <webserver/DOSGuard.h>

@@ -49,7 +52,7 @@ protected:
 public:
     // Send, that enables SubscriptionManager to publish to clients
     virtual void
-    send(std::string const& msg) = 0;
+    send(std::shared_ptr<Message> msg) = 0;

     virtual ~WsBase()
     {
@@ -84,10 +87,11 @@ class WsSession : public WsBase,
     std::shared_ptr<ReportingETL const> etl_;
     DOSGuard& dosGuard_;
     RPC::Counters& counters_;
+    WorkQueue& queue_;
     std::mutex mtx_;

     bool sending_ = false;
-    std::queue<std::string> messages_;
+    std::queue<std::shared_ptr<Message>> messages_;

     void
     wsFail(boost::beast::error_code ec, char const* what)
@@ -98,6 +102,9 @@ class WsSession : public WsBase,
             BOOST_LOG_TRIVIAL(info)
                 << "wsFail: " << what << ": " << ec.message();
             boost::beast::get_lowest_layer(derived().ws()).socket().close(ec);
+
+            if (auto manager = subscriptions_.lock(); manager)
+                manager->cleanup(derived().shared_from_this());
         }
     }

@@ -110,6 +117,7 @@ public:
         std::shared_ptr<ReportingETL const> etl,
         DOSGuard& dosGuard,
         RPC::Counters& counters,
+        WorkQueue& queue,
         boost::beast::flat_buffer&& buffer)
         : buffer_(std::move(buffer))
         , ioc_(ioc)
@@ -119,6 +127,7 @@ public:
         , etl_(etl)
         , dosGuard_(dosGuard)
         , counters_(counters)
+        , queue_(queue)
     {
     }
     virtual ~WsSession()
@@ -138,7 +147,7 @@ public:
         {
             sending_ = true;
             derived().ws().async_write(
-                net::buffer(messages_.front()),
+                net::buffer(messages_.front()->data(), messages_.front()->size()),
                 boost::beast::bind_front_handler(
                     &WsSession::on_write, derived().shared_from_this()));
         }
@@ -168,18 +177,25 @@ public:
     }

     void
-    send(std::string const& msg) override
+    send(std::shared_ptr<Message> msg) override
     {
         net::dispatch(
             derived().ws().get_executor(),
             [this,
              self = derived().shared_from_this(),
-             msg = std::string(msg)]() {
+             msg = std::move(msg)]() {
                 messages_.push(std::move(msg));
                 maybe_send_next();
             });
     }

+    void
+    send(std::string&& msg)
+    {
+        auto sharedMsg = std::make_shared<Message>(std::move(msg));
+        send(sharedMsg);
+    }
+
     void
     run(http::request<http::string_body> req)
     {
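`send` now takes a `std::shared_ptr<Message>` and the write path reads `messages_.front()->data()` and `->size()`. A plausible minimal shape for that wrapper -- the real class lives in `subscriptions/Message.h`, which is not shown in this diff, so treat this as a sketch -- is just a string holder with shared ownership, so one serialized payload can be queued to many subscriber sessions without a per-client copy and stays alive for the duration of each `async_write`:

```cpp
#include <cstddef>
#include <memory>
#include <string>

class Message
{
    std::string message_;

public:
    Message() = default;

    // Takes ownership of an already-serialized JSON payload.
    explicit Message(std::string&& message) : message_(std::move(message))
    {
    }

    char const*
    data() const
    {
        return message_.data();
    }

    std::size_t
    size() const
    {
        return message_.size();
    }
};
```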
@@ -229,21 +245,26 @@ public:
     }

     void
-    handle_request(std::string const&& msg, boost::asio::yield_context& yc)
+    handle_request(
+        boost::json::object const&& request,
+        boost::json::value const& id,
+        boost::asio::yield_context& yield)
     {
         auto ip = derived().ip();
         if (!ip)
             return;

         boost::json::object response = {};
-        auto sendError = [this](auto error) {
-            send(boost::json::serialize(RPC::make_error(error)));
+        auto sendError = [this, &request, id](auto error) {
+            auto e = RPC::make_error(error);
+            if (!id.is_null())
+                e["id"] = id;
+            e["request"] = request;
+            this->send(boost::json::serialize(e));
         };

         try
         {
-            boost::json::value raw = boost::json::parse(msg);
-            boost::json::object request = raw.as_object();

             BOOST_LOG_TRIVIAL(debug) << " received request : " << request;
             try
             {
@@ -252,7 +273,7 @@ public:
                     return sendError(RPC::Error::rpcNOT_READY);

                 std::optional<RPC::Context> context = RPC::make_WsContext(
-                    yc,
+                    yield,
                     request,
                     backend_,
                     subscriptions_.lock(),
@@ -266,8 +287,6 @@ public:
                 if (!context)
                     return sendError(RPC::Error::rpcBAD_SYNTAX);

-                auto id = request.contains("id") ? request.at("id") : nullptr;
-
                 response = getDefaultWsResponse(id);

                 auto start = std::chrono::system_clock::now();
@@ -310,8 +329,23 @@ public:
                     return sendError(RPC::Error::rpcINTERNAL);
                 }

+                boost::json::array warnings;
+
+                warnings.emplace_back(RPC::make_warning(RPC::warnRPC_CLIO));
+
+                auto lastCloseAge = etl_->lastCloseAgeSeconds();
+                if (lastCloseAge >= 60)
+                    warnings.emplace_back(RPC::make_warning(RPC::warnRPC_OUTDATED));
+                response["warnings"] = warnings;
                 std::string responseStr = boost::json::serialize(response);
-                dosGuard_.add(*ip, responseStr.size());
+                if (!dosGuard_.add(*ip, responseStr.size()))
+                {
+                    response["warning"] = "load";
+                    warnings.emplace_back(RPC::make_warning(RPC::warnRPC_RATE_LIMIT));
+                    response["warnings"] = warnings;
+                    // reserialize if we need to include this warning
+                    responseStr = boost::json::serialize(response);
+                }
                 send(std::move(responseStr));
             }

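Per the hunk above, every websocket response now carries a `warnings` array: a "this is Clio" warning unconditionally, an outdated-server warning when the last ledger close is 60 seconds or older, and a rate-limit warning (plus the legacy `"warning": "load"` field) when DOSGuard rejects the response size. A sketch of that decision logic with Boost.JSON follows; `makeWarning`, the numeric codes, and the message strings are illustrative stand-ins, not clio's actual `RPC::make_warning` output:

```cpp
#include <boost/json.hpp>
#include <string>

// Stand-in for RPC::make_warning; real codes/texts live in rpc/RPC.h.
boost::json::object
makeWarning(int code, std::string msg)
{
    return {{"id", code}, {"message", std::move(msg)}};
}

// Mirrors the branching the diff adds before serializing the response.
boost::json::array
collectWarnings(long lastCloseAgeSeconds, bool rateLimited)
{
    boost::json::array warnings;
    warnings.emplace_back(makeWarning(2001, "This is a clio server."));
    if (lastCloseAgeSeconds >= 60)
        warnings.emplace_back(
            makeWarning(2002, "This server may be out of date."));
    if (rateLimited)
        warnings.emplace_back(
            makeWarning(2003, "You are about to be rate limited."));
    return warnings;
}
```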
@@ -332,25 +366,55 @@ public:

         BOOST_LOG_TRIVIAL(debug)
             << __func__ << " received request from ip = " << *ip;
-        if (!dosGuard_.isOk(*ip))
-        {
-            boost::json::object response;
-            response["error"] = "Too many requests. Slow down";
-            std::string responseStr = boost::json::serialize(response);

+        auto sendError = [this, ip](
+                             auto error,
+                             boost::json::value const& id,
+                             boost::json::object const& request) {
+            auto e = RPC::make_error(error);
+
+            if (!id.is_null())
+                e["id"] = id;
+            e["request"] = request;
+
+            auto responseStr = boost::json::serialize(e);
             BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << responseStr;

             dosGuard_.add(*ip, responseStr.size());
             send(std::move(responseStr));
+        };
+
+        boost::json::value raw = [](std::string const&& msg) {
+            try
+            {
+                return boost::json::parse(msg);
+            }
+            catch (std::exception&)
+            {
+                return boost::json::value{nullptr};
+            }
+        }(std::move(msg));
+
+        boost::json::object request;
+        if (!raw.is_object())
+            return sendError(RPC::Error::rpcINVALID_PARAMS, nullptr, request);
+        request = raw.as_object();
+
+        auto id = request.contains("id") ? request.at("id") : nullptr;
+
+        if (!dosGuard_.isOk(*ip))
+        {
+            sendError(RPC::Error::rpcSLOW_DOWN, id, request);
         }
         else
         {
-            boost::asio::spawn(
-                derived().ws().get_executor(),
-                [m = std::move(msg), shared_this = shared_from_this()](
-                    boost::asio::yield_context yield) {
-                    shared_this->handle_request(std::move(m), yield);
-                });
+            if (!queue_.postCoro(
+                    [shared_this = shared_from_this(),
+                     r = std::move(request),
+                     id](boost::asio::yield_context yield) {
+                        shared_this->handle_request(std::move(r), id, yield);
+                    },
+                    dosGuard_.isWhiteListed(*ip)))
+                sendError(RPC::Error::rpcTOO_BUSY, id, request);
         }

         do_read();
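Instead of spawning a coroutine per message directly on the websocket executor, the session now hands the parsed request to the shared WorkQueue via `postCoro`, which returns `false` when the queue is saturated and the client is not whitelisted; the caller then answers `rpcTOO_BUSY`. A minimal sketch of that contract, under assumptions modeled on the diff rather than on the real `rpc/WorkQueue.h` header (`CoroQueueSketch` is a hypothetical name):

```cpp
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <atomic>
#include <cstdint>

class CoroQueueSketch
{
    boost::asio::io_context& ioc_;
    std::uint32_t maxSize_;
    std::atomic_uint32_t queued_{0};

public:
    CoroQueueSketch(boost::asio::io_context& ioc, std::uint32_t maxSize)
        : ioc_(ioc), maxSize_(maxSize)
    {
    }

    // Accepts a handler needing a yield_context; refuses it when the
    // queue is saturated and the client is not whitelisted, otherwise
    // runs it inside a coroutine spawned on the queue's io_context.
    template <typename F>
    bool
    postCoro(F&& f, bool isWhiteListed)
    {
        if (maxSize_ != 0 && queued_ >= maxSize_ && !isWhiteListed)
            return false;  // caller replies with rpcTOO_BUSY

        ++queued_;
        boost::asio::spawn(
            ioc_,
            [this, f = std::forward<F>(f)](
                boost::asio::yield_context yield) mutable {
                f(yield);
                --queued_;
            });
        return true;
    }
};
```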

57  test.py

@@ -475,14 +475,13 @@ async def ledger_data(ip, port, ledger, limit, binary, cursor):
     except websockets.exceptions.connectionclosederror as e:
         print(e)

-def writeLedgerData(data,filename):
-    print(len(data[0]))
+def writeLedgerData(state,filename):
+    print(len(state))

     with open(filename,'w') as f:
-        data[0].sort()
-        data[1].sort()
-        for k,v in zip(data[0],data[1]):
+        for k,v in state.items():
             f.write(k)
-            f.write('\n')
+            f.write(':')
             f.write(v)
             f.write('\n')

@@ -490,15 +489,14 @@ def writeLedgerData(data,filename):
 async def ledger_data_full(ip, port, ledger, binary, limit, typ=None, count=-1, marker = None):
     address = 'ws://' + str(ip) + ':' + str(port)
     try:
-        blobs = []
-        keys = []
+        state = {}
         async with websockets.connect(address,max_size=1000000000) as ws:
             if int(limit) < 2048:
                 limit = 2048
             while True:
                 res = {}
                 if marker is None:
-                    await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":binary, "limit":int(limit)}))
+                    await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":binary, "limit":int(limit),"out_of_order":True}))
                     res = json.loads(await ws.recv())

                 else:
@@ -520,16 +518,15 @@ async def ledger_data_full(ip, port, ledger, binary, limit, typ=None, count=-1,
                     if binary:
                         if typ is None or x["data"][2:6] == typ:
                             #print(json.dumps(x))
-                            keys.append(x["index"])
+                            state[x["index"]] = x["data"]
                     else:
                         if typ is None or x["LedgerEntryType"] == typ:
-                            blobs.append(x)
-                            keys.append(x["index"])
-                if count != -1 and len(keys) > count:
+                            state[x["index"]] = x
+                if count != -1 and len(state) > count:
                     print("stopping early")
-                    print(len(keys))
+                    print(len(state))
                     print("done")
-                    return (keys,blobs)
+                    return state
                 if "cursor" in res:
                     marker = res["cursor"]
                     print(marker)
@@ -538,7 +535,7 @@ async def ledger_data_full(ip, port, ledger, binary, limit, typ=None, count=-1,
                     print(marker)
                 else:
                     print("done")
-                    return (keys, blobs)
+                    return state


     except websockets.exceptions.connectionclosederror as e:
@@ -574,7 +571,19 @@ def compare_book_offers(aldous, p2p):
     print("offers match!")
     return True

+async def book_changes(ip, port, ledger):
+    address = 'ws://' + str(ip) + ':' + str(port)
+    try:
+        async with websockets.connect(address) as ws:
+            await ws.send(json.dumps({
+                "command" : "book_changes",
+                "ledger_index" : ledger
+            }))
+            res = json.loads(await ws.recv())
+            print(json.dumps(res, indent=4, sort_keys=True))
+    except websockets.exceptions.connectionclosederror as e:
+        print(e)
+
 async def book_offerses(ip, port, ledger, books, numCalls):
     address = 'ws://' + str(ip) + ':' + str(port)
     random.seed()
@@ -792,6 +801,7 @@ async def fee(ip, port):
         print(json.dumps(res,indent=4,sort_keys=True))
     except websockets.exceptions.connectionclosederror as e:
         print(e)
+
 async def server_info(ip, port):
     address = 'ws://' + str(ip) + ':' + str(port)
     try:
@@ -971,7 +981,7 @@ async def verifySubscribe(ip,clioPort,ripdPort):


 parser = argparse.ArgumentParser(description='test script for xrpl-reporting')
-parser.add_argument('action', choices=["account_info", "tx", "txs","account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range","ledger_entry", "ledgers", "ledger_entries","account_txs","account_infos","account_txs_full","book_offerses","ledger_diff","perf","fee","server_info", "gaps","subscribe","verify_subscribe","call"])
+parser.add_argument('action', choices=["account_info", "tx", "txs","account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range","ledger_entry", "ledgers", "ledger_entries","account_txs","account_infos","account_txs_full","book_changes","book_offerses","ledger_diff","perf","fee","server_info", "gaps","subscribe","verify_subscribe","call"])

 parser.add_argument('--ip', default='127.0.0.1')
 parser.add_argument('--port', default='8080')
@@ -1159,14 +1169,17 @@ def run(args):
         end = datetime.datetime.now().timestamp()
         num = int(args.numRunners) * int(args.numCalls)
         print("Completed " + str(num) + " in " + str(end - start) + " seconds. Throughput = " + str(num / (end - start)) + " calls per second")

+    elif args.action == "book_changes":
+        asyncio.get_event_loop().run_until_complete(book_changes(args.ip, args.port, int(args.ledger)))
+
     elif args.action == "book_offerses":
         books = getBooks(args.filename)
         async def runner():

             tasks = []
-            for x in range(0,int(args.numRunners)):
-                tasks.append(asyncio.create_task(book_offerses(args.ip, args.port,int(args.ledger),books, int(args.numCalls))))
+            for x in range(0, int(args.numRunners)):
+                tasks.append(asyncio.create_task(book_offerses(args.ip, args.port, int(args.ledger), books, int(args.numCalls))))
             for t in tasks:
                 await t

@@ -1263,7 +1276,7 @@ def run(args):

         res = asyncio.get_event_loop().run_until_complete(
             ledger_data_full(args.ip, args.port, args.ledger, bool(args.binary), args.limit,args.type, int(args.count), args.marker))
-        print(len(res[0]))
+        print(len(res))
         if args.verify:
             writeLedgerData(res,args.filename)


@@ -1,5 +1,6 @@
 #include <algorithm>
 #include <backend/DBHelpers.h>
+#include <etl/ReportingETL.h>
 #include <gtest/gtest.h>
 #include <rpc/RPCHelpers.h>

@@ -296,6 +297,122 @@ TEST(BackendTest, Basic)
         "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C"
         "004F06";

+    // An NFTokenMint tx
+    std::string nftTxnHex =
+        "1200192200000008240011CC9B201B001F71D6202A0000000168400000"
+        "000000000C7321ED475D1452031E8F9641AF1631519A58F7B8681E172E"
+        "4838AA0E59408ADA1727DD74406960041F34F10E0CBB39444B4D4E577F"
+        "C0B7E8D843D091C2917E96E7EE0E08B30C91413EC551A2B8A1D405E8BA"
+        "34FE185D8B10C53B40928611F2DE3B746F0303751868747470733A2F2F"
+        "677265677765697362726F642E636F6D81146203F49C21D5D6E022CB16"
+        "DE3538F248662FC73C";
+
+    std::string nftTxnMeta =
+        "201C00000001F8E511005025001F71B3556ED9C9459001E4F4A9121F4E"
+        "07AB6D14898A5BBEF13D85C25D743540DB59F3CF566203F49C21D5D6E0"
+        "22CB16DE3538F248662FC73CFFFFFFFFFFFFFFFFFFFFFFFFE6FAEC5A00"
+        "0800006203F49C21D5D6E022CB16DE3538F248662FC73C8962EFA00000"
+        "0006751868747470733A2F2F677265677765697362726F642E636F6DE1"
+        "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C93E8B1"
+        "C200000028751868747470733A2F2F677265677765697362726F642E63"
+        "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C"
+        "9808B6B90000001D751868747470733A2F2F677265677765697362726F"
+        "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866"
+        "2FC73C9C28BBAC00000012751868747470733A2F2F6772656777656973"
+        "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538"
+        "F248662FC73CA048C0A300000007751868747470733A2F2F6772656777"
+        "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16"
+        "DE3538F248662FC73CAACE82C500000029751868747470733A2F2F6772"
+        "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0"
+        "22CB16DE3538F248662FC73CAEEE87B80000001E751868747470733A2F"
+        "2F677265677765697362726F642E636F6DE1EC5A000800006203F49C21"
+        "D5D6E022CB16DE3538F248662FC73CB30E8CAF00000013751868747470"
+        "733A2F2F677265677765697362726F642E636F6DE1EC5A000800006203"
+        "F49C21D5D6E022CB16DE3538F248662FC73CB72E91A200000008751868"
+        "747470733A2F2F677265677765697362726F642E636F6DE1EC5A000800"
+        "006203F49C21D5D6E022CB16DE3538F248662FC73CC1B453C40000002A"
+        "751868747470733A2F2F677265677765697362726F642E636F6DE1EC5A"
+        "000800006203F49C21D5D6E022CB16DE3538F248662FC73CC5D458BB00"
+        "00001F751868747470733A2F2F677265677765697362726F642E636F6D"
+        "E1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CC9F4"
+        "5DAE00000014751868747470733A2F2F677265677765697362726F642E"
+        "636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC7"
+        "3CCE1462A500000009751868747470733A2F2F67726567776569736272"
+        "6F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248"
+        "662FC73CD89A24C70000002B751868747470733A2F2F67726567776569"
+        "7362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE35"
+        "38F248662FC73CDCBA29BA00000020751868747470733A2F2F67726567"
+        "7765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB"
+        "16DE3538F248662FC73CE0DA2EB100000015751868747470733A2F2F67"
+        "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6"
+        "E022CB16DE3538F248662FC73CE4FA33A40000000A751868747470733A"
+        "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C"
+        "21D5D6E022CB16DE3538F248662FC73CF39FFABD000000217518687474"
+        "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062"
+        "03F49C21D5D6E022CB16DE3538F248662FC73CF7BFFFB0000000167518"
+        "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008"
+        "00006203F49C21D5D6E022CB16DE3538F248662FC73CFBE004A7000000"
+        "0B751868747470733A2F2F677265677765697362726F642E636F6DE1F1"
+        "E1E72200000000501A6203F49C21D5D6E022CB16DE3538F248662FC73C"
+        "662FC73C8962EFA000000006FAEC5A000800006203F49C21D5D6E022CB"
+        "16DE3538F248662FC73C8962EFA000000006751868747470733A2F2F67"
+        "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6"
+        "E022CB16DE3538F248662FC73C93E8B1C200000028751868747470733A"
+        "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C"
+        "21D5D6E022CB16DE3538F248662FC73C9808B6B90000001D7518687474"
+        "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062"
+        "03F49C21D5D6E022CB16DE3538F248662FC73C9C28BBAC000000127518"
+        "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008"
+        "00006203F49C21D5D6E022CB16DE3538F248662FC73CA048C0A3000000"
+        "07751868747470733A2F2F677265677765697362726F642E636F6DE1EC"
+        "5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAACE82C5"
+        "00000029751868747470733A2F2F677265677765697362726F642E636F"
+        "6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAE"
+        "EE87B80000001E751868747470733A2F2F677265677765697362726F64"
+        "2E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662F"
+        "C73CB30E8CAF00000013751868747470733A2F2F677265677765697362"
+        "726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F2"
+        "48662FC73CB72E91A200000008751868747470733A2F2F677265677765"
+        "697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE"
+        "3538F248662FC73CC1B453C40000002A751868747470733A2F2F677265"
+        "677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022"
+        "CB16DE3538F248662FC73CC5D458BB0000001F751868747470733A2F2F"
+        "677265677765697362726F642E636F6DE1EC5A000800006203F49C21D5"
+        "D6E022CB16DE3538F248662FC73CC9F45DAE0000001475186874747073"
+        "3A2F2F677265677765697362726F642E636F6DE1EC5A000800006203F4"
+        "9C21D5D6E022CB16DE3538F248662FC73CCE1462A50000000975186874"
+        "7470733A2F2F677265677765697362726F642E636F6DE1EC5A00080000"
+        "6203F49C21D5D6E022CB16DE3538F248662FC73CD89A24C70000002B75"
+        "1868747470733A2F2F677265677765697362726F642E636F6DE1EC5A00"
+        "0800006203F49C21D5D6E022CB16DE3538F248662FC73CDCBA29BA0000"
+        "0020751868747470733A2F2F677265677765697362726F642E636F6DE1"
+        "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CE0DA2E"
+        "B100000015751868747470733A2F2F677265677765697362726F642E63"
+        "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C"
+        "E4FA33A40000000A751868747470733A2F2F677265677765697362726F"
+        "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866"
+        "2FC73CEF7FF5C60000002C751868747470733A2F2F6772656777656973"
+        "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538"
+        "F248662FC73CF39FFABD00000021751868747470733A2F2F6772656777"
+        "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16"
+        "DE3538F248662FC73CF7BFFFB000000016751868747470733A2F2F6772"
+        "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0"
+        "22CB16DE3538F248662FC73CFBE004A70000000B751868747470733A2F"
+        "2F677265677765697362726F642E636F6DE1F1E1E1E511006125001F71"
+        "B3556ED9C9459001E4F4A9121F4E07AB6D14898A5BBEF13D85C25D7435"
+        "40DB59F3CF56BE121B82D5812149D633F605EB07265A80B762A365CE94"
+        "883089FEEE4B955701E6240011CC9B202B0000002C6240000002540BE3"
+        "ECE1E72200000000240011CC9C2D0000000A202B0000002D202C000000"
+        "066240000002540BE3E081146203F49C21D5D6E022CB16DE3538F24866"
+        "2FC73CE1E1F1031000";
+    std::string nftTxnHashHex =
+        "6C7F69A6D25A13AC4A2E9145999F45D4674F939900017A96885FDC2757"
+        "E9284E";
+    ripple::uint256 nftID;
+    EXPECT_TRUE(
+        nftID.parseHex("000800006203F49C21D5D6E022CB16DE3538F248662"
+                       "FC73CEF7FF5C60000002C"));
+
     std::string metaBlob = hexStringToBinaryString(metaHex);
     std::string txnBlob = hexStringToBinaryString(txnHex);
     std::string hashBlob = hexStringToBinaryString(hashHex);
@@ -304,6 +421,10 @@ TEST(BackendTest, Basic)
             hexStringToBinaryString(accountIndexHex);
         std::vector<ripple::AccountID> affectedAccounts;

+        std::string nftTxnBlob = hexStringToBinaryString(nftTxnHex);
+        std::string nftTxnMetaBlob =
+            hexStringToBinaryString(nftTxnMeta);
+
         {
             backend->startWrites();
             lgrInfoNext.seq = lgrInfoNext.seq + 1;
@@ -322,23 +443,62 @@ TEST(BackendTest, Basic)
             {
                 affectedAccounts.push_back(a);
             }

             std::vector<AccountTransactionsData> accountTxData;
             accountTxData.emplace_back(txMeta, hash256, journal);

+            ripple::uint256 nftHash256;
+            EXPECT_TRUE(nftHash256.parseHex(nftTxnHashHex));
+            ripple::TxMeta nftTxMeta{
+                nftHash256, lgrInfoNext.seq, nftTxnMetaBlob};
+            ripple::SerialIter it{nftTxnBlob.data(), nftTxnBlob.size()};
+            ripple::STTx sttx{it};
+            auto const [parsedNFTTxsRef, parsedNFT] =
+                getNFTData(nftTxMeta, sttx);
+            // need to copy the nft txns so we can std::move later
+            std::vector<NFTTransactionsData> parsedNFTTxs;
+            parsedNFTTxs.insert(
+                parsedNFTTxs.end(),
+                parsedNFTTxsRef.begin(),
+                parsedNFTTxsRef.end());
+            EXPECT_EQ(parsedNFTTxs.size(), 1);
+            EXPECT_TRUE(parsedNFT.has_value());
+            EXPECT_EQ(parsedNFT->tokenID, nftID);
+            std::vector<NFTsData> nftData;
+            nftData.push_back(*parsedNFT);
+
             backend->writeLedger(
-                lgrInfoNext,
-                std::move(ledgerInfoToBinaryString(lgrInfoNext)));
+                lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
             backend->writeTransaction(
-                std::move(std::string{hashBlob}),
+                std::string{hashBlob},
                 lgrInfoNext.seq,
                 lgrInfoNext.closeTime.time_since_epoch().count(),
-                std::move(std::string{txnBlob}),
-                std::move(std::string{metaBlob}));
+                std::string{txnBlob},
+                std::string{metaBlob});
             backend->writeAccountTransactions(std::move(accountTxData));
+
+            // NFT writing not yet implemented for pg
+            if (config == cassandraConfig)
+            {
+                backend->writeNFTs(std::move(nftData));
+                backend->writeNFTTransactions(std::move(parsedNFTTxs));
+            }
+            else
+            {
+                EXPECT_THROW(
+                    { backend->writeNFTs(std::move(nftData)); },
+                    std::runtime_error);
+                EXPECT_THROW(
+                    {
+                        backend->writeNFTTransactions(
+                            std::move(parsedNFTTxs));
+                    },
+                    std::runtime_error);
+            }
+
             backend->writeLedgerObject(
-                std::move(std::string{accountIndexBlob}),
+                std::string{accountIndexBlob},
                 lgrInfoNext.seq,
-                std::move(std::string{accountBlob}));
+                std::string{accountBlob});
             backend->writeSuccessor(
                 uint256ToString(Backend::firstKey),
                 lgrInfoNext.seq,
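A recurring cleanup in these test hunks is dropping `std::move` around temporaries such as `ledgerInfoToBinaryString(...)` and `std::string{blob}`: a prvalue already binds to an rvalue-reference parameter, so the cast adds nothing, and compilers such as clang diagnose related patterns (`-Wpessimizing-move`, `-Wredundant-move`). A tiny self-contained illustration of why the two forms are equivalent:

```cpp
#include <iostream>
#include <string>
#include <utility>

std::string
makeBlob()
{
    return "blob";
}

void
consume(std::string&& s)
{
    std::cout << s << '\n';
}

int
main()
{
    consume(std::move(makeBlob()));        // redundant: already an rvalue
    consume(makeBlob());                   // what the diff switches to
    consume(std::move(std::string{"x"}));  // same story for a temporary
    consume(std::string{"x"});
}
```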
@@ -384,6 +544,34 @@ TEST(BackendTest, Basic)
                 EXPECT_FALSE(cursor);
             }

+            // NFT fetching not yet implemented for pg
+            if (config == cassandraConfig)
+            {
+                auto nft =
+                    backend->fetchNFT(nftID, lgrInfoNext.seq, yield);
+                EXPECT_TRUE(nft.has_value());
+                auto [nftTxns, cursor] = backend->fetchNFTTransactions(
+                    nftID, 100, true, {}, yield);
+                EXPECT_EQ(nftTxns.size(), 1);
+                EXPECT_EQ(nftTxns[0], nftTxns[0]);
+                EXPECT_FALSE(cursor);
+            }
+            else
+            {
+                EXPECT_THROW(
+                    {
+                        backend->fetchNFT(
+                            nftID, lgrInfoNext.seq, yield);
+                    },
+                    std::runtime_error);
+                EXPECT_THROW(
+                    {
+                        backend->fetchNFTTransactions(
+                            nftID, 100, true, {}, yield);
+                    },
+                    std::runtime_error);
+            }
+
             ripple::uint256 key256;
             EXPECT_TRUE(key256.parseHex(accountIndexHex));
             auto obj = backend->fetchLedgerObject(
@@ -417,16 +605,15 @@ TEST(BackendTest, Basic)
                 ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);

             backend->writeLedger(
-                lgrInfoNext,
-                std::move(ledgerInfoToBinaryString(lgrInfoNext)));
+                lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
             std::shuffle(
                 accountBlob.begin(),
                 accountBlob.end(),
                 std::default_random_engine(seed));
             backend->writeLedgerObject(
-                std::move(std::string{accountIndexBlob}),
+                std::string{accountIndexBlob},
                 lgrInfoNext.seq,
-                std::move(std::string{accountBlob}));
+                std::string{accountBlob});

             ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
         }
@@ -480,12 +667,11 @@ TEST(BackendTest, Basic)
                 ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);

             backend->writeLedger(
-                lgrInfoNext,
-                std::move(ledgerInfoToBinaryString(lgrInfoNext)));
+                lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
             backend->writeLedgerObject(
-                std::move(std::string{accountIndexBlob}),
+                std::string{accountIndexBlob},
                 lgrInfoNext.seq,
-                std::move(std::string{}));
+                std::string{});
             backend->writeSuccessor(
                 uint256ToString(Backend::firstKey),
                 lgrInfoNext.seq,
@@ -527,9 +713,8 @@ TEST(BackendTest, Basic)
                 EXPECT_FALSE(obj);
             }

-            auto generateObjects = [seed](
-                                       size_t numObjects,
-                                       uint32_t ledgerSequence) {
+            auto generateObjects = [](size_t numObjects,
+                                      uint32_t ledgerSequence) {
                 std::vector<std::pair<std::string, std::string>> res{
                     numObjects};
                 ripple::uint256 key;
@@ -551,26 +736,26 @@ TEST(BackendTest, Basic)
                 }
                 return objs;
             };
-            auto generateTxns =
-                [seed](size_t numTxns, uint32_t ledgerSequence) {
+            auto generateTxns = [](size_t numTxns,
+                                   uint32_t ledgerSequence) {
                 std::vector<
                     std::tuple<std::string, std::string, std::string>>
                     res{numTxns};
                 ripple::uint256 base;
                 base = ledgerSequence * 100000;
                 for (auto& blob : res)
                 {
                     ++base;
                     std::string hashStr{
                         (const char*)base.data(), base.size()};
                     std::string txnStr =
                         "tx" + std::to_string(ledgerSequence) + hashStr;
-                    std::string metaStr = "meta" +
-                        std::to_string(ledgerSequence) + hashStr;
+                    std::string metaStr =
+                        "meta" + std::to_string(ledgerSequence) + hashStr;
                     blob = std::make_tuple(hashStr, txnStr, metaStr);
                 }
                 return res;
             };
             auto generateAccounts = [](uint32_t ledgerSequence,
                                        uint32_t numAccounts) {
                 std::vector<ripple::AccountID> accounts;
@@ -635,7 +820,7 @@ TEST(BackendTest, Basic)
             backend->startWrites();

             backend->writeLedger(
-                lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo)));
+                lgrInfo, ledgerInfoToBinaryString(lgrInfo));
             for (auto [hash, txn, meta] : txns)
             {
                 backend->writeTransaction(
@@ -729,8 +914,7 @@ TEST(BackendTest, Basic)
             for (auto [account, data] : accountTx)
             {
                 std::vector<Backend::TransactionAndMetadata> retData;
-                std::optional<Backend::AccountTransactionsCursor>
-                    cursor;
+                std::optional<Backend::TransactionsCursor> cursor;
                 do
                 {
                     uint32_t limit = 10;
@@ -1854,12 +2038,11 @@ TEST(Backend, cacheIntegration)
                 lgrInfoNext.hash++;

                 backend->writeLedger(
-                    lgrInfoNext,
-                    std::move(ledgerInfoToBinaryString(lgrInfoNext)));
+                    lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
                 backend->writeLedgerObject(
-                    std::move(std::string{accountIndexBlob}),
+                    std::string{accountIndexBlob},
                     lgrInfoNext.seq,
-                    std::move(std::string{accountBlob}));
+                    std::string{accountBlob});
                 auto key =
                     ripple::uint256::fromVoidChecked(accountIndexBlob);
                 backend->cache().update(
@@ -1921,8 +2104,7 @@ TEST(Backend, cacheIntegration)
                 ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);

             backend->writeLedger(
-                lgrInfoNext,
-                std::move(ledgerInfoToBinaryString(lgrInfoNext)));
+                lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
             std::shuffle(
                 accountBlob.begin(),
                 accountBlob.end(),
@@ -1933,9 +2115,9 @@ TEST(Backend, cacheIntegration)
                 {{*key, {accountBlob.begin(), accountBlob.end()}}},
                 lgrInfoNext.seq);
             backend->writeLedgerObject(
-                std::move(std::string{accountIndexBlob}),
+                std::string{accountIndexBlob},
                 lgrInfoNext.seq,
-                std::move(std::string{accountBlob}));
+                std::string{accountBlob});

             ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
         }
@@ -1983,15 +2165,14 @@ TEST(Backend, cacheIntegration)
                 ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);

             backend->writeLedger(
-                lgrInfoNext,
-                std::move(ledgerInfoToBinaryString(lgrInfoNext)));
+                lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
             auto key =
                 ripple::uint256::fromVoidChecked(accountIndexBlob);
             backend->cache().update({{*key, {}}}, lgrInfoNext.seq);
             backend->writeLedgerObject(
-                std::move(std::string{accountIndexBlob}),
+                std::string{accountIndexBlob},
                 lgrInfoNext.seq,
-                std::move(std::string{}));
+                std::string{});
             backend->writeSuccessor(
                 uint256ToString(Backend::firstKey),
                 lgrInfoNext.seq,
@@ -2027,9 +2208,8 @@ TEST(Backend, cacheIntegration)
                 EXPECT_FALSE(obj);
             }

-            auto generateObjects = [seed](
-                                       size_t numObjects,
-                                       uint32_t ledgerSequence) {
+            auto generateObjects = [](size_t numObjects,
+                                      uint32_t ledgerSequence) {
                 std::vector<std::pair<std::string, std::string>> res{
                     numObjects};
                 ripple::uint256 key;