Initial Commit

CJ Cobb
2020-12-14 20:39:54 -05:00
committed by ledhed2222
commit 9ef14f919e
172 changed files with 38825 additions and 0 deletions

87
.clang-format Normal file

@@ -0,0 +1,87 @@
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
AlignOperands: false
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterReturnType: All
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: true
AfterControlStatement: true
AfterEnum: false
AfterFunction: true
AfterNamespace: false
AfterObjCDeclaration: true
AfterStruct: true
AfterUnion: true
BeforeCatch: true
BeforeElse: true
IndentBraces: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Custom
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
- Regex: '^<(BeastConfig)'
Priority: 0
- Regex: '^<(ripple)/'
Priority: 2
- Regex: '^<(boost)/'
Priority: 3
- Regex: '.*'
Priority: 4
IncludeIsMainRegex: '$'
IndentCaseLabels: true
IndentFunctionDeclarationAfterType: false
IndentWidth: 4
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never

1
.dockerignore Normal file

@@ -0,0 +1 @@
build/

9
.git-blame-ignore-revs Normal file

@@ -0,0 +1,9 @@
# These might help too
# Mark any lines that have had a commit skipped using --ignore-rev with a `?`
#git config --global blame.markIgnoredLines true
# Mark any lines that were added in a skipped commit and can not be attributed with a `*`
#git config --global blame.markUnblamableLines true
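# Point git blame at this file so the revisions below are skipped by default
#git config blame.ignoreRevsFile .git-blame-ignore-revs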
# clang-format
e41150248a97e4bdc1cf21b54650c4bb7c63928e
2e542e7b0d94451a933c88778461cc8d3d7e6417

20
.githooks/ensure_release_tag Executable file

@@ -0,0 +1,20 @@
#!/bin/bash
# Pushing a release branch requires an annotated tag at the released commit
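# (assumption: hooks in .githooks are enabled via `git config core.hooksPath .githooks`)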
branch=$(git rev-parse --abbrev-ref HEAD)
if [[ $branch =~ master ]]; then
# check if HEAD commit is tagged
if ! git describe --exact-match HEAD; then
echo "Commits to master must be tagged"
exit 1
fi
elif [[ $branch =~ release/* ]]; then
IFS=/ read -r branch rel_ver <<< ${branch}
tag=$(git describe --tags --abbrev=0)
if [[ "${rel_ver}" != "${tag}" ]]; then
echo "release/${rel_ver} branches must have annotated tag ${rel_ver}"
echo "git tag -am\"${rel_ver}\" ${rel_ver}"
exit 1
fi
fi

27
.githooks/pre-commit Executable file

@@ -0,0 +1,27 @@
#!/bin/bash
exec 1>&2
# paths to check and re-format
sources="src unittests"
formatter="clang-format -i"
first=$(git diff $sources)
find $sources -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
second=$(git diff $sources)
changes=$(diff <(echo "$first") <(echo "$second") | wc -l | sed -e 's/^[[:space:]]*//')
if [ "$changes" != "0" ]; then
cat <<\EOF
WARNING
-----------------------------------------------------------------------------
Automatically re-formatted code with `clang-format` - commit was aborted.
Please manually add any updated files and commit again.
-----------------------------------------------------------------------------
EOF
exit 1
fi
.githooks/ensure_release_tag

13
.github/actions/lint/action.yml vendored Normal file

@@ -0,0 +1,13 @@
runs:
using: composite
steps:
# GitHub's ubuntu-20.04 image already has clang-format-11 installed
- run: |
find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i
shell: bash
- name: Check for differences
id: assert
shell: bash
run: |
git diff --color --exit-code | tee "clang-format.patch"

6
.github/actions/test/Dockerfile vendored Normal file

@@ -0,0 +1,6 @@
FROM cassandra:4.0.4
RUN apt-get update && apt-get install -y postgresql
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

8
.github/actions/test/entrypoint.sh vendored Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
pg_ctlcluster 12 main start
su postgres -c"psql -c\"alter user postgres with password 'postgres'\""
su cassandra -c "/opt/cassandra/bin/cassandra -R"
sleep 90
chmod +x ./clio_tests
./clio_tests

196
.github/workflows/build.yml vendored Normal file

@@ -0,0 +1,196 @@
name: Build Clio
on:
push:
branches: [master, release/*, develop, develop-next]
pull_request:
branches: [master, release/*, develop, develop-next]
workflow_dispatch:
jobs:
lint:
name: Lint
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- name: Run clang-format
uses: ./.github/actions/lint
build_clio:
name: Build Clio
runs-on: [self-hosted, Linux, heavy]
needs: lint
strategy:
fail-fast: false
matrix:
type:
- suffix: deb
image: rippleci/clio-dpkg-builder:2022-09-17
script: dpkg
- suffix: rpm
image: rippleci/clio-rpm-builder:2022-09-17
script: rpm
container:
image: ${{ matrix.type.image }}
steps:
- uses: actions/checkout@v3
with:
path: clio
fetch-depth: 0
- name: Clone Clio packaging repo
uses: actions/checkout@v3
with:
path: clio-packages
repository: XRPLF/clio-packages
ref: main
- name: Build
shell: bash
run: |
export CLIO_ROOT=$(realpath clio)
if [ ${{ matrix.type.suffix }} == "rpm" ]; then
source /opt/rh/devtoolset-11/enable
fi
cmake -S clio-packages -B clio-packages/build -DCLIO_ROOT=$CLIO_ROOT
cmake --build clio-packages/build --parallel $(nproc)
cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests .
mv ./clio-packages/build/*.${{ matrix.type.suffix }} .
- name: Artifact packages
uses: actions/upload-artifact@v3
with:
name: clio_${{ matrix.type.suffix }}_packages
path: ${{ github.workspace }}/*.${{ matrix.type.suffix }}
- name: Artifact clio_tests
uses: actions/upload-artifact@v3
with:
name: clio_tests-${{ matrix.type.suffix }}
path: ${{ github.workspace }}/clio_tests
build_dev:
name: Build on Mac/Clang14 and run tests
needs: lint
continue-on-error: false
runs-on: macos-12
steps:
- uses: actions/checkout@v3
with:
path: clio
- name: Check Boost cache
id: boost
uses: actions/cache@v3
with:
path: boost
key: ${{ runner.os }}-boost
- name: Build boost
if: steps.boost.outputs.cache-hit != 'true'
run: |
curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
tar zxf boost_1_77_0.tar.gz
mv boost_1_77_0 boost
cd boost
./bootstrap.sh
./b2 cxxflags="-std=c++14"
- name: install deps
run: |
brew install pkg-config protobuf openssl ninja cassandra-cpp-driver bison
- name: Build clio
run: |
export BOOST_ROOT=$(pwd)/boost
cd clio
cmake -B build
if ! cmake --build build -j$(nproc); then
echo '# 🔥🔥 MacOS AppleClang build failed!💥' >> $GITHUB_STEP_SUMMARY
exit 1
fi
- name: Run Test
run: |
cd clio/build
./clio_tests --gtest_filter="-Backend*"
test_clio:
name: Test Clio
runs-on: [self-hosted, Linux]
needs: build_clio
strategy:
fail-fast: false
matrix:
suffix: [rpm, deb]
steps:
- uses: actions/checkout@v3
- name: Get clio_tests artifact
uses: actions/download-artifact@v3
with:
name: clio_tests-${{ matrix.suffix }}
- name: Run tests
timeout-minutes: 10
uses: ./.github/actions/test
code_coverage:
name: Build on Linux and code coverage
needs: lint
continue-on-error: false
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
with:
path: clio
- name: Check Boost cache
id: boost
uses: actions/cache@v3
with:
path: boost
key: ${{ runner.os }}-boost
- name: Build boost
if: steps.boost.outputs.cache-hit != 'true'
run: |
curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
tar zxf boost_1_77_0.tar.gz
mv boost_1_77_0 boost
cd boost
./bootstrap.sh
./b2
- name: install deps
run: |
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential doxygen bison flex autoconf clang-format gcovr
- name: Build clio
run: |
export BOOST_ROOT=$(pwd)/boost
cd clio
cmake -B build -DCODE_COVERAGE=on -DTEST_PARAMETER='--gtest_filter="-Backend*"'
if ! cmake --build build -j$(nproc); then
echo '# 🔥Ubuntu build🔥 failed!💥' >> $GITHUB_STEP_SUMMARY
exit 1
fi
cd build
make clio_tests-ccov
- name: Code Coverage Summary Report
uses: irongut/CodeCoverageSummary@v1.2.0
with:
filename: clio/build/clio_tests-gcc-cov/out.xml
badge: true
output: both
format: markdown
- name: Save PR number and ccov report
run: |
mkdir -p ./UnitTestCoverage
echo ${{ github.event.number }} > ./UnitTestCoverage/NR
cp clio/build/clio_tests-gcc-cov/report.html ./UnitTestCoverage/report.html
cp code-coverage-results.md ./UnitTestCoverage/out.md
- uses: actions/upload-artifact@v2
with:
name: UnitTestCoverage
path: UnitTestCoverage/

6
.gitignore vendored Normal file

@@ -0,0 +1,6 @@
*clio*.log
build*/
.vscode
.python-version
config.json
src/main/impl/Build.cpp

39
CMake/Build.cpp.in Normal file

@@ -0,0 +1,39 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <main/Build.h>
namespace Build {
static constexpr char versionString[] = "@VERSION@";
std::string const&
getClioVersionString()
{
static std::string const value = versionString;
return value;
}
std::string const&
getClioFullVersionString()
{
static std::string const value = "clio-" + getClioVersionString();
return value;
}
} // namespace Build

33
CMake/ClioVersion.cmake Normal file

@@ -0,0 +1,33 @@
#[===================================================================[
write version to source
#]===================================================================]
find_package(Git REQUIRED)
set(GIT_COMMAND rev-parse --short HEAD)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
set(GIT_COMMAND branch --show-current)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
if(BRANCH STREQUAL "")
set(BRANCH "dev")
endif()
if(NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch, the version is YYYYMMDDHHMMSS-<branch>-<git-ref>
execute_process(COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
set(VERSION "${DATE}-${BRANCH}-${REV}")
else()
set(GIT_COMMAND describe --tags)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
set(VERSION "${TAG_VERSION}-${REV}")
endif()
if(CMAKE_BUILD_TYPE MATCHES Debug)
set(VERSION "${VERSION}+DEBUG")
endif()
message(STATUS "Build version: ${VERSION}")
set(clio_version "${VERSION}")
configure_file(CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)

126
CMake/coverage.cmake Normal file

@@ -0,0 +1,126 @@
# call add_converage(module_name) to add coverage targets for the given module
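# e.g. add_converage(clio_tests)  (illustrative; the actual call site lives in the project's CMakeLists)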
function(add_converage module)
if("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang"
OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
message("[Coverage] Building with llvm Code Coverage Tools")
# Using llvm-cov; llvm is installed by Xcode
set(LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin)
if(NOT EXISTS ${LLVM_COV_PATH}/llvm-cov)
message(FATAL_ERROR "llvm-cov not found! Aborting.")
endif()
# set Flags
target_compile_options(${module} PRIVATE -fprofile-instr-generate
-fcoverage-mapping)
target_link_options(${module} PUBLIC -fprofile-instr-generate
-fcoverage-mapping)
target_compile_options(clio PRIVATE -fprofile-instr-generate
-fcoverage-mapping)
target_link_options(clio PUBLIC -fprofile-instr-generate
-fcoverage-mapping)
# llvm-cov
add_custom_target(
${module}-ccov-preprocessing
COMMAND LLVM_PROFILE_FILE=${module}.profraw $<TARGET_FILE:${module}>
COMMAND ${LLVM_COV_PATH}/llvm-profdata merge -sparse ${module}.profraw -o
${module}.profdata
DEPENDS ${module})
add_custom_target(
${module}-ccov-show
COMMAND ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata -show-line-counts-or-regions
DEPENDS ${module}-ccov-preprocessing)
# add summary for CI parsing
add_custom_target(
${module}-ccov-report
COMMAND
${LLVM_COV_PATH}/llvm-cov report $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata
-ignore-filename-regex=".*_makefiles|.*unittests"
-show-region-summary=false
DEPENDS ${module}-ccov-preprocessing)
# exclude libs and the unittests themselves
add_custom_target(
${module}-ccov
COMMAND
${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata -show-line-counts-or-regions
-output-dir=${module}-llvm-cov -format="html"
-ignore-filename-regex=".*_makefiles|.*unittests" > /dev/null 2>&1
DEPENDS ${module}-ccov-preprocessing)
add_custom_command(
TARGET ${module}-ccov
POST_BUILD
COMMENT
"Open ${module}-llvm-cov/index.html in your browser to view the coverage report."
)
elseif("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}"
MATCHES "GNU")
message("[Coverage] Building with Gcc Code Coverage Tools")
find_program(GCOV_PATH gcov)
if(NOT GCOV_PATH)
message(FATAL_ERROR "gcov not found! Aborting...")
endif() # NOT GCOV_PATH
find_program(GCOVR_PATH gcovr)
if(NOT GCOVR_PATH)
message(FATAL_ERROR "gcovr not found! Aborting...")
endif() # NOT GCOVR_PATH
set(COV_OUTPUT_PATH ${module}-gcc-cov)
target_compile_options(${module} PRIVATE -fprofile-arcs -ftest-coverage
-fPIC)
target_link_libraries(${module} PRIVATE gcov)
target_compile_options(clio PRIVATE -fprofile-arcs -ftest-coverage
-fPIC)
target_link_libraries(clio PRIVATE gcov)
# this target is also used by CI: it generates the summary out.xml, which is
# fed to a GitHub Action that renders markdown we can paste into comments or
# a README
add_custom_target(
${module}-ccov
COMMAND ${module} ${TEST_PARAMETER}
COMMAND rm -rf ${COV_OUTPUT_PATH}
COMMAND mkdir ${COV_OUTPUT_PATH}
COMMAND
gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR} -x
${COV_OUTPUT_PATH}/out.xml --exclude='${CMAKE_SOURCE_DIR}/unittests/'
--exclude='${PROJECT_BINARY_DIR}/'
COMMAND
gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR}
--html ${COV_OUTPUT_PATH}/report.html
--exclude='${CMAKE_SOURCE_DIR}/unittests/'
--exclude='${PROJECT_BINARY_DIR}/'
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
COMMENT "Running gcovr to produce Cobertura code coverage report.")
# generate the detail report
add_custom_target(
${module}-ccov-report
COMMAND ${module} ${TEST_PARAMETER}
COMMAND rm -rf ${COV_OUTPUT_PATH}
COMMAND mkdir ${COV_OUTPUT_PATH}
COMMAND
gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR}
--html-details ${COV_OUTPUT_PATH}/index.html
--exclude='${CMAKE_SOURCE_DIR}/unittests/'
--exclude='${PROJECT_BINARY_DIR}/'
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
COMMENT "Running gcovr to produce Cobertura code coverage report.")
add_custom_command(
TARGET ${module}-ccov-report
POST_BUILD
COMMENT
"Open ${COV_OUTPUT_PATH}/index.html in your browser to view the coverage report."
)
else()
message(FATAL_ERROR "Complier not support yet")
endif()
endfunction()

6
CMake/deps/Boost.cmake Normal file

@@ -0,0 +1,6 @@
set(Boost_USE_STATIC_LIBS ON)
set(Boost_USE_STATIC_RUNTIME ON)
find_package(Boost 1.75 COMPONENTS filesystem log_setup log thread system REQUIRED)
target_link_libraries(clio PUBLIC ${Boost_LIBRARIES})

24
CMake/deps/Remove-bitset-operator.patch Normal file

@@ -0,0 +1,24 @@
From 5cd9d09d960fa489a0c4379880cd7615b1c16e55 Mon Sep 17 00:00:00 2001
From: CJ Cobb <ccobb@ripple.com>
Date: Wed, 10 Aug 2022 12:30:01 -0400
Subject: [PATCH] Remove bitset operator !=
---
src/ripple/protocol/Feature.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h
index b3ecb099b..6424be411 100644
--- a/src/ripple/protocol/Feature.h
+++ b/src/ripple/protocol/Feature.h
@@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset<detail::numFeatures>
public:
using base::bitset;
using base::operator==;
- using base::operator!=;
using base::all;
using base::any;
--
2.32.0

11
CMake/deps/SourceLocation.cmake Normal file

@@ -0,0 +1,11 @@
include(CheckIncludeFileCXX)
check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE)
if(SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio PUBLIC "HAS_SOURCE_LOCATION")
endif()
check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if(EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif()

153
CMake/deps/cassandra.cmake Normal file

@@ -0,0 +1,153 @@
find_package(ZLIB REQUIRED)
find_library(cassandra NAMES cassandra)
if(NOT cassandra)
message("System installed Cassandra cpp driver not found. Will build")
find_library(zlib NAMES zlib1g-dev zlib-devel zlib z)
if(NOT zlib)
message("zlib not found. will build")
add_library(zlib STATIC IMPORTED GLOBAL)
ExternalProject_Add(zlib_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/madler/zlib.git
GIT_TAG v1.2.12
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
)
ExternalProject_Get_Property (zlib_src SOURCE_DIR)
ExternalProject_Get_Property (zlib_src BINARY_DIR)
set (zlib_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${zlib_src_SOURCE_DIR}/include)
set_target_properties (zlib PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(zlib zlib_src)
file(TO_CMAKE_PATH "${zlib_src_SOURCE_DIR}" zlib_src_SOURCE_DIR)
endif()
find_library(krb5 NAMES krb5-dev libkrb5-dev)
if(NOT krb5)
message("krb5 not found. will build")
add_library(krb5 STATIC IMPORTED GLOBAL)
ExternalProject_Add(krb5_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/krb5/krb5.git
GIT_TAG krb5-1.20
UPDATE_COMMAND ""
CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared
BUILD_IN_SOURCE 1
BUILD_COMMAND make
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <SOURCE_DIR>/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
)
message(${ep_lib_prefix}/krb5.a)
message(${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a)
ExternalProject_Get_Property (krb5_src SOURCE_DIR)
ExternalProject_Get_Property (krb5_src BINARY_DIR)
set (krb5_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${krb5_src_SOURCE_DIR}/include)
set_target_properties (krb5 PROPERTIES
IMPORTED_LOCATION
${SOURCE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(krb5 krb5_src)
file(TO_CMAKE_PATH "${krb5_src_SOURCE_DIR}" krb5_src_SOURCE_DIR)
endif()
find_library(libuv1 NAMES uv1 libuv1 libuv1-dev libuv1:amd64)
if(NOT libuv1)
message("libuv1 not found, will build")
add_library(libuv1 STATIC IMPORTED GLOBAL)
ExternalProject_Add(libuv_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/libuv/libuv.git
GIT_TAG v1.44.1
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
)
ExternalProject_Get_Property (libuv_src SOURCE_DIR)
ExternalProject_Get_Property (libuv_src BINARY_DIR)
set (libuv_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${libuv_src_SOURCE_DIR}/include)
set_target_properties (libuv1 PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(libuv1 libuv_src)
file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR)
endif()
add_library (cassandra STATIC IMPORTED GLOBAL)
ExternalProject_Add(cassandra_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
GIT_TAG 2.16.2
CMAKE_ARGS
-DLIBUV_ROOT_DIR=${BINARY_DIR}
-DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
-DCASS_BUILD_STATIC=ON
-DCASS_BUILD_SHARED=OFF
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
)
ExternalProject_Get_Property (cassandra_src SOURCE_DIR)
ExternalProject_Get_Property (cassandra_src BINARY_DIR)
set (cassandra_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${cassandra_src_SOURCE_DIR}/include)
set_target_properties (cassandra PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
message("cass dirs")
message(${BINARY_DIR})
message(${SOURCE_DIR})
message(${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a)
add_dependencies(cassandra cassandra_src)
if(NOT libuv1)
ExternalProject_Add_StepDependencies(cassandra_src build libuv1)
target_link_libraries(cassandra INTERFACE libuv1)
else()
target_link_libraries(cassandra INTERFACE ${libuv1})
endif()
if(NOT krb5)
ExternalProject_Add_StepDependencies(cassandra_src build krb5)
target_link_libraries(cassandra INTERFACE krb5)
else()
target_link_libraries(cassandra INTERFACE ${krb5})
endif()
if(NOT zlib)
ExternalProject_Add_StepDependencies(cassandra_src build zlib)
target_link_libraries(cassandra INTERFACE zlib)
else()
target_link_libraries(cassandra INTERFACE ${zlib})
endif()
set(OPENSSL_USE_STATIC_LIBS TRUE)
find_package(OpenSSL REQUIRED)
target_link_libraries(cassandra INTERFACE OpenSSL::SSL)
file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR)
target_link_libraries(clio PUBLIC cassandra)
else()
message("Found system installed cassandra cpp driver")
message(${cassandra})
find_path(cassandra_includes NAMES cassandra.h REQUIRED)
message(${cassandra_includes})
get_filename_component(CASSANDRA_HEADER ${cassandra_includes}/cassandra.h REALPATH)
get_filename_component(CASSANDRA_HEADER_DIR ${CASSANDRA_HEADER} DIRECTORY)
target_link_libraries (clio PUBLIC ${cassandra})
target_include_directories(clio PUBLIC ${CASSANDRA_HEADER_DIR})
endif()

48
CMake/deps/clio.cmake Normal file

@@ -0,0 +1,48 @@
set(CLIO_REPO "https://github.com/XRPLF/clio.git")
set(CLIO_BRANCH "1.0.4")
add_library(clio STATIC IMPORTED GLOBAL)
add_library(xrpl_core STATIC IMPORTED GLOBAL)
ExternalProject_Add(clio_src
GIT_REPOSITORY "${CLIO_REPO}"
GIT_TAG "${CLIO_BRANCH}"
GIT_SHALLOW ON
INSTALL_COMMAND ""
CMAKE_ARGS
-DBUILD_TESTS=OFF
-DPACKAGING=OFF
)
ExternalProject_Get_Property(clio_src SOURCE_DIR)
ExternalProject_Get_Property(clio_src BINARY_DIR)
file(MAKE_DIRECTORY ${SOURCE_DIR}/src)
set_target_properties(clio PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}clio_static.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/src
)
file(MAKE_DIRECTORY ${BINARY_DIR}/_deps/rippled-build)
file(MAKE_DIRECTORY ${BINARY_DIR}/_deps/rippled-src/src)
set_target_properties(xrpl_core PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/_deps/rippled-build/${CMAKE_STATIC_LIBRARY_PREFIX}xrpl_core.a
INTERFACE_INCLUDE_DIRECTORIES
${BINARY_DIR}/_deps/rippled-src/src
)
add_dependencies(clio clio_src)
add_dependencies(xrpl_core clio_src)
add_library(date STATIC IMPORTED GLOBAL)
file(MAKE_DIRECTORY
${BINARY_DIR}/unix_makefiles/AppleClang_14.0.0.14000029/Release/hh_date_src-src/include)
set_target_properties(date PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/unix_makefiles/AppleClang_14.0.0.14000029/Release/hh_date_src-src/include/date/date.h
INTERFACE_INCLUDE_DIRECTORIES
${BINARY_DIR}/unix_makefiles/AppleClang_14.0.0.14000029/Release/hh_date_src-src/include
)

20
CMake/deps/gtest.cmake Normal file

@@ -0,0 +1,20 @@
FetchContent_Declare(
googletest
URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip
)
FetchContent_GetProperties(googletest)
if(NOT googletest_POPULATED)
FetchContent_Populate(googletest)
add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
target_link_libraries(clio_tests PUBLIC clio gmock_main)
target_include_directories(clio_tests PRIVATE unittests)
enable_testing()
include(GoogleTest)
gtest_discover_tests(clio_tests)

14
CMake/deps/libfmt.cmake Normal file

@@ -0,0 +1,14 @@
FetchContent_Declare(
libfmt
URL https://github.com/fmtlib/fmt/releases/download/9.1.0/fmt-9.1.0.zip
)
FetchContent_GetProperties(libfmt)
if(NOT libfmt_POPULATED)
FetchContent_Populate(libfmt)
add_subdirectory(${libfmt_SOURCE_DIR} ${libfmt_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
target_link_libraries(clio PUBLIC fmt)

20
CMake/deps/rippled.cmake Normal file

@@ -0,0 +1,20 @@
set(RIPPLED_REPO "https://github.com/ripple/rippled.git")
set(RIPPLED_BRANCH "1.9.2")
set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
set(patch_command ! grep operator!= src/ripple/protocol/Feature.h || git apply < ${CMAKE_CURRENT_SOURCE_DIR}/CMake/deps/Remove-bitset-operator.patch)
message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}")
FetchContent_Declare(rippled
GIT_REPOSITORY "${RIPPLED_REPO}"
GIT_TAG "${RIPPLED_BRANCH}"
GIT_SHALLOW ON
PATCH_COMMAND "${patch_command}"
)
FetchContent_GetProperties(rippled)
if(NOT rippled_POPULATED)
FetchContent_Populate(rippled)
add_subdirectory(${rippled_SOURCE_DIR} ${rippled_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
target_link_libraries(clio PUBLIC xrpl_core grpc_pbufs)
target_include_directories(clio PUBLIC ${rippled_SOURCE_DIR}/src ) # TODO: Seems like this shouldn't be needed?

17
CMake/install/clio.service.in Normal file

@@ -0,0 +1,17 @@
[Unit]
Description=Clio XRPL API server
Documentation=https://github.com/XRPLF/clio.git
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json
Restart=on-failure
User=clio
Group=clio
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target


@@ -0,0 +1,16 @@
set(CLIO_INSTALL_DIR "/opt/clio")
set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
install(TARGETS clio_server DESTINATION bin)
# install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests?
#install(FILES example-config.json DESTINATION etc RENAME config.json)
file(READ example-config.json config)
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
configure_file("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)

6
CMake/settings.cmake Normal file

@@ -0,0 +1,6 @@
target_compile_options(clio
PUBLIC -Wall
-Werror
-Wno-narrowing
-Wno-deprecated-declarations
-Wno-dangling-else)

98
CMakeLists.txt Normal file

@@ -0,0 +1,98 @@
cmake_minimum_required(VERSION 3.16.3)
project(clio_migrator)
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
message(FATAL_ERROR "GCC 11+ required for building clio_migrator")
endif()
option(VERBOSE "Verbose build" TRUE)
if(VERBOSE)
set(CMAKE_VERBOSE_MAKEFILE TRUE)
set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
endif()
add_library(clio)
target_compile_features(clio PUBLIC cxx_std_20)
target_include_directories(clio PUBLIC src)
include(FetchContent)
include(ExternalProject)
include(CMake/settings.cmake)
include(CMake/ClioVersion.cmake)
include(CMake/deps/rippled.cmake)
include(CMake/deps/libfmt.cmake)
include(CMake/deps/Boost.cmake)
include(CMake/deps/cassandra.cmake)
include(CMake/deps/SourceLocation.cmake)
target_sources(clio PRIVATE
## Main
src/main/impl/Build.cpp
## Backend
src/backend/BackendInterface.cpp
src/backend/CassandraBackend.cpp
src/backend/SimpleCache.cpp
## ETL
src/etl/ETLSource.cpp
src/etl/ProbingETLSource.cpp
src/etl/NFTHelpers.cpp
src/etl/ReportingETL.cpp
## Subscriptions
src/subscriptions/SubscriptionManager.cpp
## RPC
src/rpc/Errors.cpp
src/rpc/RPC.cpp
src/rpc/RPCHelpers.cpp
src/rpc/Counters.cpp
src/rpc/WorkQueue.cpp
## NextGen RPC
src/rpc/common/Specs.cpp
src/rpc/common/Validators.cpp
## NextGen RPC handler
src/rpc/ngHandlers/AccountChannels.cpp
src/rpc/ngHandlers/AccountCurrencies.cpp
src/rpc/ngHandlers/Tx.cpp
src/rpc/ngHandlers/GatewayBalances.cpp
src/rpc/ngHandlers/LedgerEntry.cpp
## RPC Methods
# Account
src/rpc/handlers/AccountChannels.cpp
src/rpc/handlers/AccountCurrencies.cpp
src/rpc/handlers/AccountInfo.cpp
src/rpc/handlers/AccountLines.cpp
src/rpc/handlers/AccountOffers.cpp
src/rpc/handlers/AccountObjects.cpp
src/rpc/handlers/GatewayBalances.cpp
src/rpc/handlers/NoRippleCheck.cpp
# NFT
src/rpc/handlers/NFTHistory.cpp
src/rpc/handlers/NFTInfo.cpp
src/rpc/handlers/NFTOffers.cpp
# Ledger
src/rpc/handlers/Ledger.cpp
src/rpc/handlers/LedgerData.cpp
src/rpc/handlers/LedgerEntry.cpp
src/rpc/handlers/LedgerRange.cpp
# Transaction
src/rpc/handlers/Tx.cpp
src/rpc/handlers/TransactionEntry.cpp
src/rpc/handlers/AccountTx.cpp
# Dex
src/rpc/handlers/BookChanges.cpp
src/rpc/handlers/BookOffers.cpp
# Payment Channel
src/rpc/handlers/ChannelAuthorize.cpp
src/rpc/handlers/ChannelVerify.cpp
# Subscribe
src/rpc/handlers/Subscribe.cpp
# Server
src/rpc/handlers/ServerInfo.cpp
# Utilities
src/rpc/handlers/Random.cpp
src/config/Config.cpp
src/log/Logger.cpp
src/util/Taggable.cpp)
add_executable(clio_migrator src/main/main.cpp)
target_link_libraries(clio_migrator PUBLIC clio)

3
Doxyfile Normal file

@@ -0,0 +1,3 @@
PROJECT_NAME = "Clio"
INPUT = src
RECURSIVE = YES

7
LICENSE Normal file

@@ -0,0 +1,7 @@
ISC License
Copyright (c) 2023, XRPL Foundation
Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

43
README.md Normal file

@@ -0,0 +1,43 @@
# CLIO MIGRATOR (ONE OFF!)
This tool is a (really) hacky way of migrating some data for
[clio](https://github.com/XRPLF/clio), required by [pull request
313](https://github.com/XRPLF/clio/pull/313) in that repo.
Specifically, it is meant to migrate NFT data such that:
* The new `nf_token_uris` table is populated with all URIs for all NFTs known
* The new `issuer_nf_tokens_v2` table is populated with all NFTs known
* The old `issuer_nf_tokens` table is dropped. This table was never used prior
to the above-referenced PR, so it is very safe to drop.
This tool should be used as follows, with regard to the above update:
1) Stop serving requests from your clio
2) Stop your clio and upgrade it to the version that includes the above PR
3) Start your clio
4) Now, your clio is writing new data correctly. This tool will update your
old data, while your new clio is running.
5) Run this tool, using the _exact_ same config as what you are using for your
production clio.
6) Once this tool terminates successfully, you can resume serving requests
from your clio.
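As a concrete sketch of steps 1-6 (the systemd service name, package file, and
paths here are assumptions based on the default install layout; adapt them to
your deployment):
```
# steps 1-3: stop serving requests, upgrade clio past PR 313, start it again
sudo systemctl stop clio
sudo dpkg -i clio_<new_version>.deb   # or however you deploy the new build
sudo systemctl start clio
# step 5: run this migrator with the exact same config as your production clio
cp /opt/clio/etc/config.json ./migrator-config.json
./clio_migrator ./migrator-config.json
# step 6: resume serving requests only after the migrator exits successfully
```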
## Compiling
Git-clone this project to your server. Then from the top-level directory:
```
mkdir build
cd build
cmake ..
cmake --build . -j 4
```
Once this completes, the migrator will be compiled as `clio_migrator`. Then
you should copy your existing clio config somewhere and:
```
./clio_migrator <config path>
```
This migration will take a few hours to complete.

38
cloud-example-config.json Normal file

@@ -0,0 +1,38 @@
{
"database":
{
"type":"cassandra",
"cassandra":
{
"secure_connect_bundle":"[path/to/zip. ignore if using contact_points]",
"contact_points":"[ip. ignore if using secure_connect_bundle]",
"port":"[port. ignore if using_secure_connect_bundle]",
"keyspace":"clio",
"username":"[username, if any]",
"password":"[password, if any]",
"max_requests_outstanding":25000,
"threads":8
}
},
"etl_sources":
[
{
"ip":"[rippled ip]",
"ws_port":"6006",
"grpc_port":"50051"
}
],
"dos_guard":
{
"whitelist":["127.0.0.1"]
},
"server":{
"ip":"0.0.0.0",
"port":8080
},
"log_level":"debug",
"log_file":"./clio.log",
"online_delete":0,
"extractor_threads":8,
"read_only":false
}

49
docker/centos/Dockerfile Normal file

@@ -0,0 +1,49 @@
# FROM centos:7 as deps
FROM centos:7 as build
ENV CLIO_DIR=/opt/clio/
# ENV OPENSSL_DIR=/opt/openssl
RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
RUN yum install -y devtoolset-11
ENV version=3.16
ENV build=3
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
COPY docker/shared/install_cmake.sh /install_cmake.sh
RUN /install_cmake.sh 3.16.3 /usr/local
RUN source /opt/rh/devtoolset-11/enable
WORKDIR /tmp
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
COPY docker/centos/build_git_centos7.sh build_git_centos7.sh
RUN ./build_git_centos7.sh
RUN git clone https://github.com/openssl/openssl
WORKDIR /tmp/openssl
RUN git checkout OpenSSL_1_1_1q
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
make -j $(nproc) && \
make install_sw
WORKDIR /tmp
# FROM centos:7 as build
RUN git clone https://github.com/xrplf/clio.git
COPY docker/shared/build_boost.sh build_boost.sh
ENV OPENSSL_ROOT=/opt/local/openssl
ENV BOOST_ROOT=/boost
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
RUN yum install -y bison flex
RUN yum install -y rpmdevtools rpmlint
RUN source /opt/rh/devtoolset-11/enable && cd /tmp/clio && \
cmake -B build -DBUILD_TESTS=1 && \
cmake --build build --parallel $(nproc)
RUN mkdir output
RUN strip clio/build/clio_server && strip clio/build/clio_tests
RUN cp clio/build/clio_tests output/ && cp clio/build/clio_server output/
RUN cp clio/example-config.json output/example-config.json
FROM centos:7
COPY --from=build /tmp/output /clio
RUN mkdir -p /opt/clio/etc && mv /clio/example-config.json /opt/clio/etc/config.json
CMD ["/clio/clio_server", "/opt/clio/etc/config.json"]


@@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -ex
GIT_VERSION="2.37.1"
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
tar zxvf git-${GIT_VERSION}.tar.gz
cd git-${GIT_VERSION}
yum install -y centos-release-scl epel-release
yum update -y
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel
source /opt/rh/devtoolset-11/enable
make configure
./configure
make git -j$(nproc)
make install git
git --version | cut -d ' ' -f3

11
docker/centos/install_cmake.sh Executable file

@@ -0,0 +1,11 @@
#!/bin/bash
set -eo pipefail
CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake


@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -exu
#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
# it's either those or link=static that halves the failures. probably link=static
BOOST_VERSION=$1
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
echo "BOOST_VERSION: ${BOOST_VERSION}"
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
mkdir -p /boost && mv boost /boost && mv stage /boost


@@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -ex
GIT_VERSION="2.37.1"
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
tar zxvf git-${GIT_VERSION}.tar.gz
cd git-${GIT_VERSION}
yum install -y centos-release-scl epel-release
yum update -y
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel
source /opt/rh/devtoolset-11/enable
make configure
./configure
make git -j$(nproc)
make install git
git --version | cut -d ' ' -f3


@@ -0,0 +1,34 @@
FROM centos:7
ENV CLIO_DIR=/opt/clio/
# ENV OPENSSL_DIR=/opt/openssl
RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
RUN yum install -y devtoolset-11
ENV version=3.16
ENV build=3
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
COPY install_cmake.sh /install_cmake.sh
RUN /install_cmake.sh 3.16.3 /usr/local
RUN source /opt/rh/devtoolset-11/enable
WORKDIR /tmp
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
COPY build_git_centos7.sh build_git_centos7.sh
RUN ./build_git_centos7.sh
RUN git clone https://github.com/openssl/openssl
WORKDIR /tmp/openssl
RUN git checkout OpenSSL_1_1_1q
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
make -j $(nproc) && \
make install_sw
WORKDIR /tmp
RUN git clone https://github.com/xrplf/clio.git
COPY build_boost.sh build_boost.sh
ENV OPENSSL_ROOT=/opt/local/openssl
ENV BOOST_ROOT=/boost
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
RUN yum install -y bison flex
RUN source /opt/rh/devtoolset-11/enable && \
cd /tmp/clio && cmake -B build -Dtests=0 -Dlocal_libarchive=1 -Dunity=0 -DBUILD_TESTS=0 && cmake --build build --parallel $(nproc)


@@ -0,0 +1,11 @@
#!/bin/bash
set -eo pipefail
CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake

13
docker/shared/build_boost.sh Executable file

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -exu
#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
# it's either those or link=static that halves the failures. probably link=static
BOOST_VERSION=$1
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
echo "BOOST_VERSION: ${BOOST_VERSION}"
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
mkdir -p /boost && mv boost /boost && mv stage /boost

11
docker/shared/install_cmake.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
set -eo pipefail
CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake


@@ -0,0 +1,3 @@
#!/bin/bash
set -e

24
docker/ubuntu/Dockerfile Normal file

@@ -0,0 +1,24 @@
FROM ubuntu:20.04 AS boost
RUN apt-get update && apt-get install -y build-essential
ARG BOOST_VERSION_=1_75_0
ARG BOOST_VERSION=1.75.0
COPY docker/shared/build_boost.sh .
RUN apt install -y curl
RUN ./build_boost.sh ${BOOST_VERSION}
ENV BOOST_ROOT=/boost
FROM ubuntu:20.04 AS build
ENV BOOST_ROOT=/boost
COPY --from=boost /boost /boost
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install --no-install-recommends -y build-essential software-properties-common pkg-config libssl-dev wget curl gpg git zlib1g-dev bison flex autoconf lsb-release
RUN apt install -y gpg-agent
RUN wget https://apt.llvm.org/llvm.sh
RUN chmod +x llvm.sh && ./llvm.sh 14 && ./llvm.sh 15
# COPY . /clio
## Install cmake
ARG CMAKE_VERSION=3.16.3
COPY docker/shared/install_cmake.sh .
RUN ./install_cmake.sh ${CMAKE_VERSION}
ENV PATH="/opt/local/cmake/bin:$PATH"

93
example-config.json Normal file
View File

@@ -0,0 +1,93 @@
{
"database": {
"type": "cassandra",
"cassandra": {
"contact_points": "127.0.0.1",
"port": 9042,
"keyspace": "clio",
"replication_factor": 1,
"table_prefix": "",
"max_write_requests_outstanding": 25000,
"max_read_requests_outstanding": 30000,
"threads": 8
}
},
"etl_sources": [
{
"ip": "127.0.0.1",
"ws_port": "6006",
"grpc_port": "50051"
}
],
"dos_guard": {
"whitelist": [
"127.0.0.1"
], // comma-separated list of ips to exclude from rate limiting
/* The below values are the default values and are only specified here
* for documentation purposes. The rate limiter currently limits
* connections and bandwidth per ip. The rate limiter looks at the raw
* ip of a client connection, and so requests routed through a load
* balancer will all have the same ip and be treated as a single client
*/
"max_fetches": 1000000, // max bytes per ip per sweep interval
"max_connections": 20, // max connections per ip
"max_requests": 20, // max connections per ip
"sweep_interval": 1 // time in seconds before resetting bytes per ip count
},
"cache": {
"peers": [
{
"ip": "127.0.0.1",
"port": 51234
}
]
},
"server": {
"ip": "0.0.0.0",
"port": 51233,
/* Max number of requests to queue up before rejecting further requests.
* Defaults to 0, which disables the limit
*/
"max_queue_size": 500
},
"log_channels": [
{
"channel": "Backend",
"log_level": "fatal"
},
{
"channel": "WebServer",
"log_level": "info"
},
{
"channel": "Subscriptions",
"log_level": "info"
},
{
"channel": "RPC",
"log_level": "error"
},
{
"channel": "ETL",
"log_level": "debug"
},
{
"channel": "Performance",
"log_level": "trace"
}
],
"log_level": "info",
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format
"log_to_console": true,
"log_directory": "./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"log_tag_style": "uint",
"extractor_threads": 8,
"read_only": false,
//"start_sequence": [integer] the ledger index to start from,
//"finish_sequence": [integer] the ledger index to finish at,
//"ssl_cert_file" : "/full/path/to/cert.file",
//"ssl_key_file" : "/full/path/to/key.file"
}

181
metrics.py Normal file

@@ -0,0 +1,181 @@
#!/usr/bin/python3
import argparse
from datetime import datetime
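# Usage (per the argparse options below):
#   ./metrics.py --filename <clio log file> [--interval <ledgers per bucket>] [--account_tx 1]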
def getTime(line):
bracketOpen = line.find("[")
bracketClose = line.find("]")
timestampSub = line[bracketOpen+1:bracketClose]
timestamp = datetime.strptime(timestampSub, '%Y-%m-%d %H:%M:%S.%f')
return timestamp.timestamp()
def parseAccountTx(filename):
with open(filename) as f:
totalProcTime = 0.0
totalTxnTime = 0.0
numCalls = 0
for line in f:
if "executed stored_procedure" in line:
idx = line.find("in ")
idx = idx + 3
idx2 = line.find("num")
procTime = float(line[idx:idx2])
totalProcTime += procTime
if "fetchTransactions fetched" in line:
idx = line.find("took ")
idx = idx + 5
txnTime = float(line[idx:])
totalTxnTime += txnTime
numCalls = numCalls + 1
print(totalProcTime)
print(totalProcTime/numCalls)
print(totalTxnTime)
print(totalTxnTime/numCalls)
def parseLogs(filename, interval):
with open(filename) as f:
totalTime = 0
totalTxns = 0
totalObjs = 0
totalLoadTime = 0
start = 0
end = 0
totalLedgers = 0
intervalTime = 0
intervalTxns = 0
intervalObjs = 0
intervalLoadTime = 0
intervalStart = 0
intervalEnd = 0
intervalLedgers = 0
ledgersPerSecond = 0
print("ledgers, transactions, objects, loadTime, loadTime/ledger, ledgers/sec, txns/sec, objs/sec")
for line in f:
if "Load phase" in line:
sequenceIdx = line.find("Sequence : ")
hashIdx = line.find(" Hash :")
sequence = line[sequenceIdx + len("Sequence : "):hashIdx]
txnCountSubstr = "txn count = "
objCountSubstr = ". object count = "
loadTimeSubstr = ". load time = "
txnsSubstr = ". load txns per second = "
objsSubstr = ". load objs per second = "
txnCountIdx = line.find(txnCountSubstr)
objCountIdx = line.find(objCountSubstr)
loadTimeIdx = line.find(loadTimeSubstr)
txnsIdx = line.find(txnsSubstr)
objsIdx = line.find(objsSubstr)
txnCount = line[txnCountIdx + len(txnCountSubstr):objCountIdx]
objCount = line[objCountIdx + len(objCountSubstr):loadTimeIdx]
loadTime = line[loadTimeIdx + len(loadTimeSubstr):txnsIdx]
txnsPerSecond = line[txnsIdx + len(txnsSubstr):objsIdx]
objsPerSecond = line[objsIdx + len(objsSubstr):-1]
totalTime += float(loadTime)
totalTxns += float(txnCount)
totalObjs += float(objCount)
intervalTime += float(loadTime)
intervalTxns += float(txnCount)
intervalObjs += float(objCount)
totalLoadTime += float(loadTime)
intervalLoadTime += float(loadTime)
if start == 0:
start = getTime(line)
prevEnd = end
end = getTime(line)
if intervalStart == 0:
intervalStart = getTime(line)
intervalEnd = getTime(line)
totalLedgers+=1
intervalLedgers+=1
ledgersPerSecond = 0
if end != start:
ledgersPerSecond = float(totalLedgers) / float((end - start))
intervalLedgersPerSecond = 0
if intervalEnd != intervalStart:
intervalLedgersPerSecond = float(intervalLedgers) / float((intervalEnd - intervalStart))
if int(sequence) % interval == 0:
# print("Sequence = " + sequence + " : [time, txCount, objCount, txPerSec, objsPerSec]")
# print(loadTime + " , "
# + txnCount + " , "
# + objCount + " , "
# + txnsPerSecond + " , "
# + objsPerSecond)
# print("Interval Aggregate ( " + str(interval) + " ) [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]: ")
print(str(intervalLedgers) + " , "
+ str(intervalTxns) + " , "
+ str(intervalObjs) + " , "
+ str(intervalLoadTime) + " , "
+ str(intervalLoadTime/intervalLedgers) + " , "
+ str(intervalLedgers/intervalLoadTime) + " , "
+ str(intervalTxns/intervalLoadTime) + " , "
+ str(intervalObjs/intervalLoadTime))
# print("Total Aggregate: [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]")
# print(str(totalLedgers) + " , "
# + str(totalTxns) + " , "
# + str(totalObjs) + " , "
# + str(end-start) + " , "
# + str(ledgersPerSecond) + " , "
# + str(totalLoadTime/totalLedgers) + " , "
# + str(totalTxns/totalTime) + " , "
# + str(totalObjs/totalTime))
if int(sequence) % interval == 0:
intervalTime = 0
intervalTxns = 0
intervalObjs = 0
intervalStart = 0
intervalEnd = 0
intervalLedgers = 0
intervalLoadTime = 0
print("Total Aggregate: [ledgers, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]")
print(totalLedgers)
print(totalLoadTime)
print(str(totalLedgers) + " : "
+ str(end-start) + " : "
+ str(ledgersPerSecond) + " : "
+ str(totalLoadTime/totalLedgers) + " : "
+ str(totalTxns/totalTime) + " : "
+ str(totalObjs/totalTime))
parser = argparse.ArgumentParser(description='parses logs')
parser.add_argument("--filename")
parser.add_argument("--interval",default=100000)
parser.add_argument("--account_tx",default=False)
args = parser.parse_args()
def run(args):
if args.account_tx:
parseAccountTx(args.filename)
else:
parseLogs(args.filename, int(args.interval))
run(args)


@@ -0,0 +1,66 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <boost/algorithm/string.hpp>
namespace Backend {
std::shared_ptr<BackendInterface>
make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
{
static clio::Logger log{"Backend"};
log.info() << "Constructing BackendInterface";
auto readOnly = config.valueOr("read_only", false);
auto type = config.value<std::string>("database.type");
std::shared_ptr<BackendInterface> backend = nullptr;
if (boost::iequals(type, "cassandra"))
{
auto cfg = config.section("database." + type);
auto ttl = config.valueOr<uint32_t>("online_delete", 0) * 4;
backend = std::make_shared<CassandraBackend>(ioc, cfg, ttl);
}
if (!backend)
throw std::runtime_error("Invalid database type");
backend->open(readOnly);
<<<<<<< HEAD
backend->checkFlagLedgers();
=======
auto rng = backend->hardFetchLedgerRangeNoThrow();
if (rng)
{
backend->updateRange(rng->minSequence);
backend->updateRange(rng->maxSequence);
}
>>>>>>> c7e31af... Add state data cache and successor table. Remove keys table
log.info() << "Constructed BackendInterface Successfully";
return backend;
}
} // namespace Backend


@@ -0,0 +1,241 @@
#include <backend/BackendIndexer.h>
#include <backend/BackendInterface.h>
namespace Backend {
BackendIndexer::BackendIndexer(boost::json::object const& config)
: strand_(ioc_)
{
if (config.contains("indexer_key_shift"))
keyShift_ = config.at("indexer_key_shift").as_int64();
work_.emplace(ioc_);
ioThread_ = std::thread{[this]() { ioc_.run(); }};
};
BackendIndexer::~BackendIndexer()
{
work_.reset();
ioThread_.join();
}
void
BackendIndexer::addKey(ripple::uint256&& key)
{
keys.insert(std::move(key));
}
void
BackendIndexer::doKeysRepair(
BackendInterface const& backend,
std::optional<uint32_t> sequence)
{
auto rng = backend.fetchLedgerRangeNoThrow();
if (!rng)
return;
if (!sequence)
sequence = rng->maxSequence;
if (sequence < rng->minSequence)
sequence = rng->minSequence;
BOOST_LOG_TRIVIAL(info)
<< __func__ << " sequence = " << std::to_string(*sequence);
std::optional<ripple::uint256> cursor;
while (true)
{
try
{
if (backend.isLedgerIndexed(*sequence))
{
BOOST_LOG_TRIVIAL(info)
<< __func__ << " - " << std::to_string(*sequence)
<< " flag ledger already written. returning";
return;
}
else
{
BOOST_LOG_TRIVIAL(info)
<< __func__ << " - " << std::to_string(*sequence)
<< " flag ledger not written. recursing..";
uint32_t lower = (*sequence - 1) >> keyShift_ << keyShift_;
doKeysRepair(backend, lower);
BOOST_LOG_TRIVIAL(info)
<< __func__ << " - "
<< " sequence = " << std::to_string(*sequence)
<< " lower = " << std::to_string(lower)
<< " finished recursing. submitting repair ";
writeKeyFlagLedger(lower, backend);
return;
}
}
catch (DatabaseTimeout const& e)
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " Database timeout fetching keys";
std::this_thread::sleep_for(std::chrono::seconds(2));
}
}
BOOST_LOG_TRIVIAL(info)
<< __func__ << " finished. sequence = " << std::to_string(*sequence);
}
void
BackendIndexer::doKeysRepairAsync(
BackendInterface const& backend,
std::optional<uint32_t> sequence)
{
boost::asio::post(strand_, [this, sequence, &backend]() {
doKeysRepair(backend, sequence);
});
}
void
BackendIndexer::writeKeyFlagLedger(
uint32_t ledgerSequence,
BackendInterface const& backend)
{
auto nextFlag = getKeyIndexOfSeq(ledgerSequence + 1);
uint32_t lower = ledgerSequence >> keyShift_ << keyShift_;
BOOST_LOG_TRIVIAL(info)
<< "writeKeyFlagLedger - "
<< "next flag = " << std::to_string(nextFlag.keyIndex)
<< "lower = " << std::to_string(lower)
<< "ledgerSequence = " << std::to_string(ledgerSequence) << " starting";
ripple::uint256 zero = {};
std::optional<ripple::uint256> cursor;
size_t numKeys = 0;
auto begin = std::chrono::system_clock::now();
while (true)
{
try
{
{
BOOST_LOG_TRIVIAL(info)
<< "writeKeyFlagLedger - checking for complete...";
if (backend.isLedgerIndexed(nextFlag.keyIndex))
{
BOOST_LOG_TRIVIAL(warning)
<< "writeKeyFlagLedger - "
<< "flag ledger already written. flag = "
<< std::to_string(nextFlag.keyIndex)
<< " , ledger sequence = "
<< std::to_string(ledgerSequence);
return;
}
BOOST_LOG_TRIVIAL(info)
<< "writeKeyFlagLedger - is not complete";
}
indexing_ = nextFlag.keyIndex;
auto start = std::chrono::system_clock::now();
auto [objects, curCursor, warning] =
backend.fetchLedgerPage(cursor, lower, 2048);
auto mid = std::chrono::system_clock::now();
// no cursor means this is the first page
if (!cursor)
{
if (warning)
{
BOOST_LOG_TRIVIAL(error)
<< "writeKeyFlagLedger - "
<< " prev flag ledger not written "
<< std::to_string(nextFlag.keyIndex) << " : "
<< std::to_string(ledgerSequence);
assert(false);
throw std::runtime_error("Missing prev flag");
}
}
cursor = curCursor;
std::unordered_set<ripple::uint256> keys;
for (auto& obj : objects)
{
keys.insert(obj.key);
}
backend.writeKeys(keys, nextFlag, true);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug)
<< "writeKeyFlagLedger - " << std::to_string(nextFlag.keyIndex)
<< " fetched a page "
<< " cursor = "
<< (cursor.has_value() ? ripple::strHex(*cursor)
: std::string{})
<< " num keys = " << std::to_string(numKeys) << " fetch time = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
mid - start)
.count()
<< " write time = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end - mid)
.count();
if (!cursor)
break;
}
catch (DatabaseTimeout const& e)
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " Database timeout fetching keys";
std::this_thread::sleep_for(std::chrono::seconds(2));
}
}
backend.writeKeys({zero}, nextFlag, true);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(info)
<< "writeKeyFlagLedger - " << std::to_string(nextFlag.keyIndex)
<< " finished. "
<< " num keys = " << std::to_string(numKeys) << " total time = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
.count();
indexing_ = 0;
}
void
BackendIndexer::writeKeyFlagLedgerAsync(
uint32_t ledgerSequence,
BackendInterface const& backend)
{
BOOST_LOG_TRIVIAL(info)
<< __func__
<< " starting. sequence = " << std::to_string(ledgerSequence);
boost::asio::post(strand_, [this, ledgerSequence, &backend]() {
writeKeyFlagLedger(ledgerSequence, backend);
});
BOOST_LOG_TRIVIAL(info)
<< __func__
<< " finished. sequence = " << std::to_string(ledgerSequence);
}
void
BackendIndexer::finish(uint32_t ledgerSequence, BackendInterface const& backend)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__
<< " starting. sequence = " << std::to_string(ledgerSequence);
auto keyIndex = getKeyIndexOfSeq(ledgerSequence);
if (isFirst_)
{
auto rng = backend.fetchLedgerRangeNoThrow();
if (rng && rng->minSequence != ledgerSequence)
isFirst_ = false;
else
{
keyIndex = KeyIndex{ledgerSequence};
}
}
backend.writeKeys(keys, keyIndex);
if (isFirst_)
{
// write completion record
ripple::uint256 zero = {};
backend.writeKeys({zero}, keyIndex);
// write next flag synchronously
keyIndex = getKeyIndexOfSeq(ledgerSequence + 1);
backend.writeKeys(keys, keyIndex);
backend.writeKeys({zero}, keyIndex);
}
isFirst_ = false;
keys = {};
BOOST_LOG_TRIVIAL(debug)
<< __func__
<< " finished. sequence = " << std::to_string(ledgerSequence);
}
} // namespace Backend

View File

@@ -0,0 +1,346 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h>
#include <log/Logger.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"Backend"};
} // namespace
namespace Backend {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
auto commitRes = doFinishWrites();
if (commitRes)
{
updateRange(ledgerSequence);
}
return commitRes;
}
void
BackendInterface::writeLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob)
{
assert(key.size() == sizeof(ripple::uint256));
doWriteLedgerObject(std::move(key), seq, std::move(blob));
}
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow(
boost::asio::yield_context& yield) const
{
gLog.trace() << "called";
while (true)
{
try
{
return hardFetchLedgerRange(yield);
}
catch (DatabaseTimeout& t)
{
;
}
}
}
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
gLog.trace() << "called";
return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}
// *** state data methods
std::optional<Blob>
BackendInterface::fetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
auto obj = cache_.get(key, sequence);
if (obj)
{
gLog.trace() << "Cache hit - " << ripple::strHex(key);
return *obj;
}
else
{
gLog.trace() << "Cache miss - " << ripple::strHex(key);
auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj)
gLog.trace() << "Missed cache and missed in db";
else
gLog.trace() << "Missed cache but found in db";
return dbObj;
}
}
std::vector<Blob>
BackendInterface::fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
std::vector<Blob> results;
results.resize(keys.size());
std::vector<ripple::uint256> misses;
for (size_t i = 0; i < keys.size(); ++i)
{
auto obj = cache_.get(keys[i], sequence);
if (obj)
results[i] = *obj;
else
misses.push_back(keys[i]);
}
gLog.trace() << "Cache hits = " << keys.size() - misses.size()
<< " - cache misses = " << misses.size();
if (misses.size())
{
auto objs = doFetchLedgerObjects(misses, sequence, yield);
for (size_t i = 0, j = 0; i < results.size(); ++i)
{
if (results[i].size() == 0)
{
results[i] = objs[j];
++j;
}
}
}
return results;
}
// Fetches the successor to key/index
std::optional<ripple::uint256>
BackendInterface::fetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
auto succ = cache_.getSuccessor(key, ledgerSequence);
if (succ)
gLog.trace() << "Cache hit - " << ripple::strHex(key);
else
gLog.trace() << "Cache miss - " << ripple::strHex(key);
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}
std::optional<LedgerObject>
BackendInterface::fetchSuccessorObject(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
if (succ)
{
auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
if (!obj)
return {{*succ, {}}};
return {{*succ, *obj}};
}
return {};
}
BookOffersPage
BackendInterface::fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursor,
boost::asio::yield_context& yield) const
{
// TODO try to speed this up. This can take a few seconds. The goal is
// to get it down to a few hundred milliseconds.
BookOffersPage page;
const ripple::uint256 bookEnd = ripple::getQualityNext(book);
ripple::uint256 uTipIndex = book;
std::vector<ripple::uint256> keys;
auto getMillis = [](auto diff) {
return std::chrono::duration_cast<std::chrono::milliseconds>(diff)
.count();
};
auto begin = std::chrono::system_clock::now();
std::uint32_t numSucc = 0;
std::uint32_t numPages = 0;
long succMillis = 0;
long pageMillis = 0;
while (keys.size() < limit)
{
auto mid1 = std::chrono::system_clock::now();
auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
auto mid2 = std::chrono::system_clock::now();
numSucc++;
succMillis += getMillis(mid2 - mid1);
if (!offerDir || offerDir->key >= bookEnd)
{
gLog.trace() << "offerDir.has_value() " << offerDir.has_value()
<< " breaking";
break;
}
uTipIndex = offerDir->key;
while (keys.size() < limit)
{
++numPages;
ripple::STLedgerEntry sle{
ripple::SerialIter{
offerDir->blob.data(), offerDir->blob.size()},
offerDir->key};
auto indexes = sle.getFieldV256(ripple::sfIndexes);
keys.insert(keys.end(), indexes.begin(), indexes.end());
auto next = sle.getFieldU64(ripple::sfIndexNext);
if (!next)
{
gLog.trace() << "Next is empty. breaking";
break;
}
auto nextKey = ripple::keylet::page(uTipIndex, next);
auto nextDir =
fetchLedgerObject(nextKey.key, ledgerSequence, yield);
assert(nextDir);
offerDir->blob = *nextDir;
offerDir->key = nextKey.key;
}
auto mid3 = std::chrono::system_clock::now();
pageMillis += getMillis(mid3 - mid2);
}
auto mid = std::chrono::system_clock::now();
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i)
{
gLog.trace() << "Key = " << ripple::strHex(keys[i])
<< " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence;
assert(objs[i].size());
page.offers.push_back({keys[i], objs[i]});
}
auto end = std::chrono::system_clock::now();
gLog.debug() << "Fetching " << std::to_string(keys.size())
<< " offers took " << std::to_string(getMillis(mid - begin))
<< " milliseconds. Fetching next dir took "
<< std::to_string(succMillis)
<< " milliseonds. Fetched next dir " << std::to_string(numSucc)
<< " times"
<< " Fetching next page of dir took "
<< std::to_string(pageMillis) << " milliseconds"
<< ". num pages = " << std::to_string(numPages)
<< ". Fetching all objects took "
<< std::to_string(getMillis(end - mid))
<< " milliseconds. total time = "
<< std::to_string(getMillis(end - begin)) << " milliseconds"
<< " book = " << ripple::strHex(book);
return page;
}
LedgerPage
BackendInterface::fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
bool outOfOrder,
boost::asio::yield_context& yield) const
{
LedgerPage page;
std::vector<ripple::uint256> keys;
bool reachedEnd = false;
while (keys.size() < limit && !reachedEnd)
{
ripple::uint256 const& curCursor = keys.size() ? keys.back()
: cursor ? *cursor
: firstKey;
std::uint32_t const seq =
outOfOrder ? range->maxSequence : ledgerSequence;
auto succ = fetchSuccessorKey(curCursor, seq, yield);
if (!succ)
reachedEnd = true;
else
keys.push_back(std::move(*succ));
}
auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < objects.size(); ++i)
{
if (objects[i].size())
page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
else if (!outOfOrder)
{
gLog.error()
<< "Deleted or non-existent object in successor table. key = "
<< ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j)
{
msg << " - " << ripple::strHex(keys[j]);
}
gLog.error() << msg.str();
}
}
if (keys.size() && !reachedEnd)
page.cursor = keys.back();
return page;
}
std::optional<ripple::Fees>
BackendInterface::fetchFees(
std::uint32_t const seq,
boost::asio::yield_context& yield) const
{
ripple::Fees fees;
auto key = ripple::keylet::fees().key;
auto bytes = fetchLedgerObject(key, seq, yield);
if (!bytes)
{
gLog.error() << "Could not find fees";
return {};
}
ripple::SerialIter it(bytes->data(), bytes->size());
ripple::SLE sle{it, key};
if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
fees.base = sle.getFieldU64(ripple::sfBaseFee);
if (sle.getFieldIndex(ripple::sfReferenceFeeUnits) != -1)
fees.units = sle.getFieldU32(ripple::sfReferenceFeeUnits);
if (sle.getFieldIndex(ripple::sfReserveBase) != -1)
fees.reserve = sle.getFieldU32(ripple::sfReserveBase);
if (sle.getFieldIndex(ripple::sfReserveIncrement) != -1)
fees.increment = sle.getFieldU32(ripple::sfReserveIncrement);
return fees;
}
} // namespace Backend

View File

@@ -0,0 +1,663 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/ledger/ReadView.h>
#include <backend/DBHelpers.h>
#include <backend/SimpleCache.h>
#include <backend/Types.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <thread>
#include <type_traits>
namespace Backend {
/**
* @brief Throws an error when database read time limit is exceeded.
*
* This exception is thrown when a database read exceeds its time limit. Callers
* typically pair it with the retryOnTimeout helper below to re-attempt the read.
*/
class DatabaseTimeout : public std::exception
{
public:
const char*
what() const throw() override
{
return "Database read timed out. Please retry the request";
}
};
/**
* @brief Retries a callable until it completes without a DatabaseTimeout.
*
* @tparam F Any callable type that may throw DatabaseTimeout.
* @param func The callable to invoke; it is retried until it returns normally.
* @param waitMs Time to sleep between retries, in milliseconds (default 500).
* @return Whatever func returns.
*/
template <class F>
auto
retryOnTimeout(F func, size_t waitMs = 500)
{
static clio::Logger log{"Backend"};
while (true)
{
try
{
return func();
}
catch (DatabaseTimeout& t)
{
log.error()
<< "Database request timed out. Sleeping and retrying ... ";
std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
}
}
}
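// A minimal usage sketch (illustrative only; `fetchRange` stands in for any
// callable that may throw DatabaseTimeout):
//
//     auto range = retryOnTimeout([&] { return fetchRange(); });
//
// The callable is invoked repeatedly, sleeping waitMs milliseconds between
// attempts, until it returns without throwing.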
/**
* @brief Runs a coroutine-based callable to completion on the calling thread.
*
* A temporary io_context and strand are created, the callable is spawned as a
* coroutine, and the context is run until the callable finishes. This allows
* synchronous code to call the yield_context based read methods of the backend.
*
* @tparam F A callable taking a boost::asio::yield_context&.
* @param f The callable to execute.
* @return Whatever f returns, or nothing if f returns void.
*/
template <class F>
auto
synchronous(F&& f)
{
/** @brief Local io_context used to run the coroutine to completion.
*
* The strand serializes handlers posted to ctx, and the work guard keeps
* ctx.run() from returning before the spawned coroutine has finished.
*/
boost::asio::io_context ctx;
boost::asio::io_context::strand strand(ctx);
std::optional<boost::asio::io_context::work> work;
/*! @brief Keep ctx alive until the coroutine resets the work guard. */
work.emplace(ctx);
/**
* @brief Deduce the callable's return type.
*
* R is the type returned by invoking f with a yield_context. The non-void
* and void cases are handled separately below.
*/
using R = typename std::result_of<F(boost::asio::yield_context&)>::type;
if constexpr (!std::is_same<R, void>::value)
{
/**
* @brief Non-void case: capture and return the result.
*
* boost::asio::spawn runs f as a coroutine on the strand; res captures its
* return value, the work guard is reset, and ctx.run() returns once the
* coroutine has completed.
*/
R res;
boost::asio::spawn(
strand, [&f, &work, &res](boost::asio::yield_context yield) {
res = f(yield);
work.reset();
});
ctx.run();
return res;
}
else
{
/*! @brief Void case: simply run the coroutine to completion. */
boost::asio::spawn(
strand, [&f, &work](boost::asio::yield_context yield) {
f(yield);
work.reset();
});
ctx.run();
}
}
/**
* @brief Runs a coroutine-based callable synchronously, retrying on timeout.
*
* @tparam F A callable taking a boost::asio::yield_context&.
* @param f The callable to execute.
* @return Whatever f returns.
*/
template <class F>
auto
synchronousAndRetryOnTimeout(F&& f)
{
return retryOnTimeout([&]() { return synchronous(f); });
}
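// Usage sketch (illustrative): running a yield_context based read from
// synchronous code, retrying on timeout. This mirrors how
// hardFetchLedgerRange() below wraps its coroutine-based overload.
//
//     auto range = synchronousAndRetryOnTimeout(
//         [&](boost::asio::yield_context yield) {
//             return hardFetchLedgerRange(yield);
//         });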
/*! @brief Handles ledger and transaction backend data. */
class BackendInterface
{
/**
* @brief Shared state for the interface.
*
* rngMtx_ is a shared mutex guarding the cached ledger range; it allows
* many concurrent readers but only one writer at a time.
*/
protected:
mutable std::shared_mutex rngMtx_;
std::optional<LedgerRange> range;
SimpleCache cache_;
/**
* @brief Public read methods
*
* All of these read methods can throw DatabaseTimeout. When writing
* code in an RPC handler, this exception does not need to be caught:
* when an RPC results in a timeout, an error is returned to the client.
*/
public:
BackendInterface(clio::Config const& config)
{
}
virtual ~BackendInterface()
{
}
/*! @brief LEDGER METHODS */
public:
/**
* @brief The in-memory cache of ledger objects.
*
* The const overload gives read-only access; the non-const overload
* allows the cache to be populated and updated.
*
* @return SimpleCache const&
*/
SimpleCache const&
cache() const
{
return cache_;
}
SimpleCache&
cache()
{
return cache_;
}
/*! @brief Fetches a specific ledger by sequence number. */
virtual std::optional<ripple::LedgerInfo>
fetchLedgerBySequence(
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
/*! @brief Fetches a specific ledger by hash. */
virtual std::optional<ripple::LedgerInfo>
fetchLedgerByHash(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const = 0;
/*! @brief Fetches the latest ledger sequence. */
virtual std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0;
/*! @brief Returns the cached ledger range under a shared lock. */
std::optional<LedgerRange>
fetchLedgerRange() const
{
std::shared_lock lck(rngMtx_);
return range;
}
/**
* @brief Updates the stored ledger range.
*
* Initializes the range to {newMax, newMax} if no range has been set yet;
* otherwise advances the maximum sequence of the range to newMax.
*
* @param newMax Unsigned 32-bit integer representing the new maximum of the range.
*/
void
updateRange(uint32_t newMax)
{
std::scoped_lock lck(rngMtx_);
assert(!range || newMax >= range->maxSequence);
if (!range)
range = {newMax, newMax};
else
range->maxSequence = newMax;
}
/**
* @brief Fetches the network fee settings stored in a given ledger.
*
* @param seq Unsigned 32-bit integer representing the ledger sequence.
* @param yield The currently executing coroutine.
* @return std::optional<ripple::Fees>
*/
std::optional<ripple::Fees>
fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const;
/*! @brief TRANSACTION METHODS */
/**
* @brief Fetches a specific transaction.
*
* @param hash Unsigned 256-bit integer representing hash.
* @param yield The currently executing coroutine.
* @return std::optional<TransactionAndMetadata>
*/
virtual std::optional<TransactionAndMetadata>
fetchTransaction(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches multiple transactions.
*
* @param hashes Unsigned integer value representing a hash.
* @param yield The currently executing coroutine.
* @return std::vector<TransactionAndMetadata>
*/
virtual std::vector<TransactionAndMetadata>
fetchTransactions(
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches all transactions for a specific account
*
* @param account A specific XRPL Account, specified by unique type
* accountID.
* @param limit Paging limit for how many transactions can be returned per
* page.
* @param forward Boolean whether paging happens forwards or backwards.
* @param cursor Important metadata returned every time paging occurs.
* @param yield Currently executing coroutine.
* @return TransactionsAndCursor
*/
virtual TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches all transactions from a specific ledger.
*
* @param ledgerSequence Unsigned 32-bit integer identifying the ledger to
* fetch transactions from.
* @param yield Currently executing coroutine.
* @return std::vector<TransactionAndMetadata>
*/
virtual std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches all transaction hashes from a specific ledger.
*
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::vector<ripple::uint256>
*/
virtual std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
/*! @brief NFT methods */
/**
* @brief Fetches a specific NFT
*
* @param tokenID Unsigned 256-bit integer.
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::optional<NFT>
*/
virtual std::optional<NFT>
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches all transactions for a specific NFT.
*
* @param tokenID Unsigned 256-bit integer.
* @param limit Paging limit as to how many transactions are returned per page.
* @param forward Boolean whether paging happens forwards or backwards.
* @param cursorIn Represents transaction number and ledger sequence.
* @param yield Currently executing coroutine is passed in as input.
* @return TransactionsAndCursor
*/
virtual TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const = 0;
/*! @brief STATE DATA METHODS */
/**
* @brief Fetches a specific ledger object: vector of unsigned chars
*
* @param key Unsigned 256-bit integer.
* @param sequence Unsigned 32-bit integer.
* @param yield Currently executing coroutine.
* @return std::optional<Blob>
*/
std::optional<Blob>
fetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const;
/**
* @brief Fetches all ledger objects: a vector of vectors of unsigned chars.
*
* @param keys Unsigned 256-bit integer.
* @param sequence Unsigned 32-bit integer.
* @param yield Currently executing coroutine.
* @return std::vector<Blob>
*/
std::vector<Blob>
fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const;
/*! @brief Virtual function version of fetchLedgerObject */
virtual std::optional<Blob>
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
/*! @brief Virtual function version of fetchLedgerObjects */
virtual std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Returns the objects that changed in a given ledger (the diff).
*
* Each returned object consists of a key and a vector of unsigned chars
* (the blob).
*
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::vector<LedgerObject>
*/
virtual std::vector<LedgerObject>
fetchLedgerDiff(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches a page of ledger objects, ordered by key/index.
*
* @param cursor Important metadata returned every time paging occurs.
* @param ledgerSequence Standard unsigned integer.
* @param limit Paging limit as to how many transactions returned per page.
* @param outOfOrder Boolean on whether ledger page is out of order.
* @param yield Currently executing coroutine.
* @return LedgerPage
*/
LedgerPage
fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
bool outOfOrder,
boost::asio::yield_context& yield) const;
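// Paging sketch (illustrative; `backend`, `seq` and `yield` are assumed to
// be in scope): walk the whole ledger one page at a time.
//
//     std::optional<ripple::uint256> cursor;
//     do
//     {
//         auto page = backend.fetchLedgerPage(cursor, seq, 2048, false, yield);
//         // ... process page.objects ...
//         cursor = page.cursor;
//     } while (cursor);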
/*! @brief Fetches successor object from key/index. */
std::optional<LedgerObject>
fetchSuccessorObject(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const;
/*! @brief Fetches successor key from key/index. */
std::optional<ripple::uint256>
fetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const;
/*! @brief Virtual function version of fetchSuccessorKey. */
virtual std::optional<ripple::uint256>
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches book offers.
*
* @param book Unsigned 256-bit integer.
* @param ledgerSequence Standard unsigned integer.
* @param limit Paging limit as to how many transactions are returned per page.
* @param cursor Important metadata returned every time paging occurs.
* @param yield Currently executing coroutine.
* @return BookOffersPage
*/
BookOffersPage
fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursor,
boost::asio::yield_context& yield) const;
/**
* @brief Synchronously fetches the ledger range from the backend.
*
* The ledger range is a struct of minimum and maximum sequence numbers.
* The lambda captures by reference ([&]) so that the call can be forwarded
* to the coroutine-based overload below.
*
* @return std::optional<LedgerRange>
*/
std::optional<LedgerRange>
hardFetchLedgerRange() const
{
return synchronous([&](boost::asio::yield_context yield) {
return hardFetchLedgerRange(yield);
});
}
/*! @brief Virtual function equivalent of hardFetchLedgerRange. */
virtual std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0;
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow() const;
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const;
/**
* @brief Writes a ledger header to the database.
*
* @param ledgerInfo The ledger information (header fields).
* @param ledgerHeader The serialized ledger header, moved in.
*/
virtual void
writeLedger(
ripple::LedgerInfo const& ledgerInfo,
std::string&& ledgerHeader) = 0;
/**
* @brief Writes a new ledger object.
*
* The key and blob are r-value references; their contents are moved into
* the database writer rather than copied.
*
* @param key The object's key, moved in.
* @param seq Unsigned integer representing the ledger sequence.
* @param blob The serialized object, moved in.
*/
virtual void
writeLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob);
/**
* @brief Writes a new transaction.
*
* @param hash The transaction hash, moved in.
* @param seq Unsigned 32-bit integer.
* @param date Unsigned 32-bit integer.
* @param transaction The serialized transaction, moved in.
* @param metadata The serialized metadata, moved in.
*/
virtual void
writeTransaction(
std::string&& hash,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata) = 0;
/**
* @brief Write a new NFT.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeNFTs(std::vector<NFTsData>&& data) = 0;
/**
* @brief Write a new set of account transactions.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
/**
* @brief Write a new transaction for a specific NFT.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;
/**
* @brief Write a new successor.
*
* @param key Passed in as an r-value reference.
* @param seq Unsigned 32-bit integer.
* @param successor Passed in as an r-value reference.
*/
virtual void
writeSuccessor(
std::string&& key,
std::uint32_t const seq,
std::string&& successor) = 0;
/*! @brief Tells database we will write data for a specific ledger. */
virtual void
startWrites() const = 0;
/**
* @brief Tells database we finished writing all data for a specific ledger.
*
* TODO: change the return value to represent different results:
* Committed, write conflict, errored, successful but not committed
*
* @param ledgerSequence Const unsigned 32-bit integer on ledger sequence.
* @return true
* @return false
*/
bool
finishWrites(std::uint32_t const ledgerSequence);
/**
* @brief Selectively deletes parts of the database.
*
* @param numLedgersToKeep Unsigned 32-bit integer on number of ledgers to
* keep.
* @param yield Currently executing coroutine.
* @return true
* @return false
*/
virtual bool
doOnlineDelete(
std::uint32_t numLedgersToKeep,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Opens the database
*
* Sets up all of the necessary objects and data structures. After this
* call completes, the database is ready for use.
*
* @param readOnly Whether the database is opened read-only.
*/
virtual void
open(bool readOnly) = 0;
/*! @brief Closes the database, releasing any resources. */
virtual void
close(){};
virtual bool
isTooBusy() const = 0;
private:
/**
* @brief Private helper method to write ledger object
*
* @param key r-value string representing key.
* @param seq Unsigned 32-bit integer representing sequence.
* @param blob r-value vector of unsigned chars.
*/
virtual void
doWriteLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob) = 0;
virtual bool
doFinishWrites() = 0;
};
} // namespace Backend
using BackendInterface = Backend::BackendInterface;

File diff suppressed because it is too large

File diff suppressed because it is too large

249
src/backend/DBHelpers.h Normal file
View File

@@ -0,0 +1,249 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/basics/Log.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/ledger/ReadView.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/TxMeta.h>
#include <boost/container/flat_set.hpp>
#include <backend/Types.h>
/// Struct used to keep track of what to write to
/// account_transactions/account_tx tables
struct AccountTransactionsData
{
boost::container::flat_set<ripple::AccountID> accounts;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
ripple::uint256 txHash;
AccountTransactionsData(
ripple::TxMeta& meta,
ripple::uint256 const& txHash,
beast::Journal& j)
: accounts(meta.getAffectedAccounts())
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, txHash(txHash)
{
}
AccountTransactionsData() = default;
};
/// Represents a link from a tx to an NFT that was targeted/modified/created
/// by it. Gets written to nf_token_transactions table and the like.
struct NFTTransactionsData
{
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
ripple::uint256 txHash;
NFTTransactionsData(
ripple::uint256 const& tokenID,
ripple::TxMeta const& meta,
ripple::uint256 const& txHash)
: tokenID(tokenID)
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, txHash(txHash)
{
}
};
/// Represents an NFT state at a particular ledger. Gets written to nf_tokens
/// table and the like.
struct NFTsData
{
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
// The transaction index is only stored because we want to store only the
// final state of an NFT per ledger. Since we pull this from transactions
// we keep track of which tx index created this so we can de-duplicate, as
// it is possible for one ledger to have multiple txs that change the
// state of the same NFT. This field is not applicable when we are loading
// initial NFT state via ledger objects, since we do not have to tiebreak
// NFT state for a given ledger in that case.
std::optional<std::uint32_t> transactionIndex;
ripple::AccountID owner;
// We only set the uri if this is a mint tx, or if we are
// loading initial state from NFTokenPage objects. In other words,
// uri should only be set if the etl process believes this NFT hasn't
// been seen before in our local database. We do this so that we don't
// write to the nf_token_uris table every
// time the same NFT changes hands. We also can infer if there is a URI
// that we need to write to the issuer_nf_tokens table.
std::optional<ripple::Blob> uri;
bool isBurned = false;
// This constructor is used when parsing an NFTokenMint tx.
// Unfortunately because of the extreme edge case of being able to
// re-mint an NFT with the same ID, we must explicitly record a null
// URI. For this reason, we _always_ write this field as a result of
// this tx.
NFTsData(
ripple::uint256 const& tokenID,
ripple::AccountID const& owner,
ripple::Blob const& uri,
ripple::TxMeta const& meta)
: tokenID(tokenID)
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, owner(owner)
, uri(uri)
{
}
// This constructor is used when parsing an NFTokenBurn or
// NFTokenAcceptOffer tx
NFTsData(
ripple::uint256 const& tokenID,
ripple::AccountID const& owner,
ripple::TxMeta const& meta,
bool isBurned)
: tokenID(tokenID)
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, owner(owner)
, isBurned(isBurned)
{
}
// This constructor is used when parsing an NFTokenPage directly from
// ledger state.
// Unfortunately because of the extreme edge case of being able to
// re-mint an NFT with the same ID, we must explicitly record a null
// URI. For this reason, we _always_ write this field as a result of
// this tx.
NFTsData(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
ripple::AccountID const& owner,
ripple::Blob const& uri)
: tokenID(tokenID)
, ledgerSequence(ledgerSequence)
, owner(owner)
, uri(uri)
{
}
};
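// The helpers below inspect the serialized LedgerEntryType of an object. In
// the canonical binary format the entry type is a 16-bit value encoded in
// bytes 1 and 2 of the blob: 0x006f ('o') marks an Offer and 0x0064 ('d')
// marks a DirectoryNode.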
template <class T>
inline bool
isOffer(T const& object)
{
short offer_bytes = (object[1] << 8) | object[2];
return offer_bytes == 0x006f;
}
template <class T>
inline bool
isOfferHex(T const& object)
{
auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4);
if (blob)
{
short offer_bytes = ((*blob)[1] << 8) | (*blob)[2];
return offer_bytes == 0x006f;
}
return false;
}
template <class T>
inline bool
isDirNode(T const& object)
{
short spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == 0x0064;
}
template <class T, class R>
inline bool
isBookDir(T const& key, R const& object)
{
if (!isDirNode(object))
return false;
ripple::STLedgerEntry const sle{
ripple::SerialIter{object.data(), object.size()}, key};
return !sle[~ripple::sfOwner].has_value();
}
template <class T>
inline ripple::uint256
getBook(T const& offer)
{
ripple::SerialIter it{offer.data(), offer.size()};
ripple::SLE sle{it, {}};
ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);
return book;
}
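// In a book directory key the final 8 bytes encode the quality (exchange
// rate); getBookBase keeps only the first 24 bytes of the key, yielding the
// base key shared by every page of that order book.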
template <class T>
inline ripple::uint256
getBookBase(T const& key)
{
assert(key.size() == ripple::uint256::size());
ripple::uint256 ret;
for (size_t i = 0; i < 24; ++i)
{
ret.data()[i] = key.data()[i];
}
return ret;
}
inline ripple::LedgerInfo
deserializeHeader(ripple::Slice data)
{
ripple::SerialIter sit(data.data(), data.size());
ripple::LedgerInfo info;
info.seq = sit.get32();
info.drops = sit.get64();
info.parentHash = sit.get256();
info.txHash = sit.get256();
info.accountHash = sit.get256();
info.parentCloseTime =
ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
info.closeTime =
ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
info.closeTimeResolution = ripple::NetClock::duration{sit.get8()};
info.closeFlags = sit.get8();
info.hash = sit.get256();
return info;
}
inline std::string
uint256ToString(ripple::uint256 const& uint)
{
return {reinterpret_cast<const char*>(uint.data()), uint.size()};
}
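// Seconds between the Unix epoch (1970-01-01) and the Ripple epoch
// (2000-01-01T00:00:00Z), used when converting XRPL timestamps to Unix time.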
static constexpr std::uint32_t rippleEpochStart = 946684800;

38
src/backend/Errors.h Normal file
View File

@@ -0,0 +1,38 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <exception>
#include <string>
namespace Backend {
class UnexpectedDataError : public std::exception
{
std::string msg;
public:
explicit UnexpectedDataError(std::string const& msg) : msg(msg)
{
}
const char*
what() const throw() override
{
return msg.c_str();
}
};
} // namespace Backend

220
src/backend/README.md Normal file
View File

@@ -0,0 +1,220 @@
# Clio Backend
## Background
The backend of Clio is responsible for reading and writing past ledger data from and to a given database. As of right now, Cassandra and ScyllaDB are the only supported databases that are production-ready. Support for other database types can be added by creating a new implementation of the virtual methods of `BackendInterface.h`, then using the Factory Object Design Pattern to add logic to `BackendFactory.h` that returns the new database interface for a specific `type` in Clio's configuration file.
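As a rough illustration, a factory extended with a new database type could look like the sketch below. The function name `make_Backend`, the `config.value<std::string>(...)` accessor, and the `CassandraBackend`/`MyNewBackend`/`"my_new_db"` names are assumptions for the example, not the actual factory code.
```
// Hypothetical sketch of BackendFactory.h logic for a new database type.
std::shared_ptr<BackendInterface>
make_Backend(clio::Config const& config)
{
    auto const type = config.value<std::string>("database.type");
    if (type == "cassandra")
        return std::make_shared<CassandraBackend>(config);
    if (type == "my_new_db")  // placeholder name for the new implementation
        return std::make_shared<MyNewBackend>(config);
    throw std::runtime_error("Unknown database type: " + type);
}
```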
## Data Model
The data model used by Clio to read and write ledger data is different from what Rippled uses. Rippled uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the leaf vertices of the tree. Thus, looking up a record located at a leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record. Rippled nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other Rippled nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized Rippled nodes.
Since Clio only extracts past validated ledger data from a group of trusted Rippled nodes, it can be safely assumed that these ledger data are correct without the need to validate with other nodes in the XRP peer-to-peer network. Because of this, Clio is able to use a flattened data model to store the past validated ledger data, which allows for direct record lookup with much faster constant time operations.
There are three main types of data in each XRP ledger version: the [Ledger Header](https://xrpl.org/ledger-header.html), the [Transaction Set](https://xrpl.org/transaction-formats.html) and the [State Data](https://xrpl.org/ledger-object-types.html). Due to the structural differences of the different types of databases, Clio may choose to represent this data using a different schema for each unique database type.
**Keywords**
*Sequence*: A unique incrementing identification number used to label the different ledger versions.
*Hash*: The SHA512-half (calculate SHA512 and take the first 256 bits) hash of various ledger data like the entire ledger or specific ledger objects.
*Ledger Object*: The [binary-encoded](https://xrpl.org/serialization.html) STObject containing specific data (i.e. metadata, transaction data).
*Metadata*: The data containing [detailed information](https://xrpl.org/transaction-metadata.html#transaction-metadata) of the outcome of a specific transaction, regardless of whether the transaction was successful.
*Transaction data*: The data containing the [full details](https://xrpl.org/transaction-common-fields.html) of a specific transaction.
*Object Index*: The pseudo-random unique identifier of a ledger object, created by hashing the data of the object.
## Cassandra Implementation
Cassandra is a distributed wide-column NoSQL database designed to handle large data throughput with high availability and no single point of failure. By leveraging Cassandra, Clio will be able to quickly and reliably scale up when needed simply by adding more Cassandra nodes to the Cassandra cluster configuration.
In Cassandra, Clio will be creating 9 tables to store the ledger data, they are `ledger_transactions`, `transactions`, `ledger_hashes`, `ledger_range`, `objects`, `ledgers`, `diff`, `account_tx`, and `successor`. Their schemas and how they work are detailed below.
*Note, if you would like to visually explore the data structure of the Cassandra database, you can first run Clio server with database `type` configured as `cassandra` to fill ledger data from Rippled nodes into Cassandra, then use a GUI database management tool like [Datastax's Opcenter](https://docs.datastax.com/en/install/6.0/install/opscInstallOpsc.html) to interactively view it.*
### `ledger_transactions`
```
CREATE TABLE clio.ledger_transactions (
ledger_sequence bigint, # The sequence number of the ledger version
hash blob, # Hash of all the transactions on this ledger version
PRIMARY KEY (ledger_sequence, hash)
) WITH CLUSTERING ORDER BY (hash ASC) ...
```
This table stores the hashes of all transactions in a given ledger sequence ordered by the hash value in ascending order.
### `transactions`
```
CREATE TABLE clio.transactions (
hash blob PRIMARY KEY, # The transaction hash
date bigint, # Date of the transaction
ledger_sequence bigint, # The sequence that the transaction was validated
metadata blob, # Metadata of the transaction
transaction blob # Data of the transaction
) ...
```
This table stores the full transaction and metadata of each ledger version with the transaction hash as the primary key.
To look up all the transactions that were validated in a ledger version with sequence `n`, one can first get all the transaction hashes in that ledger version by querying `SELECT * FROM ledger_transactions WHERE ledger_sequence = n;`. Then, iterate through the list of hashes and query `SELECT * FROM transactions WHERE hash = one_of_the_hash_from_the_list;` to get the detailed transaction data.
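The same two-step lookup can be expressed through Clio's `BackendInterface` (a minimal sketch; the surrounding coroutine is assumed to supply `backend` and `yield`):
```
// Sketch: fetch every transaction validated in ledger n via the backend API.
std::vector<Backend::TransactionAndMetadata>
transactionsInLedger(
    Backend::BackendInterface const& backend,
    std::uint32_t n,
    boost::asio::yield_context& yield)
{
    // Step 1: all transaction hashes recorded for this ledger sequence
    auto hashes = backend.fetchAllTransactionHashesInLedger(n, yield);
    // Step 2: resolve each hash to its full transaction and metadata
    return backend.fetchTransactions(hashes, yield);
}
```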
### `ledger_hashes`
```
CREATE TABLE clio.ledger_hashes (
hash blob PRIMARY KEY, # Hash of entire ledger version's data
sequence bigint # The sequence of the ledger version
) ...
```
This table stores the hash of all ledger versions by their sequences.
### `ledger_range`
```
CREATE TABLE clio.ledger_range (
is_latest boolean PRIMARY KEY, # Whether this sequence is the stopping range
sequence bigint # The sequence number of the starting/stopping range
) ...
```
This table marks the range of ledger versions that is stored on this specific Cassandra node. Because of its nature, there are only two records in this table with `false` and `true` values for `is_latest`, marking the starting and ending sequence of the ledger range.
### `objects`
```
CREATE TABLE clio.objects (
key blob, # Object index of the object
sequence bigint, # The sequence this object was last updated
object blob, # Data of the object
PRIMARY KEY (key, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table stores the specific data of all objects that ever existed on the XRP network, even if they are deleted (which is represented with a special `0x` value). The records are ordered by descending sequence, where the newest validated ledger objects are at the top.
This table is updated when all data for a given ledger sequence has been written to the various tables in the database. For each ledger, many associated records are written to different tables. This table is used as a synchronization mechanism, to prevent the application from reading data from a ledger for which all data has not yet been fully written.
### `ledgers`
```
CREATE TABLE clio.ledgers (
sequence bigint PRIMARY KEY, # Sequence of the ledger version
header blob # Data of the header
) ...
```
This table stores the ledger header data of specific ledger versions by their sequence.
### `diff`
```
CREATE TABLE clio.diff (
seq bigint, # Sequence of the ledger version
key blob, # Hash of changes in the ledger version
PRIMARY KEY (seq, key)
) WITH CLUSTERING ORDER BY (key ASC) ...
```
This table stores the object index of all the changes in each ledger version.
### `account_tx`
```
CREATE TABLE clio.account_tx (
account blob,
seq_idx frozen<tuple<bigint, bigint>>, # Tuple of (ledger_index, transaction_index)
hash blob, # Hash of the transaction
PRIMARY KEY (account, seq_idx)
) WITH CLUSTERING ORDER BY (seq_idx DESC) ...
```
This table stores the list of transactions affecting a given account. This includes transactions made by the account, as well as transactions received.
### `successor`
```
CREATE TABLE clio.successor (
key blob, # Object index
seq bigint, # The sequence at which this ledger object's predecessor/successor link was updated
next blob, # Index of the next object that existed in this sequence
PRIMARY KEY (key, seq)
) WITH CLUSTERING ORDER BY (seq ASC) ...
```
This table is the important backbone of how histories of ledger objects are stored in Cassandra. The successor table stores the object index of every ledger object that was validated on the XRP network, along with the ledger sequence at which the object was updated. Because each key is ordered by sequence, tracing through the table with a specific sequence number lets Clio recreate a Linked List data structure representing all the ledger objects that existed at that ledger sequence. The special values `0x00...00` and `0xFF...FF` are used to label the head and tail of the Linked List in the successor table. The diagram below showcases how tracing through the same table but with different sequence parameter filtering can result in different Linked Lists representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` can effectively trace through the successor table and get the Linked List of a specific sequence `n`.
![Successor Table Trace Diagram](https://raw.githubusercontent.com/Shoukozumi/clio/9b2ea3efb6b164b02e9a5f0ef6717065a70f078c/src/backend/README.png)
*P.S.: The `diff` is `(DELETE 0x00...02, CREATE 0x00...03)` for `seq=1001` and `(CREATE 0x00...04)` for `seq=1002`, which is both accurately reflected with the Linked List trace*
In each new ledger version with sequence `n`, a ledger object `v` can either be **created**, **modified**, or **deleted**. For all three of these operations, the procedure to update the successor table can be broken down into two steps (a code sketch follows the list below):
1. Trace through the Linked List of the previous sequence to find the ledger object `e` with the greatest object index smaller than or equal to `v`'s index. Save `e`'s `next` value (the index of the next ledger object) as `w`.
2. If `v` is...
1. Being **created**, add two new records of `seq=n` with one being `e` pointing to `v`, and `v` pointing to `w` (Linked List insertion operation).
2. Being **modified**, do nothing.
3. Being **deleted**, add a record of `seq=n` with `e` pointing to `v`'s `next` value (Linked List deletion operation).
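The sketch below (C++-style pseudocode, not production code) shows roughly how these writes map onto `BackendInterface::writeSuccessor`. The `Change` enum and the `findPredecessor`/`nextOf` helpers are assumptions used only for illustration.
```
// Pseudocode sketch of the successor-table updates for one changed object v
// at new ledger sequence n.
enum class Change { Created, Modified, Deleted };

void
updateSuccessor(
    Backend::BackendInterface& backend,
    ripple::uint256 const& v,
    std::uint32_t n,
    Change change)
{
    // Step 1: trace the previous sequence's list for the closest key e <= v;
    // w is e's old `next` value. findPredecessor is a hypothetical helper.
    auto [e, w] = findPredecessor(v, n - 1);
    if (change == Change::Created)
    {
        // Linked List insertion: e -> v and v -> w
        backend.writeSuccessor(uint256ToString(e), n, uint256ToString(v));
        backend.writeSuccessor(uint256ToString(v), n, uint256ToString(w));
    }
    else if (change == Change::Deleted)
    {
        // Linked List deletion: e now points to whatever v pointed to
        backend.writeSuccessor(
            uint256ToString(e), n, uint256ToString(nextOf(v, n - 1)));
    }
    // A modified object leaves the successor table untouched.
}
```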
### NFT data model
In `rippled` NFTs are stored in NFTokenPage ledger objects. This object is
implemented to save ledger space and has the property that it gives us O(1)
lookup time for an NFT, assuming we know who owns the NFT at a particular
ledger. However, if we do not know who owns the NFT at a specific ledger
height we have no alternative in rippled other than scanning the entire
ledger. Because of this tradeoff, clio implements a special NFT indexing data
structure that allows clio users to query NFTs quickly, while keeping
rippled's space-saving optimizations.
#### `nf_tokens`
```
CREATE TABLE clio.nf_tokens (
token_id blob, # The NFT's ID
sequence bigint, # Sequence of ledger version
owner blob, # The account ID of the owner of this NFT at this ledger
is_burned boolean, # True if token was burned in this ledger
PRIMARY KEY (token_id, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table indexes NFT IDs with their owner at a given ledger. So
```
SELECT * FROM nf_tokens
WHERE token_id = N AND sequence <= Y
ORDER BY sequence DESC LIMIT 1;
```
will give you the owner of token N at ledger Y and whether it was burned. If
the token is burned, the owner field indicates the account that owned the
token at the time it was burned; it does not indicate the person who burned
the token, necessarily. If you need to determine who burned the token you can
use the `nft_history` API, which will give you the NFTokenBurn transaction
that burned this token, along with the account that submitted that
transaction.
#### `issuer_nf_tokens_v2`
```
CREATE TABLE clio.issuer_nf_tokens_v2 (
issuer blob, # The NFT issuer's account ID
taxon bigint, # The NFT's token taxon
token_id blob, # The NFT's ID
PRIMARY KEY (issuer, taxon, token_id)
)
```
This table indexes token IDs against their issuer and issuer/taxon
combination. This is useful for determining all the NFTs a specific account
issued, or all the NFTs a specific account issued with a specific taxon. It is
not useful to know all the NFTs with a given taxon while excluding issuer, since the
meaning of a taxon is left to an issuer.
#### `nf_token_uris`
```
CREATE TABLE clio.nf_token_uris (
token_id blob, # The NFT's ID
sequence bigint, # Sequence of ledger version
uri blob, # The NFT's URI
PRIMARY KEY (token_id, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table is used to store an NFT's URI. Without storing this here, we would
need to traverse the NFT owner's entire set of NFTs to find the URI, again due
to the way that NFTs are stored in rippled. Furthermore, instead of storing
this in the `nf_tokens` table, we store it here to save space. A given NFT
will have only one entry in this table (see caveat below), written to this
table as soon as clio sees the NFTokenMint transaction, or when clio loads an
NFTokenPage from the initial ledger it downloaded. However, the `nf_tokens`
table is written to every time an NFT changes ownership, or if it is burned.
Given this, why do we have to store the sequence? Unfortunately there is an
extreme edge case where a given NFT ID can be burned, and then re-minted with
a different URI. This is extremely unlikely, and might be fixed in a future
version to rippled, but just in case we can handle that edge case by allowing
a given NFT ID to have a new URI assigned in this case, without removing the
prior URI.
#### `nf_token_transactions`
```
CREATE TABLE clio.nf_token_transactions (
token_id blob, # The NFT's ID
seq_idx tuple<bigint, bigint>, # Tuple of (ledger_index, transaction_index)
hash blob, # Hash of the transaction
PRIMARY KEY (token_id, seq_idx)
) WITH CLUSTERING ORDER BY (seq_idx DESC) ...
```
This table is the NFT equivalent of `account_tx`. It's motivated by the exact
same reasons and serves the analogous purpose here. It drives the
`nft_history` API.

157
src/backend/SimpleCache.cpp Normal file
View File

@@ -0,0 +1,157 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <backend/SimpleCache.h>
namespace Backend {
uint32_t
SimpleCache::latestLedgerSequence() const
{
std::shared_lock lck{mtx_};
return latestSeq_;
}
void
SimpleCache::update(
std::vector<LedgerObject> const& objs,
uint32_t seq,
bool isBackground)
{
if (disabled_)
return;
{
std::scoped_lock lck{mtx_};
if (seq > latestSeq_)
{
assert(seq == latestSeq_ + 1 || latestSeq_ == 0);
latestSeq_ = seq;
}
for (auto const& obj : objs)
{
if (obj.blob.size())
{
if (isBackground && deletes_.count(obj.key))
continue;
auto& e = map_[obj.key];
if (seq > e.seq)
{
e = {seq, obj.blob};
}
}
else
{
map_.erase(obj.key);
if (!full_ && !isBackground)
deletes_.insert(obj.key);
}
}
}
}
std::optional<LedgerObject>
SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock lck{mtx_};
successorReqCounter_++;
if (seq != latestSeq_)
return {};
auto e = map_.upper_bound(key);
if (e == map_.end())
return {};
successorHitCounter_++;
return {{e->first, e->second.blob}};
}
std::optional<LedgerObject>
SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock lck{mtx_};
if (seq != latestSeq_)
return {};
auto e = map_.lower_bound(key);
if (e == map_.begin())
return {};
--e;
return {{e->first, e->second.blob}};
}
std::optional<Blob>
SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
{
if (seq > latestSeq_)
return {};
std::shared_lock lck{mtx_};
objectReqCounter_++;
auto e = map_.find(key);
if (e == map_.end())
return {};
if (seq < e->second.seq)
return {};
objectHitCounter_++;
return {e->second.blob};
}
void
SimpleCache::setDisabled()
{
disabled_ = true;
}
void
SimpleCache::setFull()
{
if (disabled_)
return;
full_ = true;
std::scoped_lock lck{mtx_};
deletes_.clear();
}
bool
SimpleCache::isFull() const
{
return full_;
}
size_t
SimpleCache::size() const
{
std::shared_lock lck{mtx_};
return map_.size();
}
float
SimpleCache::getObjectHitRate() const
{
if (!objectReqCounter_)
return 1;
return ((float)objectHitCounter_) / objectReqCounter_;
}
float
SimpleCache::getSuccessorHitRate() const
{
if (!successorReqCounter_)
return 1;
return ((float)successorHitCounter_) / successorReqCounter_;
}
} // namespace Backend

98
src/backend/SimpleCache.h Normal file
View File

@@ -0,0 +1,98 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
#include <backend/Types.h>
#include <map>
#include <mutex>
#include <shared_mutex>
#include <utility>
#include <vector>
namespace Backend {
class SimpleCache
{
struct CacheEntry
{
uint32_t seq = 0;
Blob blob;
};
// counters for fetchLedgerObject(s) hit rate
mutable std::atomic_uint32_t objectReqCounter_{0};
mutable std::atomic_uint32_t objectHitCounter_{0};
// counters for fetchSuccessorKey hit rate
mutable std::atomic_uint32_t successorReqCounter_{0};
mutable std::atomic_uint32_t successorHitCounter_{0};
std::map<ripple::uint256, CacheEntry> map_;
mutable std::shared_mutex mtx_;
uint32_t latestSeq_ = 0;
std::atomic_bool full_ = false;
std::atomic_bool disabled_ = false;
// temporary set to prevent background thread from writing already deleted
// data. not used when cache is full
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
public:
// Update the cache with new ledger objects
// set isBackground to true when writing old data from a background thread
void
update(
std::vector<LedgerObject> const& blobs,
uint32_t seq,
bool isBackground = false);
std::optional<Blob>
get(ripple::uint256 const& key, uint32_t seq) const;
// always returns empty optional if isFull() is false
std::optional<LedgerObject>
getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
// always returns empty optional if isFull() is false
std::optional<LedgerObject>
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
void
setDisabled();
void
setFull();
uint32_t
latestLedgerSequence() const;
// whether the cache has all data for the most recent ledger
bool
isFull() const;
size_t
size() const;
float
getObjectHitRate() const;
float
getSuccessorHitRate() const;
};
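// Typical usage sketch (illustrative): after loading a complete ledger into
// the cache, mark it full and then apply each new ledger's diff.
//
//     SimpleCache cache;
//     cache.update(initialObjects, startSeq);      // bulk/background load
//     cache.setFull();
//     cache.update(diffForNextLedger, startSeq + 1);
//     auto blob = cache.get(someKey, startSeq + 1);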
} // namespace Backend

111
src/backend/Types.h Normal file
View File

@@ -0,0 +1,111 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
namespace Backend {
// *** return types
using Blob = std::vector<unsigned char>;
struct LedgerObject
{
ripple::uint256 key;
Blob blob;
bool
operator==(const LedgerObject& other) const
{
return key == other.key && blob == other.blob;
}
};
struct LedgerPage
{
std::vector<LedgerObject> objects;
std::optional<ripple::uint256> cursor;
};
struct BookOffersPage
{
std::vector<LedgerObject> offers;
std::optional<ripple::uint256> cursor;
};
struct TransactionAndMetadata
{
Blob transaction;
Blob metadata;
std::uint32_t ledgerSequence;
std::uint32_t date;
bool
operator==(const TransactionAndMetadata& other) const
{
return transaction == other.transaction && metadata == other.metadata &&
ledgerSequence == other.ledgerSequence && date == other.date;
}
};
struct TransactionsCursor
{
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
};
struct TransactionsAndCursor
{
std::vector<TransactionAndMetadata> txns;
std::optional<TransactionsCursor> cursor;
};
struct NFT
{
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
ripple::AccountID owner;
Blob uri;
bool isBurned;
// clearly two tokens are the same if they have the same ID, but this
// struct stores the state of a given token at a given ledger sequence, so
// we also need to compare with ledgerSequence
bool
operator==(NFT const& other) const
{
return tokenID == other.tokenID &&
ledgerSequence == other.ledgerSequence;
}
};
struct LedgerRange
{
std::uint32_t minSequence;
std::uint32_t maxSequence;
};
constexpr ripple::uint256 firstKey{
"0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 lastKey{
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 hi192{
"0000000000000000000000000000000000000000000000001111111111111111"};
} // namespace Backend
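A brief sketch of how the cursor types above support resumable paging; fetchBatch below is a hypothetical stand-in for a backend query and is not part of this file:

#include <backend/Types.h>

// Hypothetical data source that returns one page of transactions per call.
Backend::TransactionsAndCursor
fetchBatch(std::optional<Backend::TransactionsCursor> cursor)
{
    // placeholder: a real implementation would query the database, resuming
    // from (cursor->ledgerSequence, cursor->transactionIndex) when set
    (void)cursor;
    return {};
}

void
drainAllPages()
{
    std::optional<Backend::TransactionsCursor> cursor;
    do
    {
        auto [txns, next] = fetchBatch(cursor);
        for (auto const& tx : txns)
        {
            // tx.transaction and tx.metadata are raw blobs;
            // tx.ledgerSequence and tx.date locate the transaction
            (void)tx;
        }
        cursor = next;
    } while (cursor);  // an empty cursor means there are no more pages
}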

190
src/config/Config.cpp Normal file
View File

@@ -0,0 +1,190 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <config/Config.h>
#include <log/Logger.h>
#include <algorithm>
#include <fstream>
#include <sstream>
namespace clio {
// Note: `store_(store)` MUST use `()` instead of `{}` otherwise gcc
// picks `initializer_list` constructor and anything passed becomes an
// array :-D
Config::Config(boost::json::value store) : store_(std::move(store))
{
}
Config::operator bool() const noexcept
{
return not store_.is_null();
}
bool
Config::contains(key_type key) const
{
return lookup(key).has_value();
}
std::optional<boost::json::value>
Config::lookup(key_type key) const
{
if (store_.is_null())
return std::nullopt;
std::reference_wrapper<boost::json::value const> cur = std::cref(store_);
auto hasBrokenPath = false;
auto tokenized = detail::Tokenizer<key_type, Separator>{key};
std::string subkey{};
auto maybeSection = tokenized.next();
while (maybeSection.has_value())
{
auto section = maybeSection.value();
subkey += section;
if (not hasBrokenPath)
{
if (not cur.get().is_object())
throw detail::StoreException(
"Not an object at '" + subkey + "'");
if (not cur.get().as_object().contains(section))
hasBrokenPath = true;
else
cur = std::cref(cur.get().as_object().at(section));
}
subkey += Separator;
maybeSection = tokenized.next();
}
if (hasBrokenPath)
return std::nullopt;
return std::make_optional(cur);
}
std::optional<Config::array_type>
Config::maybeArray(key_type key) const
{
try
{
auto maybe_arr = lookup(key);
if (maybe_arr && maybe_arr->is_array())
{
auto& arr = maybe_arr->as_array();
array_type out;
out.reserve(arr.size());
std::transform(
std::begin(arr),
std::end(arr),
std::back_inserter(out),
[](auto&& element) { return Config{std::move(element)}; });
return std::make_optional<array_type>(std::move(out));
}
}
catch (detail::StoreException const&)
{
// ignore store error, but rethrow key errors
}
return std::nullopt;
}
Config::array_type
Config::array(key_type key) const
{
if (auto maybe_arr = maybeArray(key); maybe_arr)
return maybe_arr.value();
throw std::logic_error("No array found at '" + key + "'");
}
Config::array_type
Config::arrayOr(key_type key, array_type fallback) const
{
if (auto maybe_arr = maybeArray(key); maybe_arr)
return maybe_arr.value();
return fallback;
}
Config::array_type
Config::arrayOrThrow(key_type key, std::string_view err) const
{
try
{
return maybeArray(key).value();
}
catch (std::exception const&)
{
throw std::runtime_error(err.data());
}
}
Config
Config::section(key_type key) const
{
auto maybe_element = lookup(key);
if (maybe_element && maybe_element->is_object())
return Config{std::move(*maybe_element)};
throw std::logic_error("No section found at '" + key + "'");
}
Config::array_type
Config::array() const
{
if (not store_.is_array())
throw std::logic_error("_self_ is not an array");
array_type out;
auto const& arr = store_.as_array();
out.reserve(arr.size());
std::transform(
std::cbegin(arr),
std::cend(arr),
std::back_inserter(out),
[](auto const& element) { return Config{element}; });
return out;
}
Config
ConfigReader::open(std::filesystem::path path)
{
try
{
std::ifstream in(path, std::ios::in | std::ios::binary);
if (in)
{
std::stringstream contents;
contents << in.rdbuf();
auto opts = boost::json::parse_options{};
opts.allow_comments = true;
return Config{boost::json::parse(contents.str(), {}, opts)};
}
}
catch (std::exception const& e)
{
LogService::error() << "Could not read configuration file from '"
<< path.string() << "': " << e.what();
}
return Config{};
}
} // namespace clio
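The dotted-key traversal implemented by lookup() above walks nested JSON objects one section at a time. A short sketch of what that means for callers; the JSON document and keys are made up for illustration:

#include <config/Config.h>
#include <boost/json.hpp>

void
exampleLookup()
{
    auto raw = boost::json::parse(R"({
        "database": {
            "type": "cassandra",
            "cassandra": { "contact_points": "127.0.0.1" }
        }
    })");
    clio::Config cfg(std::move(raw));

    // "database.cassandra.contact_points" is resolved object by object
    auto points = cfg.value<std::string>("database.cassandra.contact_points");
    (void)points;

    // a missing path yields std::nullopt rather than throwing
    auto missing = cfg.maybeValue<std::string>("database.postgres.host");
    (void)missing;
}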

405
src/config/Config.h Normal file
View File

@@ -0,0 +1,405 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <config/detail/Helpers.h>
#include <boost/json.hpp>
#include <filesystem>
#include <optional>
#include <string>
#include <string_view>
namespace clio {
/**
* @brief Convenience wrapper to query a JSON configuration file.
*
* Any custom data type can be supported by implementing the right `tag_invoke`
* for `boost::json::value_to`.
*/
class Config final
{
boost::json::value store_;
static constexpr char Separator = '.';
public:
using key_type = std::string; /*! The type of key used */
using array_type = std::vector<Config>; /*! The type of array used */
using write_cursor_type = std::pair<
std::optional<std::reference_wrapper<boost::json::value>>,
key_type>;
/**
* @brief Construct a new Config object.
* @param store boost::json::value that backs this instance
*/
explicit Config(boost::json::value store = {});
//
// Querying the store
//
/**
* @brief Checks whether underlying store is not null.
*
* @return true If the store is not null
* @return false If the store is null
*/
operator bool() const noexcept;
/**
* @brief Checks whether something exists under given key.
*
* @param key The key to check
* @return true If something exists under key
* @return false If nothing exists under key
* @throws std::logic_error If the key is of invalid format
*/
[[nodiscard]] bool
contains(key_type key) const;
//
// Key value access
//
/**
* @brief Interface for fetching values by key that returns std::optional.
*
* Will attempt to fetch the value under the desired key. If the value
* exists and can be represented by the desired type Result then it will be
* returned wrapped in an optional. If the value exists but the conversion
* to Result is not possible - a runtime_error will be thrown. If the value
* does not exist under the specified key - std::nullopt is returned.
*
* @tparam Result The desired return type
* @param key The key to check
* @return std::optional<Result> Optional value of desired type
* @throws std::logic_error Thrown if conversion to Result is not possible
* or key is of invalid format
*/
template <typename Result>
[[nodiscard]] std::optional<Result>
maybeValue(key_type key) const
{
auto maybe_element = lookup(key);
if (maybe_element)
return std::make_optional<Result>(
checkedAs<Result>(key, *maybe_element));
return std::nullopt;
}
/**
* @brief Interface for fetching values by key.
*
* Will attempt to fetch the value under the desired key. If the value
* exists and can be represented by the desired type Result then it will be
* returned. If the value exists but the conversion
* to Result is not possible OR the value does not exist - a logic_error
* will be thrown.
*
* @tparam Result The desired return type
* @param key The key to check
* @return Result Value of desired type
* @throws std::logic_error Thrown if conversion to Result is not
* possible, value does not exist under specified key path or the key is of
* invalid format
*/
template <typename Result>
[[nodiscard]] Result
value(key_type key) const
{
return maybeValue<Result>(key).value();
}
/**
* @brief Interface for fetching values by key with fallback.
*
* Will attempt to fetch the value under the desired key. If the value
* exists and can be represented by the desired type Result then it will be
* returned. If the value exists but the conversion
* to Result is not possible - a logic_error will be thrown. If the value
* does not exist under the specified key - user specified fallback is
* returned.
*
* @tparam Result The desired return type
* @param key The key to check
* @param fallback The fallback value
* @return Result Value of desired type
* @throws std::logic_error Thrown if conversion to Result is not possible
* or the key is of invalid format
*/
template <typename Result>
[[nodiscard]] Result
valueOr(key_type key, Result fallback) const
{
try
{
return maybeValue<Result>(key).value_or(fallback);
}
catch (detail::StoreException const&)
{
return fallback;
}
}
/**
* @brief Interface for fetching values by key with custom error handling.
*
* Will attempt to fetch the value under the desired key. If the value
* exists and can be represented by the desired type Result then it will be
* returned. If the value exists but the conversion
* to Result is not possible OR the value does not exist - a runtime_error
* will be thrown with the user specified message.
*
* @tparam Result The desired return type
* @param key The key to check
* @param err The custom error message
* @return Result Value of desired type
* @throws std::runtime_error Thrown if conversion to Result is not possible
* or value does not exist under key
*/
template <typename Result>
[[nodiscard]] Result
valueOrThrow(key_type key, std::string_view err) const
{
try
{
return maybeValue<Result>(key).value();
}
catch (std::exception const&)
{
throw std::runtime_error(err.data());
}
}
/**
* @brief Interface for fetching an array by key that returns std::optional.
*
* Will attempt to fetch an array under the desired key. If the array
* exists then it will be
* returned wrapped in an optional. If the array does not exist under the
* specified key - std::nullopt is returned.
*
* @param key The key to check
* @return std::optional<array_type> Optional array
* @throws std::logic_error Thrown if the key is of invalid format
*/
[[nodiscard]] std::optional<array_type>
maybeArray(key_type key) const;
/**
* @brief Interface for fetching an array by key.
*
* Will attempt to fetch an array under the desired key. If the array
* exists then it will be
* returned. If the array does not exist under the
* specified key an std::logic_error is thrown.
*
* @param key The key to check
* @return array_type The array
* @throws std::logic_error Thrown if there is no array under the desired
* key or the key is of invalid format
*/
[[nodiscard]] array_type
array(key_type key) const;
/**
* @brief Interface for fetching an array by key with fallback.
*
* Will attempt to fetch an array under the desired key. If the array
* exists then it will be returned.
* If the array does not exist or another type is stored under the desired
* key - user specified fallback is returned.
*
* @param key The key to check
* @param fallback The fallback array
* @return array_type The array
* @throws std::logic_error Thrown if the key is of invalid format
*/
[[nodiscard]] array_type
arrayOr(key_type key, array_type fallback) const;
/**
* @brief Interface for fetching an array by key with custom error handling.
*
* Will attempt to fetch an array under the desired key. If the array
* exists then it will be returned.
* If the array does not exist or another type is stored under the desired
* key - std::runtime_error is thrown with the user specified error message.
*
* @param key The key to check
* @param err The custom error message
* @return array_type The array
* @throws std::runtime_error Thrown if there is no array under the desired
* key
*/
[[nodiscard]] array_type
arrayOrThrow(key_type key, std::string_view err) const;
/**
* @brief Interface for fetching a sub section by key.
*
* Will attempt to fetch an entire section under the desired key and return
* it as a Config instance. If the section does not exist or another type is
* stored under the desired key - std::logic_error is thrown.
*
* @param key The key to check
* @return Config Section represented as a separate instance of Config
* @throws std::logic_error Thrown if there is no section under the
* desired key or the key is of invalid format
*/
[[nodiscard]] Config
section(key_type key) const;
//
// Direct self-value access
//
/**
* @brief Interface for reading the value directly referred to by the
* instance. Wraps as std::optional.
*
* See @ref maybeValue(key_type) const for how this works.
*/
template <typename Result>
[[nodiscard]] std::optional<Result>
maybeValue() const
{
if (store_.is_null())
return std::nullopt;
return std::make_optional<Result>(checkedAs<Result>("_self_", store_));
}
/**
* @brief Interface for reading the value directly referred to by the
* instance.
*
* See @ref value(key_type) const for how this works.
*/
template <typename Result>
[[nodiscard]] Result
value() const
{
return maybeValue<Result>().value();
}
/**
* @brief Interface for reading the value directly referred to by the
* instance with user-specified fallback.
*
* See @ref valueOr(key_type, Result) const for how this works.
*/
template <typename Result>
[[nodiscard]] Result
valueOr(Result fallback) const
{
return maybeValue<Result>().value_or(fallback);
}
/**
* @brief Interface for reading the value directly referred to by the
* instance with user-specified error message.
*
* See @ref valueOrThrow(key_type, std::string_view) const for how this
* works.
*/
template <typename Result>
[[nodiscard]] Result
valueOrThrow(std::string_view err) const
{
try
{
return maybeValue<Result>().value();
}
catch (std::exception const&)
{
throw std::runtime_error(err.data());
}
}
/**
* @brief Interface for reading the array directly referred to by the
* instance.
*
* See @ref array(key_type) const for how this works.
*/
[[nodiscard]] array_type
array() const;
private:
template <typename Return>
[[nodiscard]] Return
checkedAs(key_type key, boost::json::value const& value) const
{
auto has_error = false;
if constexpr (std::is_same_v<Return, bool>)
{
if (not value.is_bool())
has_error = true;
}
else if constexpr (std::is_same_v<Return, std::string>)
{
if (not value.is_string())
has_error = true;
}
else if constexpr (std::is_same_v<Return, double>)
{
if (not value.is_number())
has_error = true;
}
else if constexpr (
std::is_convertible_v<Return, uint64_t> ||
std::is_convertible_v<Return, int64_t>)
{
if (not value.is_int64() && not value.is_uint64())
has_error = true;
}
if (has_error)
throw std::runtime_error(
"Type for key '" + key + "' is '" +
std::string{to_string(value.kind())} +
"' in JSON but requested '" + detail::typeName<Return>() + "'");
return boost::json::value_to<Return>(value);
}
std::optional<boost::json::value>
lookup(key_type key) const;
write_cursor_type
lookupForWrite(key_type key);
};
/**
* @brief Simple configuration file reader.
*
* Reads the JSON file under specified path and creates a @ref Config object
* from its contents.
*/
class ConfigReader final
{
public:
static Config
open(std::filesystem::path path);
};
} // namespace clio
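A hedged sketch of how this interface is typically consumed end to end; the file path and every key below are illustrative only:

#include <config/Config.h>

void
exampleConfigUsage()
{
    clio::Config cfg = clio::ConfigReader::open("./example-config.json");
    if (!cfg)
        return;  // file missing or unparsable: the store is null

    // scalar access, with and without fallbacks
    auto logLevel = cfg.valueOr<std::string>("log_level", "info");
    auto port = cfg.valueOrThrow<uint32_t>(
        "server.port", "server.port must be set");
    (void)logLevel;
    (void)port;

    // sections and arrays produce further Config instances
    auto db = cfg.section("database");
    auto dbType = db.valueOr<std::string>("type", "cassandra");
    (void)dbType;

    for (auto const& source : cfg.arrayOr("etl_sources", {}))
    {
        auto ip = source.valueOr<std::string>("ip", {});
        (void)ip;
    }
}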

164
src/config/detail/Helpers.h Normal file
View File

@@ -0,0 +1,164 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <optional>
#include <queue>
#include <stdexcept>
#include <string>
#include <typeinfo>
namespace clio::detail {
/**
* @brief Thrown when a KeyPath related error occurs
*/
struct KeyException : public ::std::logic_error
{
KeyException(::std::string msg) : ::std::logic_error{msg}
{
}
};
/**
* @brief Thrown when a Store (config's storage) related error occurs.
*/
struct StoreException : public ::std::logic_error
{
StoreException(::std::string msg) : ::std::logic_error{msg}
{
}
};
/**
* @brief Simple string tokenizer. Used by @ref Config.
*
* @tparam KeyType The type of key to use
* @tparam Separator The separator character
*/
template <typename KeyType, char Separator>
class Tokenizer final
{
using opt_key_t = std::optional<KeyType>;
KeyType key_;
KeyType token_{};
std::queue<KeyType> tokens_{};
public:
explicit Tokenizer(KeyType key) : key_{key}
{
if (key.empty())
throw KeyException("Empty key");
for (auto const& c : key)
{
if (c == Separator)
saveToken();
else
token_ += c;
}
saveToken();
}
[[nodiscard]] opt_key_t
next()
{
if (tokens_.empty())
return std::nullopt;
auto token = tokens_.front();
tokens_.pop();
return std::make_optional(std::move(token));
}
private:
void
saveToken()
{
if (token_.empty())
throw KeyException("Empty token in key '" + key_ + "'.");
tokens_.push(std::move(token_));
token_ = {};
}
};
template <typename T>
static constexpr const char*
typeName()
{
return typeid(T).name();
}
template <>
constexpr const char*
typeName<uint64_t>()
{
return "uint64_t";
}
template <>
constexpr const char*
typeName<int64_t>()
{
return "int64_t";
}
template <>
constexpr const char*
typeName<uint32_t>()
{
return "uint32_t";
}
template <>
constexpr const char*
typeName<int32_t>()
{
return "int32_t";
}
template <>
constexpr const char*
typeName<bool>()
{
return "bool";
}
template <>
constexpr const char*
typeName<std::string>()
{
return "std::string";
}
template <>
constexpr const char*
typeName<const char*>()
{
return "const char*";
}
template <>
constexpr const char*
typeName<double>()
{
return "double";
}
}; // namespace clio::detail
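A small sketch of the tokenizer's behaviour on a dotted key; the key shown is illustrative:

#include <config/detail/Helpers.h>
#include <iostream>
#include <string>

void
exampleTokenizer()
{
    // "database.cassandra.threads" -> "database", "cassandra", "threads"
    clio::detail::Tokenizer<std::string, '.'> tok{"database.cassandra.threads"};
    while (auto part = tok.next())
        std::cout << *part << '\n';

    // an empty key or an empty section (e.g. "a..b") throws KeyException
}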

192
src/etl/ETLHelpers.h Normal file
View File

@@ -0,0 +1,192 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/basics/base_uint.h>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <optional>
#include <queue>
#include <sstream>
#include <vector>
/// This datastructure is used to keep track of the sequence of the most recent
/// ledger validated by the network. There are two methods that will wait until
/// certain conditions are met. This datastructure is able to be "stopped". When
/// the datastructure is stopped, any threads currently waiting are unblocked.
/// Any later calls to methods of this datastructure will not wait. Once the
/// datastructure is stopped, the datastructure remains stopped for the rest of
/// its lifetime.
class NetworkValidatedLedgers
{
// max sequence validated by network
std::optional<uint32_t> max_;
mutable std::mutex m_;
std::condition_variable cv_;
public:
static std::shared_ptr<NetworkValidatedLedgers>
make_ValidatedLedgers()
{
return std::make_shared<NetworkValidatedLedgers>();
}
/// Notify the datastructure that idx has been validated by the network
/// @param idx sequence validated by network
void
push(uint32_t idx)
{
std::lock_guard lck(m_);
if (!max_ || idx > *max_)
max_ = idx;
cv_.notify_all();
}
/// Get most recently validated sequence. If no ledgers are known to have
/// been validated, this function waits until the next ledger is validated
/// @return sequence of most recently validated ledger. empty optional if
/// the datastructure has been stopped
std::optional<uint32_t>
getMostRecent()
{
std::unique_lock lck(m_);
cv_.wait(lck, [this]() { return max_; });
return max_;
}
/// Waits for the sequence to be validated by the network
/// @param sequence the sequence to wait for
/// @param maxWaitMs maximum time to wait in milliseconds; waits indefinitely
/// if not specified
/// @return true if the sequence was validated, false otherwise. A return
/// value of false means the wait timed out or the datastructure was stopped
bool
waitUntilValidatedByNetwork(
uint32_t sequence,
std::optional<uint32_t> maxWaitMs = {})
{
std::unique_lock lck(m_);
auto pred = [sequence, this]() -> bool {
return (max_ && sequence <= *max_);
};
if (maxWaitMs)
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs), pred);
else
cv_.wait(lck, pred);
return pred();
}
};
/// Generic thread-safe queue with an optional maximum size
/// Note, we can't use a lockfree queue here, since we need the ability to wait
/// for an element to be added or removed from the queue. These waits are
/// blocking calls.
template <class T>
class ThreadSafeQueue
{
std::queue<T> queue_;
mutable std::mutex m_;
std::condition_variable cv_;
std::optional<uint32_t> maxSize_;
public:
/// @param maxSize maximum size of the queue. Calls that would cause the
/// queue to exceed this size will block until free space is available
ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize)
{
}
/// Create a queue with no maximum size
ThreadSafeQueue() = default;
/// @param elt element to push onto queue
/// if maxSize is set, this method will block until free space is available
void
push(T const& elt)
{
std::unique_lock lck(m_);
// if queue has a max size, wait until not full
if (maxSize_)
cv_.wait(lck, [this]() { return queue_.size() < *maxSize_; });
queue_.push(elt);
cv_.notify_all();
}
/// @param elt element to push onto queue. elt is moved from
/// if maxSize is set, this method will block until free space is available
void
push(T&& elt)
{
std::unique_lock lck(m_);
// if queue has a max size, wait until not full
if (maxSize_)
cv_.wait(lck, [this]() { return queue_.size() < *maxSize_; });
queue_.push(std::move(elt));
cv_.notify_all();
}
/// @return element popped from queue. Will block until queue is non-empty
T
pop()
{
std::unique_lock lck(m_);
cv_.wait(lck, [this]() { return !queue_.empty(); });
T ret = std::move(queue_.front());
queue_.pop();
// if queue has a max size, unblock any possible pushers
if (maxSize_)
cv_.notify_all();
return ret;
}
/// @return element popped from queue. Will block until queue is non-empty
std::optional<T>
tryPop()
{
std::scoped_lock lck(m_);
if (queue_.empty())
return {};
T ret = std::move(queue_.front());
queue_.pop();
// if queue has a max size, unblock any possible pushers
if (maxSize_)
cv_.notify_all();
return ret;
}
};
/// Partitions the uint256 keyspace into numMarkers partitions, each of equal
/// size.
inline std::vector<ripple::uint256>
getMarkers(size_t numMarkers)
{
assert(numMarkers <= 256);
unsigned char incr = 256 / numMarkers;
std::vector<ripple::uint256> markers;
markers.reserve(numMarkers);
ripple::uint256 base{0};
for (size_t i = 0; i < numMarkers; ++i)
{
markers.push_back(base);
base.data()[0] += incr;
}
return markers;
}
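A self-contained producer/consumer sketch of the queue above; the bound of 8, the int payload, and the -1 sentinel are illustrative choices only:

#include <etl/ETLHelpers.h>
#include <iostream>
#include <thread>

void
exampleQueue()
{
    ThreadSafeQueue<int> queue(8);  // pushes block while 8 elements are queued

    std::thread producer([&queue] {
        for (int i = 0; i < 32; ++i)
            queue.push(i);
        queue.push(-1);  // sentinel marking the end of the stream
    });

    std::thread consumer([&queue] {
        while (true)
        {
            int v = queue.pop();  // blocks until an element is available
            if (v == -1)
                break;
            std::cout << v << '\n';
        }
    });

    producer.join();
    consumer.join();
}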

1203
src/etl/ETLSource.cpp Normal file

File diff suppressed because it is too large

724
src/etl/ETLSource.h Normal file
View File

@@ -0,0 +1,724 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <etl/ETLHelpers.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>
#include <boost/algorithm/string.hpp>
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
class ETLLoadBalancer;
class ETLSource;
class ProbingETLSource;
class SubscriptionManager;
/// This class manages a connection to a single ETL source. This is almost
/// always a rippled node, but really could be another reporting node. This
/// class subscribes to the ledgers and transactions_proposed streams of the
/// associated rippled node, and keeps track of which ledgers the rippled node
/// has. This class also has methods for extracting said ledgers. Lastly this
/// class forwards transactions received on the transactions_proposed streams to
/// any subscribers.
class ForwardCache
{
using response_type = std::optional<boost::json::object>;
clio::Logger log_{"ETL"};
mutable std::atomic_bool stopping_ = false;
mutable std::shared_mutex mtx_;
std::unordered_map<std::string, response_type> latestForwarded_;
boost::asio::io_context::strand strand_;
boost::asio::steady_timer timer_;
ETLSource const& source_;
std::uint32_t duration_ = 10;
void
clear();
public:
ForwardCache(
clio::Config const& config,
boost::asio::io_context& ioc,
ETLSource const& source)
: strand_(ioc), timer_(strand_), source_(source)
{
if (config.contains("cache"))
{
auto commands =
config.arrayOrThrow("cache", "ETLSource cache must be array");
if (config.contains("cache_duration"))
duration_ = config.valueOrThrow<uint32_t>(
"cache_duration",
"ETLSource cache_duration must be a number");
for (auto const& command : commands)
{
auto key = command.valueOrThrow<std::string>(
"ETLSource forward command must be array of strings");
latestForwarded_[key] = {};
}
}
}
// This is to be called every duration_ seconds.
// It will request information from this etlSource, and
// will populate the cache with the latest value. If the
// request fails, it will evict that value from the cache.
void
freshen();
std::optional<boost::json::object>
get(boost::json::object const& command) const;
};
class ETLSource
{
public:
virtual bool
isConnected() const = 0;
virtual boost::json::object
toJson() const = 0;
virtual void
run() = 0;
virtual void
pause() = 0;
virtual void
resume() = 0;
virtual std::string
toString() const = 0;
virtual bool
hasLedger(uint32_t sequence) const = 0;
virtual std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedger(
uint32_t ledgerSequence,
bool getObjects = true,
bool getObjectNeighbors = false) = 0;
virtual bool
loadInitialLedger(
uint32_t sequence,
std::uint32_t numMarkers,
bool cacheOnly = false) = 0;
virtual std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const = 0;
virtual ~ETLSource()
{
}
protected:
clio::Logger log_{"ETL"};
private:
friend ForwardCache;
friend ProbingETLSource;
virtual std::optional<boost::json::object>
requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const = 0;
};
struct ETLSourceHooks
{
enum class Action { STOP, PROCEED };
std::function<Action(boost::beast::error_code)> onConnected;
std::function<Action(boost::beast::error_code)> onDisconnected;
};
template <class Derived>
class ETLSourceImpl : public ETLSource
{
std::string wsPort_;
std::string grpcPort_;
std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
boost::asio::ip::tcp::resolver resolver_;
boost::beast::flat_buffer readBuffer_;
std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;
std::string validatedLedgersRaw_{"N/A"};
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;
// beast::Journal journal_;
mutable std::mutex mtx_;
std::atomic_bool connected_{false};
// true if this ETL source is forwarding transactions received on the
// transactions_proposed stream. There are usually multiple ETL sources,
// so to avoid forwarding the same transaction multiple times, we only
// forward from one particular ETL source at a time.
std::atomic_bool forwardingStream_{false};
// The last time a message was received on the ledgers stream
std::chrono::system_clock::time_point lastMsgTime_;
mutable std::mutex lastMsgTimeMtx_;
std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<SubscriptionManager> subscriptions_;
ETLLoadBalancer& balancer_;
ForwardCache forwardCache_;
std::optional<boost::json::object>
requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const override;
protected:
Derived&
derived()
{
return static_cast<Derived&>(*this);
}
std::string ip_;
size_t numFailures_ = 0;
boost::asio::io_context& ioc_;
// used for retrying connections
boost::asio::steady_timer timer_;
std::atomic_bool closing_{false};
std::atomic_bool paused_{false};
ETLSourceHooks hooks_;
void
run() override
{
log_.trace() << toString();
auto const host = ip_;
auto const port = wsPort_;
resolver_.async_resolve(host, port, [this](auto ec, auto results) {
onResolve(ec, results);
});
}
public:
~ETLSourceImpl()
{
derived().close(false);
}
bool
isConnected() const override
{
return connected_;
}
std::chrono::system_clock::time_point
getLastMsgTime() const
{
std::lock_guard lck(lastMsgTimeMtx_);
return lastMsgTime_;
}
void
setLastMsgTime()
{
std::lock_guard lck(lastMsgTimeMtx_);
lastMsgTime_ = std::chrono::system_clock::now();
}
/// Create ETL source without gRPC endpoint
/// Fetch ledger and load initial ledger will fail for this source
/// Primarily used in read-only mode, to monitor when ledgers are validated
ETLSourceImpl(
clio::Config const& config,
boost::asio::io_context& ioContext,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
ETLLoadBalancer& balancer,
ETLSourceHooks hooks)
: resolver_(boost::asio::make_strand(ioContext))
, networkValidatedLedgers_(networkValidatedLedgers)
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
, forwardCache_(config, ioContext, *this)
, ioc_(ioContext)
, timer_(ioContext)
, hooks_(hooks)
{
ip_ = config.valueOr<std::string>("ip", {});
wsPort_ = config.valueOr<std::string>("ws_port", {});
if (auto value = config.maybeValue<std::string>("grpc_port"); value)
{
grpcPort_ = *value;
try
{
boost::asio::ip::tcp::endpoint endpoint{
boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
std::stringstream ss;
ss << endpoint;
grpc::ChannelArguments chArgs;
chArgs.SetMaxReceiveMessageSize(-1);
stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
grpc::CreateCustomChannel(
ss.str(), grpc::InsecureChannelCredentials(), chArgs));
log_.debug() << "Made stub for remote = " << toString();
}
catch (std::exception const& e)
{
log_.debug() << "Exception while creating stub = " << e.what()
<< " . Remote = " << toString();
}
}
}
/// @param sequence ledger sequence to check for
/// @return true if this source has the desired ledger
bool
hasLedger(uint32_t sequence) const override
{
std::lock_guard lck(mtx_);
for (auto& pair : validatedLedgers_)
{
if (sequence >= pair.first && sequence <= pair.second)
{
return true;
}
else if (sequence < pair.first)
{
// validatedLedgers_ is a sorted list of disjoint ranges
// if the sequence comes before this range, the sequence will
// come before all subsequent ranges
return false;
}
}
return false;
}
/// process the validated range received on the ledgers stream. set the
/// appropriate member variable
/// @param range validated range received on ledgers stream
void
setValidatedRange(std::string const& range)
{
std::vector<std::pair<uint32_t, uint32_t>> pairs;
std::vector<std::string> ranges;
boost::split(ranges, range, boost::is_any_of(","));
for (auto& pair : ranges)
{
std::vector<std::string> minAndMax;
boost::split(minAndMax, pair, boost::is_any_of("-"));
if (minAndMax.size() == 1)
{
uint32_t sequence = std::stoll(minAndMax[0]);
pairs.push_back(std::make_pair(sequence, sequence));
}
else
{
assert(minAndMax.size() == 2);
uint32_t min = std::stoll(minAndMax[0]);
uint32_t max = std::stoll(minAndMax[1]);
pairs.push_back(std::make_pair(min, max));
}
}
std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) {
return left.first < right.first;
});
// we only hold the lock here, to avoid blocking while string processing
std::lock_guard lck(mtx_);
validatedLedgers_ = std::move(pairs);
validatedLedgersRaw_ = range;
}
/// @return the validated range of this source
/// @note this is only used by server_info
std::string
getValidatedRange() const
{
std::lock_guard lck(mtx_);
return validatedLedgersRaw_;
}
/// Fetch the specified ledger
/// @param ledgerSequence sequence of the ledger to fetch
/// @param getObjects whether to get the account state diff between this ledger
/// and the prior one
/// @return the extracted data and the result status
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedger(
uint32_t ledgerSequence,
bool getObjects = true,
bool getObjectNeighbors = false) override;
std::string
toString() const override
{
return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ +
", web socket port: " + wsPort_ + ", grpc port: " + grpcPort_ + "}";
}
boost::json::object
toJson() const override
{
boost::json::object res;
res["validated_range"] = getValidatedRange();
res["is_connected"] = std::to_string(isConnected());
res["ip"] = ip_;
res["ws_port"] = wsPort_;
res["grpc_port"] = grpcPort_;
auto last = getLastMsgTime();
if (last.time_since_epoch().count() != 0)
res["last_msg_age_seconds"] = std::to_string(
std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now() - getLastMsgTime())
.count());
return res;
}
/// Download a ledger in full
/// @param ledgerSequence sequence of the ledger to download
/// @param writeQueue queue to push downloaded ledger objects
/// @return true if the download was successful
bool
loadInitialLedger(
std::uint32_t ledgerSequence,
std::uint32_t numMarkers,
bool cacheOnly = false) override;
/// Attempt to reconnect to the ETL source
void
reconnect(boost::beast::error_code ec);
/// Pause the source effectively stopping it from trying to reconnect
void
pause() override
{
paused_ = true;
derived().close(false);
}
/// Resume the source allowing it to reconnect again
void
resume() override
{
paused_ = false;
derived().close(true);
}
/// Callback
void
onResolve(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type results);
/// Callback
virtual void
onConnect(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type
endpoint) = 0;
/// Callback
void
onHandshake(boost::beast::error_code ec);
/// Callback
void
onWrite(boost::beast::error_code ec, size_t size);
/// Callback
void
onRead(boost::beast::error_code ec, size_t size);
/// Handle the most recently received message
/// @return true if the message was handled successfully. false on error
bool
handleMessage();
std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const override;
};
class PlainETLSource : public ETLSourceImpl<PlainETLSource>
{
std::unique_ptr<boost::beast::websocket::stream<boost::beast::tcp_stream>>
ws_;
public:
PlainETLSource(
clio::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
ETLLoadBalancer& balancer,
ETLSourceHooks hooks)
: ETLSourceImpl(
config,
ioc,
backend,
subscriptions,
nwvl,
balancer,
std::move(hooks))
, ws_(std::make_unique<
boost::beast::websocket::stream<boost::beast::tcp_stream>>(
boost::asio::make_strand(ioc)))
{
}
void
onConnect(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
override;
/// Close the websocket
/// @param startAgain whether to reconnect
void
close(bool startAgain);
boost::beast::websocket::stream<boost::beast::tcp_stream>&
ws()
{
return *ws_;
}
};
class SslETLSource : public ETLSourceImpl<SslETLSource>
{
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx_;
std::unique_ptr<boost::beast::websocket::stream<
boost::beast::ssl_stream<boost::beast::tcp_stream>>>
ws_;
public:
SslETLSource(
clio::Config const& config,
boost::asio::io_context& ioc,
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
ETLLoadBalancer& balancer,
ETLSourceHooks hooks)
: ETLSourceImpl(
config,
ioc,
backend,
subscriptions,
nwvl,
balancer,
std::move(hooks))
, sslCtx_(sslCtx)
, ws_(std::make_unique<boost::beast::websocket::stream<
boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
boost::asio::make_strand(ioc_),
*sslCtx_))
{
}
void
onConnect(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
override;
void
onSslHandshake(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint);
/// Close the websocket
/// @param startAgain whether to reconnect
void
close(bool startAgain);
boost::beast::websocket::stream<
boost::beast::ssl_stream<boost::beast::tcp_stream>>&
ws()
{
return *ws_;
}
};
/// This class is used to manage connections to the configured ETL sources.
/// This class spawns a listener for each etl source, which listens to messages
/// on the ledgers stream (to keep track of which ledgers have been validated by
/// the network, and the range of ledgers each etl source has). This class also
/// allows requests for ledger data to be load balanced across all possible etl
/// sources.
class ETLLoadBalancer
{
private:
clio::Logger log_{"ETL"};
std::vector<std::unique_ptr<ETLSource>> sources_;
std::uint32_t downloadRanges_ = 16;
public:
ETLLoadBalancer(
clio::Config const& config,
boost::asio::io_context& ioContext,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl);
static std::shared_ptr<ETLLoadBalancer>
make_ETLLoadBalancer(
clio::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
{
return std::make_shared<ETLLoadBalancer>(
config, ioc, backend, subscriptions, validatedLedgers);
}
~ETLLoadBalancer()
{
sources_.clear();
}
/// Load the initial ledger, writing data to the queue
/// @param sequence sequence of ledger to download
void
loadInitialLedger(uint32_t sequence, bool cacheOnly = false);
/// Fetch data for a specific ledger. This function will continuously try
/// to fetch data for the specified ledger until the fetch succeeds, the
/// ledger is found in the database, or the server is shutting down.
/// @param ledgerSequence sequence of ledger to fetch data for
/// @param getObjects if true, fetch diff between specified ledger and
/// previous
/// @return the extracted data, if extraction was successful. If the ledger
/// was found in the database or the server is shutting down, the optional
/// will be empty
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedger(
uint32_t ledgerSequence,
bool getObjects,
bool getObjectNeighbors);
/// Determine whether messages received on the transactions_proposed stream
/// should be forwarded to subscribing clients. The server subscribes to
/// transactions_proposed on multiple ETLSources, yet only forwards messages
/// from one source at any given time (to avoid sending duplicate messages
/// to clients).
/// @param in ETLSource in question
/// @return true if messages should be forwarded
bool
shouldPropagateTxnStream(ETLSource* in) const
{
for (auto& src : sources_)
{
assert(src);
// We pick the first ETLSource encountered that is connected
if (src->isConnected())
{
if (src.get() == in)
return true;
else
return false;
}
}
// If no sources connected, then this stream has not been forwarded
return true;
}
boost::json::value
toJson() const
{
boost::json::array ret;
for (auto& src : sources_)
{
ret.push_back(src->toJson());
}
return ret;
}
/// Forward a JSON RPC request to a randomly selected rippled node
/// @param request JSON-RPC request
/// @return response received from rippled node
std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const;
private:
/// f is a function that takes an ETLSource as an argument and returns a
/// bool. Attempt to execute f for one randomly chosen ETLSource that has
/// the specified ledger. If f returns false, another randomly chosen
/// ETLSource is used. The process repeats until f returns true.
/// @param f function to execute. This function takes the ETL source as an
/// argument, and returns a bool.
/// @param ledgerSequence f is executed for each ETLSource that has this
/// ledger
/// @return true if f was eventually executed successfully. false if the
/// ledger was found in the database or the server is shutting down
template <class Func>
bool
execute(Func f, uint32_t ledgerSequence);
};
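The hasLedger() check in ETLSourceImpl relies on validatedLedgers_ being a sorted list of disjoint [min, max] ranges, as produced by setValidatedRange(). A standalone sketch of the same membership test, with a made-up range set:

#include <cstdint>
#include <utility>
#include <vector>

bool
inValidatedRanges(
    std::vector<std::pair<uint32_t, uint32_t>> const& ranges,
    uint32_t sequence)
{
    // ranges are sorted by their first element and do not overlap,
    // mirroring the invariant maintained by setValidatedRange()
    for (auto const& [min, max] : ranges)
    {
        if (sequence >= min && sequence <= max)
            return true;
        if (sequence < min)
            return false;  // every later range starts even higher
    }
    return false;
}

// "32570-32580,32600" parses to {{32570, 32580}, {32600, 32600}}, so:
//   inValidatedRanges({{32570, 32580}, {32600, 32600}}, 32575) == true
//   inValidatedRanges({{32570, 32580}, {32600, 32600}}, 32590) == false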

416
src/etl/NFTHelpers.cpp Normal file
View File

@@ -0,0 +1,416 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/protocol/STBase.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>
#include <algorithm>
#include <iterator>
#include <sstream>
#include <vector>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <backend/Types.h>
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
// To find the minted token ID, we put all tokenIDs referenced in the
// metadata from prior to the tx application into one vector, then all
// tokenIDs referenced in the metadata from after the tx application into
// another, then find the one tokenID that was added by this tx
// application.
std::vector<ripple::uint256> prevIDs;
std::vector<ripple::uint256> finalIDs;
// The owner is not necessarily the issuer, if using authorized minter
// flow. Determine owner from the ledger object ID of the NFTokenPages
// that were changed.
std::optional<ripple::AccountID> owner;
for (ripple::STObject const& node : txMeta.getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
ripple::ltNFTOKEN_PAGE)
continue;
if (!owner)
owner = ripple::AccountID::fromVoid(
node.getFieldH256(ripple::sfLedgerIndex).data());
if (node.getFName() == ripple::sfCreatedNode)
{
ripple::STArray const& toAddNFTs =
node.peekAtField(ripple::sfNewFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
std::transform(
toAddNFTs.begin(),
toAddNFTs.end(),
std::back_inserter(finalIDs),
[](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
}
// Else it's modified, as there should never be a deleted NFToken page
// as a result of a mint.
else
{
// When a mint results in splitting an existing page,
// it results in a created page and a modified node. Sometimes,
// the created node needs to be linked to a third page, resulting
// in modifying that third page's PreviousPageMin or NextPageMin
// field changing, but no NFTs within that page changing. In this
// case, there will be no previous NFTs and we need to skip.
// However, there will always be NFTs listed in the final fields,
// as rippled outputs all fields in final fields even if they were
// not changed.
ripple::STObject const& previousFields =
node.peekAtField(ripple::sfPreviousFields)
.downcast<ripple::STObject>();
if (!previousFields.isFieldPresent(ripple::sfNFTokens))
continue;
ripple::STArray const& toAddNFTs =
previousFields.getFieldArray(ripple::sfNFTokens);
std::transform(
toAddNFTs.begin(),
toAddNFTs.end(),
std::back_inserter(prevIDs),
[](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
ripple::STArray const& toAddFinalNFTs =
node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
std::transform(
toAddFinalNFTs.begin(),
toAddFinalNFTs.end(),
std::back_inserter(finalIDs),
[](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
}
}
std::sort(finalIDs.begin(), finalIDs.end());
std::sort(prevIDs.begin(), prevIDs.end());
std::vector<ripple::uint256> tokenIDResult;
std::set_difference(
finalIDs.begin(),
finalIDs.end(),
prevIDs.begin(),
prevIDs.end(),
std::inserter(tokenIDResult, tokenIDResult.begin()));
if (tokenIDResult.size() == 1 && owner)
return {
{NFTTransactionsData(
tokenIDResult.front(), txMeta, sttx.getTransactionID())},
NFTsData(
tokenIDResult.front(),
*owner,
sttx.getFieldVL(ripple::sfURI),
txMeta)};
std::stringstream msg;
msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
ripple::uint256 const tokenID = sttx.getFieldH256(ripple::sfNFTokenID);
std::vector<NFTTransactionsData> const txs = {
NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())};
// Determine who owned the token when it was burned by finding an
// NFTokenPage that was deleted or modified that contains this
// tokenID.
for (ripple::STObject const& node : txMeta.getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
ripple::ltNFTOKEN_PAGE ||
node.getFName() == ripple::sfCreatedNode)
continue;
// NFT burn can result in an NFTokenPage being modified to no longer
// include the target, or an NFTokenPage being deleted. If this is
// modified, we want to look for the target in the fields prior to
// modification. If deleted, it's possible that the page was
// modified to remove the target NFT prior to the entire page being
// deleted. In this case, we need to look in the PreviousFields.
// Otherwise, the page was not modified prior to deleting and we
// need to look in the FinalFields.
std::optional<ripple::STArray> prevNFTs;
if (node.isFieldPresent(ripple::sfPreviousFields))
{
ripple::STObject const& previousFields =
node.peekAtField(ripple::sfPreviousFields)
.downcast<ripple::STObject>();
if (previousFields.isFieldPresent(ripple::sfNFTokens))
prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
}
else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
prevNFTs = node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
if (!prevNFTs)
continue;
auto const nft = std::find_if(
prevNFTs->begin(),
prevNFTs->end(),
[&tokenID](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != prevNFTs->end())
return std::make_pair(
txs,
NFTsData(
tokenID,
ripple::AccountID::fromVoid(
node.getFieldH256(ripple::sfLedgerIndex).data()),
txMeta,
true));
}
std::stringstream msg;
msg << " - could not determine owner at burntime for tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenAcceptOfferData(
ripple::TxMeta const& txMeta,
ripple::STTx const& sttx)
{
// If we have the buy offer from this tx, we can determine the owner
// more easily by just looking at the owner of the accepted NFTokenOffer
// object.
if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer))
{
auto const affectedBuyOffer = std::find_if(
txMeta.getNodes().begin(),
txMeta.getNodes().end(),
[&sttx](ripple::STObject const& node) {
return node.getFieldH256(ripple::sfLedgerIndex) ==
sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
});
if (affectedBuyOffer == txMeta.getNodes().end())
{
std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
ripple::uint256 const tokenID =
affectedBuyOffer->peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldH256(ripple::sfNFTokenID);
ripple::AccountID const owner =
affectedBuyOffer->peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getAccountID(ripple::sfOwner);
return {
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
NFTsData(tokenID, owner, txMeta, false)};
}
// Otherwise we have to infer the new owner from the affected nodes.
auto const affectedSellOffer = std::find_if(
txMeta.getNodes().begin(),
txMeta.getNodes().end(),
[&sttx](ripple::STObject const& node) {
return node.getFieldH256(ripple::sfLedgerIndex) ==
sttx.getFieldH256(ripple::sfNFTokenSellOffer);
});
if (affectedSellOffer == txMeta.getNodes().end())
{
std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
ripple::uint256 const tokenID =
affectedSellOffer->peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldH256(ripple::sfNFTokenID);
ripple::AccountID const seller =
affectedSellOffer->peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getAccountID(ripple::sfOwner);
for (ripple::STObject const& node : txMeta.getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
ripple::ltNFTOKEN_PAGE ||
node.getFName() == ripple::sfDeletedNode)
continue;
ripple::AccountID const nodeOwner = ripple::AccountID::fromVoid(
node.getFieldH256(ripple::sfLedgerIndex).data());
if (nodeOwner == seller)
continue;
ripple::STArray const& nfts = [&node] {
if (node.getFName() == ripple::sfCreatedNode)
return node.peekAtField(ripple::sfNewFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
return node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
}();
auto const nft = std::find_if(
nfts.begin(),
nfts.end(),
[&tokenID](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != nfts.end())
return {
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
NFTsData(tokenID, nodeOwner, txMeta, false)};
}
std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
// This is the only transaction where there can be more than 1 element in
// the returned vector, because you can cancel multiple offers in one
// transaction using this feature. This transaction also never returns an
// NFTsData because it does not change the state of an NFT itself.
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenCancelOfferData(
ripple::TxMeta const& txMeta,
ripple::STTx const& sttx)
{
std::vector<NFTTransactionsData> txs;
for (ripple::STObject const& node : txMeta.getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
ripple::ltNFTOKEN_OFFER)
continue;
ripple::uint256 const tokenID = node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldH256(ripple::sfNFTokenID);
txs.emplace_back(tokenID, txMeta, sttx.getTransactionID());
}
// Deduplicate any transactions based on tokenID/txIdx combo. Can't just
// use txIdx because in this case one tx can cancel offers for several
// NFTs.
std::sort(
txs.begin(),
txs.end(),
[](NFTTransactionsData const& a, NFTTransactionsData const& b) {
if (a.tokenID != b.tokenID)
return a.tokenID < b.tokenID;
return a.transactionIndex < b.transactionIndex;
});
auto last = std::unique(
txs.begin(),
txs.end(),
[](NFTTransactionsData const& a, NFTTransactionsData const& b) {
return a.tokenID == b.tokenID &&
a.transactionIndex == b.transactionIndex;
});
txs.erase(last, txs.end());
return {txs, {}};
}
// This transaction never returns an NFTokensData because it does not
// change the state of an NFT itself.
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenCreateOfferData(
ripple::TxMeta const& txMeta,
ripple::STTx const& sttx)
{
return {
{NFTTransactionsData(
sttx.getFieldH256(ripple::sfNFTokenID),
txMeta,
sttx.getTransactionID())},
{}};
}
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
if (txMeta.getResultTER() != ripple::tesSUCCESS)
return {{}, {}};
switch (sttx.getTxnType())
{
case ripple::TxType::ttNFTOKEN_MINT:
return getNFTokenMintData(txMeta, sttx);
case ripple::TxType::ttNFTOKEN_BURN:
return getNFTokenBurnData(txMeta, sttx);
case ripple::TxType::ttNFTOKEN_ACCEPT_OFFER:
return getNFTokenAcceptOfferData(txMeta, sttx);
case ripple::TxType::ttNFTOKEN_CANCEL_OFFER:
return getNFTokenCancelOfferData(txMeta, sttx);
case ripple::TxType::ttNFTOKEN_CREATE_OFFER:
return getNFTokenCreateOfferData(txMeta, sttx);
default:
return {{}, {}};
}
}
std::vector<NFTsData>
getNFTDataFromObj(
std::uint32_t const seq,
std::string const& key,
std::string const& blob)
{
std::vector<NFTsData> nfts;
ripple::STLedgerEntry const sle = ripple::STLedgerEntry(
ripple::SerialIter{blob.data(), blob.size()},
ripple::uint256::fromVoid(key.data()));
if (sle.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE)
return nfts;
auto const owner = ripple::AccountID::fromVoid(key.data());
for (ripple::STObject const& node : sle.getFieldArray(ripple::sfNFTokens))
nfts.emplace_back(
node.getFieldH256(ripple::sfNFTokenID),
seq,
owner,
node.getFieldVL(ripple::sfURI));
return nfts;
}

36
src/etl/NFTHelpers.h Normal file
View File

@@ -0,0 +1,36 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <backend/DBHelpers.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>
// Pulling from tx via ReportingETL
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
// Pulling from ledger object via loadInitialLedger
std::vector<NFTsData>
getNFTDataFromObj(
std::uint32_t const seq,
std::string const& key,
std::string const& blob);

219
src/etl/ProbingETLSource.cpp Normal file
View File

@@ -0,0 +1,219 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <etl/ProbingETLSource.h>
#include <log/Logger.h>
using namespace clio;
ProbingETLSource::ProbingETLSource(
clio::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
ETLLoadBalancer& balancer,
boost::asio::ssl::context sslCtx)
: sslCtx_{std::move(sslCtx)}
, sslSrc_{std::make_shared<SslETLSource>(
config,
ioc,
std::ref(sslCtx_),
backend,
subscriptions,
nwvl,
balancer,
make_SSLHooks())}
, plainSrc_{std::make_shared<PlainETLSource>(
config,
ioc,
backend,
subscriptions,
nwvl,
balancer,
make_PlainHooks())}
{
}
void
ProbingETLSource::run()
{
sslSrc_->run();
plainSrc_->run();
}
void
ProbingETLSource::pause()
{
sslSrc_->pause();
plainSrc_->pause();
}
void
ProbingETLSource::resume()
{
sslSrc_->resume();
plainSrc_->resume();
}
bool
ProbingETLSource::isConnected() const
{
return currentSrc_ && currentSrc_->isConnected();
}
bool
ProbingETLSource::hasLedger(uint32_t sequence) const
{
if (!currentSrc_)
return false;
return currentSrc_->hasLedger(sequence);
}
boost::json::object
ProbingETLSource::toJson() const
{
if (!currentSrc_)
{
boost::json::object sourcesJson = {
{"ws", plainSrc_->toJson()},
{"wss", sslSrc_->toJson()},
};
return {
{"probing", sourcesJson},
};
}
return currentSrc_->toJson();
}
std::string
ProbingETLSource::toString() const
{
if (!currentSrc_)
return "{probing... ws: " + plainSrc_->toString() +
", wss: " + sslSrc_->toString() + "}";
return currentSrc_->toString();
}
bool
ProbingETLSource::loadInitialLedger(
std::uint32_t ledgerSequence,
std::uint32_t numMarkers,
bool cacheOnly)
{
if (!currentSrc_)
return false;
return currentSrc_->loadInitialLedger(
ledgerSequence, numMarkers, cacheOnly);
}
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
ProbingETLSource::fetchLedger(
uint32_t ledgerSequence,
bool getObjects,
bool getObjectNeighbors)
{
if (!currentSrc_)
return {};
return currentSrc_->fetchLedger(
ledgerSequence, getObjects, getObjectNeighbors);
}
std::optional<boost::json::object>
ProbingETLSource::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
if (!currentSrc_)
return {};
return currentSrc_->forwardToRippled(request, clientIp, yield);
}
std::optional<boost::json::object>
ProbingETLSource::requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
if (!currentSrc_)
return {};
return currentSrc_->requestFromRippled(request, clientIp, yield);
}
ETLSourceHooks
ProbingETLSource::make_SSLHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
return ETLSourceHooks::Action::STOP;
if (!ec)
{
plainSrc_->pause();
currentSrc_ = sslSrc_;
log_.info() << "Selected WSS as the main source: "
<< currentSrc_->toString();
}
return ETLSourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
currentSrc_ = nullptr;
plainSrc_->resume();
}
return ETLSourceHooks::Action::STOP;
}};
}
ETLSourceHooks
ProbingETLSource::make_PlainHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
return ETLSourceHooks::Action::STOP;
if (!ec)
{
sslSrc_->pause();
currentSrc_ = plainSrc_;
log_.info() << "Selected Plain WS as the main source: "
<< currentSrc_->toString();
}
return ETLSourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
currentSrc_ = nullptr;
sslSrc_->resume();
}
return ETLSourceHooks::Action::STOP;
}};
}

112
src/etl/ProbingETLSource.h Normal file
View File

@@ -0,0 +1,112 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <mutex>
#include <config/Config.h>
#include <etl/ETLSource.h>
#include <log/Logger.h>
/// This ETLSource implementation attempts to connect over both secure websocket
/// and plain websocket. The first source to connect pauses the other, and the
/// probing is considered done at that point. If, however, the connected source
/// loses its connection, the probing is kickstarted again.
class ProbingETLSource : public ETLSource
{
clio::Logger log_{"ETL"};
std::mutex mtx_;
boost::asio::ssl::context sslCtx_;
std::shared_ptr<ETLSource> sslSrc_;
std::shared_ptr<ETLSource> plainSrc_;
std::shared_ptr<ETLSource> currentSrc_;
public:
ProbingETLSource(
clio::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
ETLLoadBalancer& balancer,
boost::asio::ssl::context sslCtx = boost::asio::ssl::context{
boost::asio::ssl::context::tlsv12});
~ProbingETLSource() = default;
void
run() override;
void
pause() override;
void
resume() override;
bool
isConnected() const override;
bool
hasLedger(uint32_t sequence) const override;
boost::json::object
toJson() const override;
std::string
toString() const override;
bool
loadInitialLedger(
std::uint32_t ledgerSequence,
std::uint32_t numMarkers,
bool cacheOnly = false) override;
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedger(
uint32_t ledgerSequence,
bool getObjects = true,
bool getObjectNeighbors = false) override;
std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const override;
private:
std::optional<boost::json::object>
requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const override;
ETLSourceHooks
make_SSLHooks() noexcept;
ETLSourceHooks
make_PlainHooks() noexcept;
};

29
src/etl/README.md Normal file
View File

@@ -0,0 +1,29 @@
A single clio node has one or more ETL sources, specified in the config
file. clio will subscribe to the `ledgers` stream of each of the ETL
sources. This stream sends a message whenever a new ledger is validated. Upon
receiving a message on the stream, clio will then fetch the data associated
with the newly validated ledger from one of the ETL sources. The fetch is
performed via a gRPC request (`GetLedger`). This request returns the ledger
header, transactions+metadata blobs, and every ledger object
added/modified/deleted as part of this ledger. ETL then writes all of this data
to the databases, and moves on to the next ledger. ETL does not apply
transactions, but rather extracts the already computed results of those
transactions (all of the added/modified/deleted SHAMap leaf nodes of the state
tree).
If the database is entirely empty, ETL must download an entire ledger in full
(as opposed to just the diff, as described above). This download is done via the
`GetLedgerData` gRPC request. `GetLedgerData` allows clients to page through an
entire ledger over several RPC calls. ETL will page through an entire ledger,
and write each object to the database.
If the database is not empty, clio will first come up in a "soft"
read-only mode. In read-only mode, the server does not perform ETL and simply
publishes new ledgers as they are written to the database.
If the database is not updated within a certain time period
(currently hard coded at 20 seconds), clio will begin the ETL
process and start writing to the database. The database will report an error when
trying to write a record with a key that already exists. ETL uses this error to
determine that another process is writing to the database, and subsequently
falls back to a soft read-only mode. clio can also operate in strict
read-only mode, in which case it will never write to the database.
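
The takeover step can be sketched as follows. This is a minimal, self-contained
illustration, not clio's actual code: `Database` and `tryBecomeWriter` are
hypothetical names, and the only property borrowed from the real system is that
inserting a record whose key already exists fails.

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for the real database; the only behavior that
// matters here is that inserting an existing key fails.
struct Database
{
    std::map<uint32_t, std::string> ledgers;

    bool
    insert(uint32_t seq, std::string blob)
    {
        return ledgers.emplace(seq, std::move(blob)).second;
    }
};

// One takeover attempt, made after the 20 second timeout expires.
// Returns true if this process won the race and is now the ETL writer;
// false means another process wrote first and we stay in read-only mode.
bool
tryBecomeWriter(Database& db, uint32_t nextSeq)
{
    return db.insert(nextSeq, "extracted ledger data");
}

int
main()
{
    Database db;
    std::cout << "first process becomes writer: " << tryBecomeWriter(db, 100)
              << '\n';
    std::cout << "second process stays read-only: " << !tryBecomeWriter(db, 100)
              << '\n';
}
```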

1326
src/etl/ReportingETL.cpp Normal file

File diff suppressed because it is too large Load Diff

408
src/etl/ReportingETL.h Normal file
View File

@@ -0,0 +1,408 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/ledger/ReadView.h>
#include <boost/algorithm/string.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h>
#include <etl/ETLSource.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <chrono>
struct AccountTransactionsData;
struct NFTTransactionsData;
struct NFTsData;
struct FormattedTransactionsData
{
std::vector<AccountTransactionsData> accountTxData;
std::vector<NFTTransactionsData> nfTokenTxData;
std::vector<NFTsData> nfTokensData;
};
class SubscriptionManager;
/**
* This class is responsible for continuously extracting data from a
* p2p node, and writing that data to the databases. Usually, multiple different
* processes share access to the same network accessible databases, in which
* case only one such process is performing ETL and writing to the database. The
* other processes simply monitor the database for new ledgers, and publish
* those ledgers to the various subscription streams. If a monitoring process
* determines that the ETL writer has failed (no new ledgers written for some
* time), the process will attempt to become the ETL writer. If there are
* multiple monitoring processes that try to become the ETL writer at the same
* time, one will win out, and the others will fall back to
* monitoring/publishing. In this sense, this class dynamically transitions from
* monitoring to writing and from writing to monitoring, based on the activity
* of other processes running on different machines.
*/
class ReportingETL
{
private:
clio::Logger log_{"ETL"};
std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<SubscriptionManager> subscriptions_;
std::shared_ptr<ETLLoadBalancer> loadBalancer_;
std::optional<std::uint32_t> onlineDeleteInterval_;
std::uint32_t extractorThreads_ = 1;
enum class CacheLoadStyle { ASYNC, SYNC, NOT_AT_ALL };
CacheLoadStyle cacheLoadStyle_ = CacheLoadStyle::ASYNC;
// number of diffs to use to generate cursors to traverse the ledger in
// parallel during initial cache download
size_t numCacheDiffs_ = 32;
// number of markers to use at one time to traverse the ledger in parallel
// during initial cache download
size_t numCacheMarkers_ = 48;
// number of ledger objects to fetch concurrently per marker during cache
// download
size_t cachePageFetchSize_ = 512;
// thread responsible for syncing the cache on startup
std::thread cacheDownloader_;
struct ClioPeer
{
std::string ip;
int port;
};
std::vector<ClioPeer> clioPeers;
std::thread worker_;
boost::asio::io_context& ioContext_;
/// Strand to ensure that ledgers are published in order.
/// If ETL is started far behind the network, ledgers will be written and
/// published very rapidly. Monitoring processes will publish ledgers as
/// they are written. However, to publish a ledger, the monitoring process
/// needs to read all of the transactions for that ledger from the database.
/// Reading the transactions from the database requires network calls, which
/// can be slow. It is imperative however that the monitoring processes keep
/// up with the writer, else the monitoring processes will not be able to
/// detect if the writer failed. Therefore, publishing each ledger (which
/// includes reading all of the transactions from the database) is done from
/// the application wide asio io_service, and a strand is used to ensure
/// ledgers are published in order
boost::asio::io_context::strand publishStrand_;
/// Mechanism for communicating with ETL sources. ETLLoadBalancer wraps an
/// arbitrary number of ETL sources and load balances ETL requests across
/// those sources.
/// Mechanism for detecting when the network has validated a new ledger.
/// This class provides a way to wait for a specific ledger to be validated
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;
/// Whether the software is stopping
std::atomic_bool stopping_ = false;
/// Whether the software is performing online delete
// TODO this needs to live in the database, so diff servers can coordinate
// deletion
std::atomic_bool deleting_ = false;
/// This variable controls the number of GetLedgerData calls that will be
/// executed in parallel during the initial ledger download. GetLedgerData
/// allows clients to page through a ledger over many RPC calls.
/// GetLedgerData returns a marker that is used as an offset in a subsequent
/// call. If numMarkers_ is greater than 1, there will be multiple chains of
/// GetLedgerData calls iterating over different parts of the same ledger in
/// parallel. This can dramatically speed up the time to download the
/// initial ledger. However, a higher value for this member variable puts
/// more load on the ETL source.
size_t numMarkers_ = 2;
/// Whether the process is in strict read-only mode. In strict read-only
/// mode, the process will never attempt to become the ETL writer, and will
/// only publish ledgers as they are written to the database.
bool readOnly_ = false;
/// Whether the process is writing to the database. Used by server_info
std::atomic_bool writing_ = false;
/// Ledger sequence to start ETL from. If this is empty, ETL will start from
/// the next ledger validated by the network. If this is set, and the
/// database is already populated, an error is thrown.
std::optional<uint32_t> startSequence_;
std::optional<uint32_t> finishSequence_;
size_t txnThreshold_ = 0;
/// The time that the most recently published ledger was published. Used by
/// server_info
std::chrono::time_point<std::chrono::system_clock> lastPublish_;
mutable std::shared_mutex publishTimeMtx_;
void
setLastPublish()
{
std::scoped_lock lck(publishTimeMtx_);
lastPublish_ = std::chrono::system_clock::now();
}
/// The time that the most recently published ledger was closed.
std::chrono::time_point<ripple::NetClock> lastCloseTime_;
mutable std::shared_mutex closeTimeMtx_;
void
setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
{
std::scoped_lock lck(closeTimeMtx_);
lastCloseTime_ = lastCloseTime;
}
/// Download a ledger with specified sequence in full, via GetLedgerData,
/// and write the data to the databases. This takes several minutes or
/// longer.
/// @param sequence the sequence of the ledger to download
/// @return The ledger downloaded, with a full transaction and account state
/// map
std::optional<ripple::LedgerInfo>
loadInitialLedger(uint32_t sequence);
/// Populates the cache by walking through the given ledger. Should only be
/// called once. The default behavior is to return immediately and populate
/// the cache in the background. This can be overridden via config
/// parameter, to populate synchronously, or not at all
void
loadCache(uint32_t seq);
void
loadCacheFromDb(uint32_t seq);
bool
loadCacheFromClioPeer(
uint32_t ledgerSequence,
std::string const& ip,
std::string const& port,
boost::asio::yield_context& yield);
/// Run ETL. Extracts ledgers and writes them to the database, until a
/// write conflict occurs (or the server shuts down).
/// @note database must already be populated when this function is
/// called
/// @param startSequence the first ledger to extract
/// @return the last ledger written to the database, if any
std::optional<uint32_t>
runETLPipeline(uint32_t startSequence, int offset);
/// Monitor the network for newly validated ledgers. Also monitor the
/// database to see if any process is writing those ledgers. This function
/// is called when the application starts, and will only return when the
/// application is shutting down. If the software detects the database is
/// empty, this function will call loadInitialLedger(). If the software
/// detects ledgers are not being written, this function calls
/// runETLPipeline(). Otherwise, this function publishes ledgers as they are
/// written to the database.
void
monitor();
/// Monitor the database for newly written ledgers.
/// Similar to the monitor(), except this function will never call
/// runETLPipeline() or loadInitialLedger(). This function only publishes
/// ledgers as they are written to the database.
void
monitorReadOnly();
/// Extract data for a particular ledger from an ETL source. This function
/// continuously tries to extract the specified ledger (using all available
/// ETL sources) until the extraction succeeds, or the server shuts down.
/// @param sequence sequence of the ledger to extract
/// @return ledger header and transaction+metadata blobs. Empty optional
/// if the server is shutting down
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedgerData(uint32_t sequence);
/// Extract data for a particular ledger from an ETL source. This function
/// continuously tries to extract the specified ledger (using all available
/// ETL sources) until the extraction succeeds, or the server shuts down.
/// @param sequence sequence of the ledger to extract
/// @return ledger header, transaction+metadata blobs, and all ledger
/// objects created, modified or deleted between this ledger and the parent.
/// Empty optional if the server is shutting down
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedgerDataAndDiff(uint32_t sequence);
/// Insert all of the extracted transactions into the ledger, returning
/// transactions related to accounts, transactions related to NFTs, and
/// NFTs themselves for later processing.
/// @param ledger ledger to insert transactions into
/// @param data data extracted from an ETL source
/// @return struct that contains the necessary info to write to the
/// account_transactions/account_tx and nft_token_transactions tables
/// (mostly transaction hashes, corresponding nodestore hashes and affected
/// accounts)
FormattedTransactionsData
insertTransactions(
ripple::LedgerInfo const& ledger,
org::xrpl::rpc::v1::GetLedgerResponse& data);
// TODO update this documentation
/// Build the next ledger using the previous ledger and the extracted data.
/// This function calls insertTransactions()
/// @note rawData should be data that corresponds to the ledger immediately
/// following parent
/// @param parent the previous ledger
/// @param rawData data extracted from an ETL source
/// @return the newly built ledger and data to write to the database
std::pair<ripple::LedgerInfo, bool>
buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData);
/// Attempt to read the specified ledger from the database, and then publish
/// that ledger to the ledgers stream.
/// @param ledgerSequence the sequence of the ledger to publish
/// @param maxAttempts the number of times to attempt to read the ledger
/// from the database. 1 attempt per second
/// @return whether the ledger was found in the database and published
bool
publishLedger(uint32_t ledgerSequence, std::optional<uint32_t> maxAttempts);
/// Publish the passed in ledger
/// @param ledger the ledger to publish
void
publishLedger(ripple::LedgerInfo const& lgrInfo);
bool
isStopping()
{
return stopping_;
}
/// Get the number of markers to use during the initial ledger download.
/// This is equivalent to the degree of parallelism during the initial
/// ledger download
/// @return the number of markers
std::uint32_t
getNumMarkers()
{
return numMarkers_;
}
/// start all of the necessary components and begin ETL
void
run()
{
log_.info() << "Starting reporting etl";
stopping_ = false;
doWork();
}
void
doWork();
public:
ReportingETL(
clio::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
std::shared_ptr<NetworkValidatedLedgers> ledgers);
static std::shared_ptr<ReportingETL>
make_ReportingETL(
clio::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
std::shared_ptr<NetworkValidatedLedgers> ledgers)
{
auto etl = std::make_shared<ReportingETL>(
config, ioc, backend, subscriptions, balancer, ledgers);
etl->run();
return etl;
}
~ReportingETL()
{
log_.info() << "onStop called";
log_.debug() << "Stopping Reporting ETL";
stopping_ = true;
if (worker_.joinable())
worker_.join();
if (cacheDownloader_.joinable())
cacheDownloader_.join();
log_.debug() << "Joined ReportingETL worker thread";
}
boost::json::object
getInfo() const
{
boost::json::object result;
result["etl_sources"] = loadBalancer_->toJson();
result["is_writer"] = writing_.load();
result["read_only"] = readOnly_;
auto last = getLastPublish();
if (last.time_since_epoch().count() != 0)
result["last_publish_age_seconds"] =
std::to_string(lastPublishAgeSeconds());
return result;
}
std::chrono::time_point<std::chrono::system_clock>
getLastPublish() const
{
std::shared_lock lck(publishTimeMtx_);
return lastPublish_;
}
std::uint32_t
lastPublishAgeSeconds() const
{
return std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now() - getLastPublish())
.count();
}
std::uint32_t
lastCloseAgeSeconds() const
{
std::shared_lock lck(closeTimeMtx_);
auto now = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now().time_since_epoch())
.count();
auto closeTime = lastCloseTime_.time_since_epoch().count();
if (now < (rippleEpochStart + closeTime))
return 0;
return now - (rippleEpochStart + closeTime);
}
};

209
src/log/Logger.cpp Normal file
View File

@@ -0,0 +1,209 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <config/Config.h>
#include <log/Logger.h>
#include <algorithm>
#include <array>
#include <filesystem>
namespace clio {
Logger LogService::general_log_ = Logger{"General"};
Logger LogService::alert_log_ = Logger{"Alert"};
std::ostream&
operator<<(std::ostream& stream, Severity sev)
{
static constexpr std::array<const char*, 6> labels = {
"TRC",
"DBG",
"NFO",
"WRN",
"ERR",
"FTL",
};
return stream << labels.at(static_cast<int>(sev));
}
Severity
tag_invoke(boost::json::value_to_tag<Severity>, boost::json::value const& value)
{
if (not value.is_string())
throw std::runtime_error("`log_level` must be a string");
auto const& logLevel = value.as_string();
if (boost::iequals(logLevel, "trace"))
return Severity::TRC;
else if (boost::iequals(logLevel, "debug"))
return Severity::DBG;
else if (boost::iequals(logLevel, "info"))
return Severity::NFO;
else if (
boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn"))
return Severity::WRN;
else if (boost::iequals(logLevel, "error"))
return Severity::ERR;
else if (boost::iequals(logLevel, "fatal"))
return Severity::FTL;
else
throw std::runtime_error(
"Could not parse `log_level`: expected `trace`, `debug`, `info`, "
"`warning`, `error` or `fatal`");
}
void
LogService::init(Config const& config)
{
namespace src = boost::log::sources;
namespace keywords = boost::log::keywords;
namespace sinks = boost::log::sinks;
boost::log::add_common_attributes();
boost::log::register_simple_formatter_factory<Severity, char>("Severity");
auto const defaultFormat =
"%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% "
"%Message%";
std::string format =
config.valueOr<std::string>("log_format", defaultFormat);
if (config.valueOr("log_to_console", false))
{
boost::log::add_console_log(std::cout, keywords::format = format);
}
auto logDir = config.maybeValue<std::string>("log_directory");
if (logDir)
{
boost::filesystem::path dirPath{logDir.value()};
if (!boost::filesystem::exists(dirPath))
boost::filesystem::create_directories(dirPath);
auto const rotationSize =
config.valueOr<uint64_t>("log_rotation_size", 2048u) * 1024u *
1024u;
auto const rotationPeriod =
config.valueOr<uint32_t>("log_rotation_hour_interval", 12u);
auto const dirSize =
config.valueOr<uint64_t>("log_directory_max_size", 50u * 1024u) *
1024u * 1024u;
auto fileSink = boost::log::add_file_log(
keywords::file_name = dirPath / "clio.log",
keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
keywords::auto_flush = true,
keywords::format = format,
keywords::open_mode = std::ios_base::app,
keywords::rotation_size = rotationSize,
keywords::time_based_rotation =
sinks::file::rotation_at_time_interval(
boost::posix_time::hours(rotationPeriod)));
fileSink->locked_backend()->set_file_collector(
sinks::file::make_collector(
keywords::target = dirPath, keywords::max_size = dirSize));
fileSink->locked_backend()->scan_for_files();
}
// get default severity, can be overridden per channel using
// the `log_channels` array
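// Illustrative config example (values are made up, keys match the reads below):
//   "log_level": "info",
//   "log_channels": [{"channel": "Backend", "log_level": "debug"}]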
auto defaultSeverity = config.valueOr<Severity>("log_level", Severity::NFO);
static constexpr std::array<const char*, 7> channels = {
"General",
"WebServer",
"Backend",
"RPC",
"ETL",
"Subscriptions",
"Performance",
};
auto core = boost::log::core::get();
auto min_severity = boost::log::expressions::channel_severity_filter(
log_channel, log_severity);
for (auto const& channel : channels)
min_severity[channel] = defaultSeverity;
min_severity["Alert"] =
Severity::WRN; // Channel for alerts, always warning severity
for (auto const overrides = config.arrayOr("log_channels", {});
auto const& cfg : overrides)
{
auto name = cfg.valueOrThrow<std::string>(
"channel", "Channel name is required");
if (not std::count(std::begin(channels), std::end(channels), name))
throw std::runtime_error(
"Can't override settings for log channel " + name +
": invalid channel");
min_severity[name] =
cfg.valueOr<Severity>("log_level", defaultSeverity);
}
core->set_filter(min_severity);
LogService::info() << "Default log level = " << defaultSeverity;
}
Logger::Pump
Logger::trace(source_location_t const& loc) const
{
return {logger_, Severity::TRC, loc};
};
Logger::Pump
Logger::debug(source_location_t const& loc) const
{
return {logger_, Severity::DBG, loc};
};
Logger::Pump
Logger::info(source_location_t const& loc) const
{
return {logger_, Severity::NFO, loc};
};
Logger::Pump
Logger::warn(source_location_t const& loc) const
{
return {logger_, Severity::WRN, loc};
};
Logger::Pump
Logger::error(source_location_t const& loc) const
{
return {logger_, Severity::ERR, loc};
};
Logger::Pump
Logger::fatal(source_location_t const& loc) const
{
return {logger_, Severity::FTL, loc};
};
std::string
Logger::Pump::pretty_path(source_location_t const& loc, size_t max_depth) const
{
auto const file_path = std::string{loc.file_name()};
auto idx = file_path.size();
while (max_depth-- > 0)
{
idx = file_path.rfind('/', idx - 1);
if (idx == std::string::npos || idx == 0)
break;
}
return file_path.substr(idx == std::string::npos ? 0 : idx + 1) + ':' +
std::to_string(loc.line());
}
} // namespace clio

314
src/log/Logger.h Normal file
View File

@@ -0,0 +1,314 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <boost/algorithm/string/predicate.hpp>
#include <boost/filesystem.hpp>
#include <boost/json.hpp>
#include <boost/log/core/core.hpp>
#include <boost/log/expressions/predicates/channel_severity_filter.hpp>
#include <boost/log/sinks/unlocked_frontend.hpp>
#include <boost/log/sources/record_ostream.hpp>
#include <boost/log/sources/severity_channel_logger.hpp>
#include <boost/log/sources/severity_feature.hpp>
#include <boost/log/sources/severity_logger.hpp>
#include <boost/log/utility/manipulators/add_value.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/console.hpp>
#include <boost/log/utility/setup/file.hpp>
#include <boost/log/utility/setup/formatter_parser.hpp>
#if defined(HAS_SOURCE_LOCATION) && __has_builtin(__builtin_source_location)
// this is used by fully compatible compilers like gcc
#include <source_location>
#elif defined(HAS_EXPERIMENTAL_SOURCE_LOCATION)
// this is used by clang on linux where source_location is still not out of
// experimental headers
#include <experimental/source_location>
#endif
#include <optional>
#include <string>
namespace clio {
class Config;
#if defined(HAS_SOURCE_LOCATION) && __has_builtin(__builtin_source_location)
using source_location_t = std::source_location;
#define CURRENT_SRC_LOCATION source_location_t::current()
#elif defined(HAS_EXPERIMENTAL_SOURCE_LOCATION)
using source_location_t = std::experimental::source_location;
#define CURRENT_SRC_LOCATION source_location_t::current()
#else
// A workaround for AppleClang that is lacking source_location atm.
// TODO: remove this workaround when all compilers catch up to c++20
class SourceLocation
{
std::string_view file_;
std::size_t line_;
public:
SourceLocation(std::string_view file, std::size_t line)
: file_{file}, line_{line}
{
}
std::string_view
file_name() const
{
return file_;
}
std::size_t
line() const
{
return line_;
}
};
using source_location_t = SourceLocation;
#define CURRENT_SRC_LOCATION \
source_location_t(__builtin_FILE(), __builtin_LINE())
#endif
/**
* @brief Custom severity levels for @ref Logger.
*/
enum class Severity {
TRC,
DBG,
NFO,
WRN,
ERR,
FTL,
};
BOOST_LOG_ATTRIBUTE_KEYWORD(log_severity, "Severity", Severity);
BOOST_LOG_ATTRIBUTE_KEYWORD(log_channel, "Channel", std::string);
/**
* @brief Custom labels for @ref Severity in log output.
*
* @param stream std::ostream The output stream
* @param sev Severity The severity to output to the ostream
* @return std::ostream& The same ostream we were given
*/
std::ostream&
operator<<(std::ostream& stream, Severity sev);
/**
* @brief Custom JSON parser for @ref Severity.
*
* @param value The JSON string to parse
* @return Severity The parsed severity
* @throws std::runtime_error Thrown if severity is not in the right format
*/
Severity
tag_invoke(
boost::json::value_to_tag<Severity>,
boost::json::value const& value);
/**
* @brief A simple thread-safe logger for the channel specified
* in the constructor.
*
* This is cheap to copy and move. Designed to be used as a member variable or
* otherwise. See @ref LogService::init() for setup of the logging core and
* severity levels for each channel.
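*
* Example usage (illustrative only):
* @code
* clio::Logger log{"ETL"};
* log.info() << "ETL started";
* @endcode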
*/
class Logger final
{
using logger_t =
boost::log::sources::severity_channel_logger_mt<Severity, std::string>;
mutable logger_t logger_;
friend class LogService; // to expose the Pump interface
/**
* @brief Helper that pumps data into a log record via `operator<<`.
*/
class Pump final
{
using pump_opt_t =
std::optional<boost::log::aux::record_pump<logger_t>>;
boost::log::record rec_;
pump_opt_t pump_ = std::nullopt;
public:
~Pump() = default;
Pump(logger_t& logger, Severity sev, source_location_t const& loc)
: rec_{logger.open_record(boost::log::keywords::severity = sev)}
{
if (rec_)
{
pump_.emplace(boost::log::aux::make_record_pump(logger, rec_));
pump_->stream() << boost::log::add_value(
"SourceLocation", pretty_path(loc));
}
}
Pump(Pump&&) = delete;
Pump(Pump const&) = delete;
Pump&
operator=(Pump const&) = delete;
Pump&
operator=(Pump&&) = delete;
/**
* @brief Perfectly forwards any incoming data into the underlying
* boost::log pump if the pump is available. nop otherwise.
*
* @tparam T Type of data to pump
* @param data The data to pump
* @return Pump& Reference to itself for chaining
*/
template <typename T>
[[maybe_unused]] Pump&
operator<<(T&& data)
{
if (pump_)
pump_->stream() << std::forward<T>(data);
return *this;
}
private:
[[nodiscard]] std::string
pretty_path(source_location_t const& loc, size_t max_depth = 3) const;
};
public:
~Logger() = default;
/**
* @brief Construct a new Logger object that produces loglines for the
* specified channel.
*
* See @ref LogService::init() for general setup and configuration of
* severity levels per channel.
*
* @param channel The channel this logger will report into.
*/
Logger(std::string channel)
: logger_{boost::log::keywords::channel = channel}
{
}
Logger(Logger const&) = default;
Logger(Logger&&) = default;
Logger&
operator=(Logger const&) = default;
Logger&
operator=(Logger&&) = default;
/*! Interface for logging at @ref Severity::TRC severity */
[[nodiscard]] Pump
trace(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
/*! Interface for logging at @ref Severity::DBG severity */
[[nodiscard]] Pump
debug(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
/*! Interface for logging at @ref Severity::NFO severity */
[[nodiscard]] Pump
info(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
/*! Interface for logging at @ref Severity::WRN severity */
[[nodiscard]] Pump
warn(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
/*! Interface for logging at @ref Severity::ERR severity */
[[nodiscard]] Pump
error(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
/*! Interface for logging at @ref Severity::FTL severity */
[[nodiscard]] Pump
fatal(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
};
/**
* @brief A global logging service.
*
* Used to initialize and setup the logging core as well as a globally available
* entrypoint for logging into the `General` channel as well as raising alerts.
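*
* Example usage (illustrative only):
* @code
* LogService::init(config);
* LogService::warn() << "something looks off";
* LogService::alert() << "operator attention required";
* @endcode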
*/
class LogService
{
static Logger general_log_; /*! Global logger for General channel */
static Logger alert_log_; /*! Global logger for Alerts channel */
public:
LogService() = delete;
/**
* @brief Global log core initialization from a @ref Config
*/
static void
init(Config const& config);
/*! Globally accessible General logger at @ref Severity::TRC severity */
[[nodiscard]] static Logger::Pump
trace(source_location_t const& loc = CURRENT_SRC_LOCATION)
{
return general_log_.trace(loc);
}
/*! Globally accessible General logger at @ref Severity::DBG severity */
[[nodiscard]] static Logger::Pump
debug(source_location_t const& loc = CURRENT_SRC_LOCATION)
{
return general_log_.debug(loc);
}
/*! Globally accessible General logger at @ref Severity::NFO severity */
[[nodiscard]] static Logger::Pump
info(source_location_t const& loc = CURRENT_SRC_LOCATION)
{
return general_log_.info(loc);
}
/*! Globally accessible General logger at @ref Severity::WRN severity */
[[nodiscard]] static Logger::Pump
warn(source_location_t const& loc = CURRENT_SRC_LOCATION)
{
return general_log_.warn(loc);
}
/*! Globally accessible General logger at @ref Severity::ERR severity */
[[nodiscard]] static Logger::Pump
error(source_location_t const& loc = CURRENT_SRC_LOCATION)
{
return general_log_.error(loc);
}
/*! Globally accessible General logger at @ref Severity::FTL severity */
[[nodiscard]] static Logger::Pump
fatal(source_location_t const& loc = CURRENT_SRC_LOCATION)
{
return general_log_.fatal(loc);
}
/*! Globally accessible Alert logger */
[[nodiscard]] static Logger::Pump
alert(source_location_t const& loc = CURRENT_SRC_LOCATION)
{
return alert_log_.warn(loc);
}
};
} // namespace clio

32
src/main/Build.h Normal file
View File

@@ -0,0 +1,32 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <string>
namespace Build {
std::string const&
getClioVersionString();
std::string const&
getClioFullVersionString();
} // namespace Build

245
src/main/main.cpp Normal file
View File

@@ -0,0 +1,245 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <grpc/impl/codegen/port_platform.h>
#ifdef GRPC_TSAN_ENABLED
#undef GRPC_TSAN_ENABLED
#endif
#ifdef GRPC_ASAN_ENABLED
#undef GRPC_ASAN_ENABLED
#endif
#include <backend/BackendFactory.h>
#include <config/Config.h>
#include <etl/ReportingETL.h>
#include <log/Logger.h>
#include <webserver/Listener.h>
#include <boost/asio/dispatch.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/json.hpp>
#include <boost/program_options.hpp>
#include <algorithm>
#include <cstdlib>
#include <fstream>
#include <functional>
#include <iostream>
#include <main/Build.h>
#include <memory>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
using namespace clio;
namespace po = boost::program_options;
/**
* @brief Parse command line and return path to configuration file
*
* @param argc
* @param argv
* @return std::string Path to configuration file
*/
std::string
parseCli(int argc, char* argv[])
{
static constexpr char defaultConfigPath[] = "/etc/opt/clio/config.json";
// clang-format off
po::options_description description("Options");
description.add_options()
("help,h", "print help message and exit")
("version,v", "print version and exit")
("conf,c", po::value<std::string>()->default_value(defaultConfigPath), "configuration file")
;
// clang-format on
po::positional_options_description positional;
positional.add("conf", 1);
po::variables_map parsed;
po::store(
po::command_line_parser(argc, argv)
.options(description)
.positional(positional)
.run(),
parsed);
po::notify(parsed);
if (parsed.count("version"))
{
std::cout << Build::getClioFullVersionString() << '\n';
std::exit(EXIT_SUCCESS);
}
if (parsed.count("help"))
{
std::cout << "Clio server " << Build::getClioFullVersionString()
<< "\n\n"
<< description;
std::exit(EXIT_SUCCESS);
}
return parsed["conf"].as<std::string>();
}
/**
* @brief Parse certificates from configuration file
*
* @param config The configuration
* @return std::optional<ssl::context> SSL context if certificates were parsed
*/
std::optional<ssl::context>
parseCerts(Config const& config)
{
if (!config.contains("ssl_cert_file") || !config.contains("ssl_key_file"))
return {};
auto certFilename = config.value<std::string>("ssl_cert_file");
auto keyFilename = config.value<std::string>("ssl_key_file");
std::ifstream readCert(certFilename, std::ios::in | std::ios::binary);
if (!readCert)
return {};
std::stringstream contents;
contents << readCert.rdbuf();
std::string cert = contents.str();
std::ifstream readKey(keyFilename, std::ios::in | std::ios::binary);
if (!readKey)
return {};
contents.str("");
contents << readKey.rdbuf();
readKey.close();
std::string key = contents.str();
ssl::context ctx{ssl::context::tlsv12};
ctx.set_options(
boost::asio::ssl::context::default_workarounds |
boost::asio::ssl::context::no_sslv2);
ctx.use_certificate_chain(boost::asio::buffer(cert.data(), cert.size()));
ctx.use_private_key(
boost::asio::buffer(key.data(), key.size()),
boost::asio::ssl::context::file_format::pem);
return ctx;
}
/**
* @brief Start context threads
*
* @param ioc Context
* @param numThreads Number of worker threads to start
*/
void
start(boost::asio::io_context& ioc, std::uint32_t numThreads)
{
std::vector<std::thread> v;
v.reserve(numThreads - 1);
for (auto i = numThreads - 1; i > 0; --i)
v.emplace_back([&ioc] { ioc.run(); });
ioc.run();
}
int
main(int argc, char* argv[])
try
{
auto const configPath = parseCli(argc, argv);
auto const config = ConfigReader::open(configPath);
if (!config)
{
std::cerr << "Couldnt parse config '" << configPath << "'."
<< std::endl;
return EXIT_FAILURE;
}
LogService::init(config);
LogService::info() << "Clio version: " << Build::getClioFullVersionString();
auto ctx = parseCerts(config);
auto ctxRef = ctx
? std::optional<std::reference_wrapper<ssl::context>>{ctx.value()}
: std::nullopt;
auto const threads = config.valueOr("io_threads", 2);
if (threads <= 0)
{
LogService::fatal() << "io_threads is less than 0";
return EXIT_FAILURE;
}
LogService::info() << "Number of io threads = " << threads;
// IO context to handle all incoming requests, as well as other things
// This is not the only io context in the application
boost::asio::io_context ioc{threads};
// Rate limiter, to prevent abuse
auto sweepHandler = IntervalSweepHandler{config, ioc};
auto dosGuard = DOSGuard{config, sweepHandler};
// Interface to the database
auto backend = Backend::make_Backend(ioc, config);
// Manages clients subscribed to streams
auto subscriptions =
SubscriptionManager::make_SubscriptionManager(config, backend);
// Tracks which ledgers have been validated by the
// network
auto ledgers = NetworkValidatedLedgers::make_ValidatedLedgers();
// Handles the connection to one or more rippled nodes.
// ETL uses the balancer to extract data.
// The server uses the balancer to forward RPCs to a rippled node.
// The balancer itself publishes to streams (transactions_proposed and
// accounts_proposed)
auto balancer = ETLLoadBalancer::make_ETLLoadBalancer(
config, ioc, backend, subscriptions, ledgers);
// ETL is responsible for writing and publishing to streams. In read-only
// mode, ETL only publishes
auto etl = ReportingETL::make_ReportingETL(
config, ioc, backend, subscriptions, balancer, ledgers);
// The server handles incoming RPCs
auto httpServer = Server::make_HttpServer(
config, ioc, ctxRef, backend, subscriptions, balancer, etl, dosGuard);
// Blocks until stopped.
// When stopped, shared_ptrs fall out of scope
// Calls destructors on all resources, and destructs in order
start(ioc, threads);
return EXIT_SUCCESS;
}
catch (std::exception const& e)
{
LogService::fatal() << "Exit on exception: " << e.what();
}

108
src/rpc/Counters.cpp Normal file
View File

@@ -0,0 +1,108 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <rpc/Counters.h>
#include <rpc/RPC.h>
#include <rpc/RPCHelpers.h>
namespace RPC {
void
Counters::initializeCounter(std::string const& method)
{
std::shared_lock lk(mutex_);
if (methodInfo_.count(method) == 0)
{
lk.unlock();
std::scoped_lock ulk(mutex_);
// Default-constructs the MethodInfo entry for this method.
methodInfo_[method];
}
}
void
Counters::rpcErrored(std::string const& method)
{
if (!validHandler(method))
return;
initializeCounter(method);
std::shared_lock lk(mutex_);
MethodInfo& counters = methodInfo_[method];
counters.started++;
counters.errored++;
}
void
Counters::rpcComplete(
std::string const& method,
std::chrono::microseconds const& rpcDuration)
{
if (!validHandler(method))
return;
initializeCounter(method);
std::shared_lock lk(mutex_);
MethodInfo& counters = methodInfo_[method];
counters.started++;
counters.finished++;
counters.duration += rpcDuration.count();
}
void
Counters::rpcForwarded(std::string const& method)
{
if (!validHandler(method))
return;
initializeCounter(method);
std::shared_lock lk(mutex_);
MethodInfo& counters = methodInfo_[method];
counters.forwarded++;
}
boost::json::object
Counters::report()
{
std::shared_lock lk(mutex_);
boost::json::object obj = {};
obj[JS(rpc)] = boost::json::object{};
auto& rpc = obj[JS(rpc)].as_object();
for (auto const& [method, info] : methodInfo_)
{
boost::json::object counters = {};
counters[JS(started)] = std::to_string(info.started);
counters[JS(finished)] = std::to_string(info.finished);
counters[JS(errored)] = std::to_string(info.errored);
counters["forwarded"] = std::to_string(info.forwarded);
counters[JS(duration_us)] = std::to_string(info.duration);
rpc[method] = std::move(counters);
}
obj["work_queue"] = workQueue_.get().report();
return obj;
}
} // namespace RPC

73
src/rpc/Counters.h Normal file
View File

@@ -0,0 +1,73 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <boost/json.hpp>
#include <chrono>
#include <cstdint>
#include <functional>
#include <rpc/WorkQueue.h>
#include <shared_mutex>
#include <string>
#include <unordered_map>
namespace RPC {
class Counters
{
private:
struct MethodInfo
{
MethodInfo() = default;
std::atomic_uint64_t started{0};
std::atomic_uint64_t finished{0};
std::atomic_uint64_t errored{0};
std::atomic_uint64_t forwarded{0};
std::atomic_uint64_t duration{0};
};
void
initializeCounter(std::string const& method);
std::shared_mutex mutex_;
std::unordered_map<std::string, MethodInfo> methodInfo_;
std::reference_wrapper<const WorkQueue> workQueue_;
public:
Counters(WorkQueue const& wq) : workQueue_(std::cref(wq)){};
void
rpcErrored(std::string const& method);
void
rpcComplete(
std::string const& method,
std::chrono::microseconds const& rpcDuration);
void
rpcForwarded(std::string const& method);
boost::json::object
report();
};
} // namespace RPC

169
src/rpc/Errors.cpp Normal file
View File

@@ -0,0 +1,169 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <rpc/Errors.h>
#include <algorithm>
using namespace std;
namespace {
template <class... Ts>
struct overloadSet : Ts...
{
using Ts::operator()...;
};
// explicit deduction guide (not needed as of C++20, but clang be clang)
template <class... Ts>
overloadSet(Ts...) -> overloadSet<Ts...>;
} // namespace
namespace RPC {
WarningInfo const&
getWarningInfo(WarningCode code)
{
constexpr static WarningInfo infos[]{
{warnUNKNOWN, "Unknown warning"},
{warnRPC_CLIO,
"This is a clio server. clio only serves validated data. If you "
"want to talk to rippled, include 'ledger_index':'current' in your "
"request"},
{warnRPC_OUTDATED, "This server may be out of date"},
{warnRPC_RATE_LIMIT, "You are about to be rate limited"}};
auto matchByCode = [code](auto const& info) { return info.code == code; };
if (auto it = find_if(begin(infos), end(infos), matchByCode);
it != end(infos))
return *it;
throw(out_of_range("Invalid WarningCode"));
}
boost::json::object
makeWarning(WarningCode code)
{
boost::json::object json;
auto const& info = getWarningInfo(code);
json["id"] = code;
json["message"] = static_cast<string>(info.message);
return json;
}
ClioErrorInfo const&
getErrorInfo(ClioError code)
{
constexpr static ClioErrorInfo infos[]{
{ClioError::rpcMALFORMED_CURRENCY,
"malformedCurrency",
"Malformed currency."},
{ClioError::rpcMALFORMED_REQUEST,
"malformedRequest",
"Malformed request."},
{ClioError::rpcMALFORMED_OWNER, "malformedOwner", "Malformed owner."},
{ClioError::rpcMALFORMED_ADDRESS,
"malformedAddress",
"Malformed address."},
};
auto matchByCode = [code](auto const& info) { return info.code == code; };
if (auto it = find_if(begin(infos), end(infos), matchByCode);
it != end(infos))
return *it;
throw(out_of_range("Invalid error code"));
}
boost::json::object
makeError(
RippledError err,
optional<string_view> customError,
optional<string_view> customMessage)
{
boost::json::object json;
auto const& info = ripple::RPC::get_error_info(err);
json["error"] = customError.value_or(info.token.c_str()).data();
json["error_code"] = static_cast<uint32_t>(err);
json["error_message"] = customMessage.value_or(info.message.c_str()).data();
json["status"] = "error";
json["type"] = "response";
return json;
}
boost::json::object
makeError(
ClioError err,
optional<string_view> customError,
optional<string_view> customMessage)
{
boost::json::object json;
auto const& info = getErrorInfo(err);
json["error"] = customError.value_or(info.error).data();
json["error_code"] = static_cast<uint32_t>(info.code);
json["error_message"] = customMessage.value_or(info.message).data();
json["status"] = "error";
json["type"] = "response";
return json;
}
boost::json::object
makeError(Status const& status)
{
auto wrapOptional = [](string_view const& str) {
return str.empty() ? nullopt : make_optional(str);
};
auto res = visit(
overloadSet{
[&status, &wrapOptional](RippledError err) {
if (err == ripple::rpcUNKNOWN)
{
return boost::json::object{
{"error", status.message},
{"type", "response"},
{"status", "error"}};
}
return makeError(
err,
wrapOptional(status.error),
wrapOptional(status.message));
},
[&status, &wrapOptional](ClioError err) {
return makeError(
err,
wrapOptional(status.error),
wrapOptional(status.message));
},
},
status.code);
if (status.extraInfo)
{
for (auto& [key, value] : status.extraInfo.value())
{
res[key] = value;
}
}
return res;
}
} // namespace RPC

256
src/rpc/Errors.h Normal file
View File

@@ -0,0 +1,256 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/protocol/ErrorCodes.h>
#include <boost/json/object.hpp>
#include <optional>
#include <string>
#include <string_view>
#include <variant>
namespace RPC {
/**
* @brief Custom clio RPC Errors.
*/
enum class ClioError {
rpcMALFORMED_CURRENCY = 5000,
rpcMALFORMED_REQUEST = 5001,
rpcMALFORMED_OWNER = 5002,
rpcMALFORMED_ADDRESS = 5003,
};
/**
* @brief Holds info about a particular @ref ClioError.
*/
struct ClioErrorInfo
{
ClioError const code;
std::string_view const error;
std::string_view const message;
};
/**
* @brief Clio uses compatible Rippled error codes for most RPC errors.
*/
using RippledError = ripple::error_code_i;
/**
* @brief Clio operates on a combination of Rippled and Custom Clio error codes.
*
* @see RippledError For rippled error codes
* @see ClioError For custom clio error codes
*/
using CombinedError = std::variant<RippledError, ClioError>;
/**
* @brief A status returned from any RPC handler.
*/
struct Status
{
CombinedError code = RippledError::rpcSUCCESS;
std::string error = "";
std::string message = "";
std::optional<boost::json::object> extraInfo;
Status() = default;
/* implicit */ Status(CombinedError code) : code(code){};
Status(CombinedError code, boost::json::object&& extraInfo)
: code(code), extraInfo(std::move(extraInfo)){};
// HACK. Some rippled handlers explicitly specify errors.
// This means that we have to be able to duplicate this
// functionality.
explicit Status(std::string const& message)
: code(ripple::rpcUNKNOWN), message(message)
{
}
Status(CombinedError code, std::string message)
: code(code), message(message)
{
}
Status(CombinedError code, std::string error, std::string message)
: code(code), error(error), message(message)
{
}
/**
* @brief Returns true if the Status is *not* OK.
*/
operator bool() const
{
if (auto err = std::get_if<RippledError>(&code))
return *err != RippledError::rpcSUCCESS;
return true;
}
/**
* @brief Returns true if the Status contains the desired @ref RippledError
*
* @param other The RippledError to match
* @return bool true if status matches given error; false otherwise
*/
bool
operator==(RippledError other) const
{
if (auto err = std::get_if<RippledError>(&code))
return *err == other;
return false;
}
/**
* @brief Returns true if the Status contains the desired @ref ClioError
*
* @param other The ClioError to match
* @return bool true if status matches given error; false otherwise
*/
bool
operator==(ClioError other) const
{
if (auto err = std::get_if<ClioError>(&code))
return *err == other;
return false;
}
};
/**
* @brief Warning codes that can be returned by clio.
*/
enum WarningCode {
warnUNKNOWN = -1,
warnRPC_CLIO = 2001,
warnRPC_OUTDATED = 2002,
warnRPC_RATE_LIMIT = 2003
};
/**
* @brief Holds information about a clio warning.
*/
struct WarningInfo
{
constexpr WarningInfo() = default;
constexpr WarningInfo(WarningCode code, char const* message)
: code(code), message(message)
{
}
WarningCode code = warnUNKNOWN;
std::string_view const message = "unknown warning";
};
/**
* @brief Invalid parameters error.
*/
class InvalidParamsError : public std::exception
{
std::string msg;
public:
explicit InvalidParamsError(std::string const& msg) : msg(msg)
{
}
const char*
what() const throw() override
{
return msg.c_str();
}
};
/**
* @brief Account not found error.
*/
class AccountNotFoundError : public std::exception
{
std::string account;
public:
explicit AccountNotFoundError(std::string const& acct) : account(acct)
{
}
const char*
what() const throw() override
{
return account.c_str();
}
};
/**
* @brief A globally available @ref Status that represents a successful state
*/
static Status OK;
/**
* @brief Get the warning info object from a warning code.
*
* @param code The warning code
* @return WarningInfo const& A reference to the static warning info
*/
WarningInfo const&
getWarningInfo(WarningCode code);
/**
* @brief Generate JSON from a warning code.
*
* @param code The @ref WarningCode
* @return boost::json::object The JSON output
*/
boost::json::object
makeWarning(WarningCode code);
/**
* @brief Generate JSON from a @ref Status.
*
* @param status The @ref Status
* @return boost::json::object The JSON output
*/
boost::json::object
makeError(Status const& status);
/**
* @brief Generate JSON from a @ref RippledError.
*
* @param err The rippled @ref RippledError
* @return boost::json::object The JSON output
*/
boost::json::object
makeError(
RippledError err,
std::optional<std::string_view> customError = std::nullopt,
std::optional<std::string_view> customMessage = std::nullopt);
/**
* @brief Generate JSON from a @ref ClioError.
*
* @param err Clio's custom @ref ClioError
* @return boost::json::object The JSON output
*/
boost::json::object
makeError(
ClioError err,
std::optional<std::string_view> customError = std::nullopt,
std::optional<std::string_view> customMessage = std::nullopt);
} // namespace RPC

122
src/rpc/Handlers.h Normal file
View File

@@ -0,0 +1,122 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <rpc/RPC.h>
namespace RPC {
/*
* This file just contains declarations for all of the handlers
*/
// account state methods
Result
doAccountInfo(Context const& context);
Result
doAccountChannels(Context const& context);
Result
doAccountCurrencies(Context const& context);
Result
doAccountLines(Context const& context);
Result
doAccountNFTs(Context const& context);
Result
doAccountObjects(Context const& context);
Result
doAccountOffers(Context const& context);
Result
doGatewayBalances(Context const& context);
Result
doNoRippleCheck(Context const& context);
// channels methods
Result
doChannelAuthorize(Context const& context);
Result
doChannelVerify(Context const& context);
// book methods
[[nodiscard]] Result
doBookChanges(Context const& context);
Result
doBookOffers(Context const& context);
// NFT methods
Result
doNFTBuyOffers(Context const& context);
Result
doNFTSellOffers(Context const& context);
Result
doNFTInfo(Context const& context);
Result
doNFTHistory(Context const& context);
// ledger methods
Result
doLedger(Context const& context);
Result
doLedgerEntry(Context const& context);
Result
doLedgerData(Context const& context);
Result
doLedgerRange(Context const& context);
// transaction methods
Result
doTx(Context const& context);
Result
doTransactionEntry(Context const& context);
Result
doAccountTx(Context const& context);
// subscriptions
Result
doSubscribe(Context const& context);
Result
doUnsubscribe(Context const& context);
// server methods
Result
doServerInfo(Context const& context);
// Utility methods
Result
doRandom(Context const& context);
} // namespace RPC

29
src/rpc/README.md Normal file
View File

@@ -0,0 +1,29 @@
# Clio RPC subsystem
## Background
The RPC subsystem is where the common framework for handling incoming JSON requests is implemented.
Currently the NextGen RPC framework is a work in progress and the handlers are not yet implemented using the new common framework classes.
## Integration plan
- Implement base framework - **done**
- Migrate handlers one by one, making them injectable, adding unit-tests - **in progress**
- Integrate all new handlers into clio in one go
- Cover the rest with unit-tests
- Release first time with new subsystem active
## Components
See `common` subfolder.
- **AnyHandler**: The type-erased wrapper that allows for storing different handlers in one map/vector.
- **RpcSpec/FieldSpec**: The RPC specification classes, used to specify how incoming JSON is to be validated before it's parsed and passed on to individual handler implementations.
- **Validators**: A set of supported validators that can be specified as requirements for each **`FieldSpec`**, together making up the final **`RpcSpec`** of any given RPC handler (see the sketch below).
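A minimal sketch of how these pieces might fit together. The `account`/`ledger_index` spec below is invented for illustration; it assumes `validation::Required` is default-constructible and that the named validators are declared in `rpc/common/Validators.h`:

```cpp
#include <rpc/common/Specs.h>
#include <rpc/common/Validators.h>

#include <boost/json/value.hpp>

namespace example {

// Hypothetical spec: "account" must be present and parse as an account;
// "ledger_index" is optional but must look like a ledger index if given.
static RPCng::RpcSpec const kSpec{
    {"account",
     RPCng::validation::Required{},
     RPCng::validation::AccountValidator},
    {"ledger_index", RPCng::validation::LedgerIndexValidator},
};

inline bool
isValidRequest(boost::json::value const& request)
{
    // validate() returns a MaybeError; on failure, error() carries an RPC::Status
    if (auto const ret = kSpec.validate(request); not ret)
        return false;
    return true;
}

}  // namespace example
```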
## Implementing a (NextGen) handler
See `unittests/rpc` for examples.
Handlers need to fulfil the requirements specified by the **`Handler`** concept (see `rpc/common/Concepts.h`):
- Expose types:
* `Input` - The POD struct which acts as input for the handler
* `Output` - The POD struct which acts as output of a valid handler invocation
- Have a `spec()` member function returning a const reference to an **`RpcSpec`** describing the JSON input.
- Have a `process(Input)` member function that operates on the `Input` POD and returns `HandlerReturnType<Output>` (optionally taking a `boost::asio::yield_context&` as a second argument)
- Implement `value_from` and `value_to` support using `tag_invoke`, as per the `boost::json` documentation for these functions. A minimal sketch is shown below.
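Purely for illustration, a minimal handler shaped like the above. The `EchoHandler` name, its single `message` field and its trivial logic are invented; the sketch also assumes `util::Expected` is constructible from the success value:

```cpp
#include <rpc/common/Specs.h>
#include <rpc/common/Types.h>
#include <rpc/common/Validators.h>

#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>

#include <string>
#include <utility>

namespace example {

class EchoHandler
{
public:
    // POD parsed from the request JSON
    struct Input
    {
        std::string message;
    };

    // POD serialized back to JSON on success
    struct Output
    {
        std::string message;
    };

    RPCng::RpcSpecConstRef
    spec() const
    {
        // "message" must be present in the request
        static RPCng::RpcSpec const kSpec{
            {"message", RPCng::validation::Required{}},
        };
        return kSpec;
    }

    RPCng::HandlerReturnType<Output>
    process(Input input) const
    {
        return Output{std::move(input.message)};
    }
};

// boost::json integration via tag_invoke, as required by the Handler concept
inline EchoHandler::Input
tag_invoke(
    boost::json::value_to_tag<EchoHandler::Input>,
    boost::json::value const& jv)
{
    return {boost::json::value_to<std::string>(jv.as_object().at("message"))};
}

inline void
tag_invoke(
    boost::json::value_from_tag,
    boost::json::value& jv,
    EchoHandler::Output const& output)
{
    jv = boost::json::object{{"message", output.message}};
}

}  // namespace example
```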

396
src/rpc/RPC.cpp Normal file
View File

@@ -0,0 +1,396 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <etl/ETLSource.h>
#include <log/Logger.h>
#include <rpc/Handlers.h>
#include <rpc/RPCHelpers.h>
#include <webserver/HttpBase.h>
#include <webserver/WsBase.h>
#include <boost/asio/spawn.hpp>
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
using namespace std;
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gPerfLog{"Performance"};
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC {
Context::Context(
boost::asio::yield_context& yield_,
string const& command_,
uint32_t version_,
boost::json::object const& params_,
shared_ptr<BackendInterface const> const& backend_,
shared_ptr<SubscriptionManager> const& subscriptions_,
shared_ptr<ETLLoadBalancer> const& balancer_,
shared_ptr<ReportingETL const> const& etl_,
shared_ptr<WsBase> const& session_,
util::TagDecoratorFactory const& tagFactory_,
Backend::LedgerRange const& range_,
Counters& counters_,
string const& clientIp_)
: Taggable(tagFactory_)
, yield(yield_)
, method(command_)
, version(version_)
, params(params_)
, backend(backend_)
, subscriptions(subscriptions_)
, balancer(balancer_)
, etl(etl_)
, session(session_)
, range(range_)
, counters(counters_)
, clientIp(clientIp_)
{
gPerfLog.debug() << tag() << "new Context created";
}
optional<Context>
make_WsContext(
boost::asio::yield_context& yc,
boost::json::object const& request,
shared_ptr<BackendInterface const> const& backend,
shared_ptr<SubscriptionManager> const& subscriptions,
shared_ptr<ETLLoadBalancer> const& balancer,
shared_ptr<ReportingETL const> const& etl,
shared_ptr<WsBase> const& session,
util::TagDecoratorFactory const& tagFactory,
Backend::LedgerRange const& range,
Counters& counters,
string const& clientIp)
{
boost::json::value commandValue = nullptr;
if (!request.contains("command") && request.contains("method"))
commandValue = request.at("method");
else if (request.contains("command") && !request.contains("method"))
commandValue = request.at("command");
if (!commandValue.is_string())
return {};
string command = commandValue.as_string().c_str();
return make_optional<Context>(
yc,
command,
1,
request,
backend,
subscriptions,
balancer,
etl,
session,
tagFactory,
range,
counters,
clientIp);
}
optional<Context>
make_HttpContext(
boost::asio::yield_context& yc,
boost::json::object const& request,
shared_ptr<BackendInterface const> const& backend,
shared_ptr<SubscriptionManager> const& subscriptions,
shared_ptr<ETLLoadBalancer> const& balancer,
shared_ptr<ReportingETL const> const& etl,
util::TagDecoratorFactory const& tagFactory,
Backend::LedgerRange const& range,
RPC::Counters& counters,
string const& clientIp)
{
if (!request.contains("method") || !request.at("method").is_string())
return {};
string const& command = request.at("method").as_string().c_str();
if (command == "subscribe" || command == "unsubscribe")
return {};
if (!request.contains("params") || !request.at("params").is_array())
return {};
boost::json::array const& array = request.at("params").as_array();
if (array.size() != 1)
return {};
if (!array.at(0).is_object())
return {};
return make_optional<Context>(
yc,
command,
1,
array.at(0).as_object(),
backend,
subscriptions,
balancer,
etl,
nullptr,
tagFactory,
range,
counters,
clientIp);
}
using LimitRange = tuple<uint32_t, uint32_t, uint32_t>;
using HandlerFunction = function<Result(Context const&)>;
struct Handler
{
string method;
function<Result(Context const&)> handler;
optional<LimitRange> limit;
bool isClioOnly = false;
};
class HandlerTable
{
unordered_map<string, Handler> handlerMap_;
public:
HandlerTable(initializer_list<Handler> handlers)
{
for (auto const& handler : handlers)
{
handlerMap_[handler.method] = move(handler);
}
}
bool
contains(string const& method)
{
return handlerMap_.contains(method);
}
optional<LimitRange>
getLimitRange(string const& command)
{
if (!handlerMap_.contains(command))
return {};
return handlerMap_[command].limit;
}
optional<HandlerFunction>
getHandler(string const& command)
{
if (!handlerMap_.contains(command))
return {};
return handlerMap_[command].handler;
}
bool
isClioOnly(string const& command)
{
return handlerMap_.contains(command) && handlerMap_[command].isClioOnly;
}
};
static HandlerTable handlerTable{
{"account_channels", &doAccountChannels, LimitRange{10, 50, 256}},
{"account_currencies", &doAccountCurrencies, {}},
{"account_info", &doAccountInfo, {}},
{"account_lines", &doAccountLines, LimitRange{10, 50, 256}},
{"account_nfts", &doAccountNFTs, LimitRange{1, 5, 10}},
{"account_objects", &doAccountObjects, LimitRange{10, 50, 256}},
{"account_offers", &doAccountOffers, LimitRange{10, 50, 256}},
{"account_tx", &doAccountTx, LimitRange{1, 50, 100}},
{"gateway_balances", &doGatewayBalances, {}},
{"noripple_check", &doNoRippleCheck, LimitRange{1, 300, 500}},
{"book_changes", &doBookChanges, {}},
{"book_offers", &doBookOffers, LimitRange{1, 50, 100}},
{"ledger", &doLedger, {}},
{"ledger_data", &doLedgerData, LimitRange{1, 100, 2048}},
{"nft_buy_offers", &doNFTBuyOffers, LimitRange{1, 50, 100}},
{"nft_history", &doNFTHistory, LimitRange{1, 50, 100}, true},
{"nft_info", &doNFTInfo, {}, true},
{"nft_sell_offers", &doNFTSellOffers, LimitRange{1, 50, 100}},
{"ledger_entry", &doLedgerEntry, {}},
{"ledger_range", &doLedgerRange, {}},
{"subscribe", &doSubscribe, {}},
{"server_info", &doServerInfo, {}},
{"unsubscribe", &doUnsubscribe, {}},
{"tx", &doTx, {}},
{"transaction_entry", &doTransactionEntry, {}},
{"random", &doRandom, {}}};
static unordered_set<string> forwardCommands{
"submit",
"submit_multisigned",
"fee",
"ledger_closed",
"ledger_current",
"ripple_path_find",
"manifest",
"channel_authorize",
"channel_verify"};
bool
validHandler(string const& method)
{
return handlerTable.contains(method) || forwardCommands.contains(method);
}
bool
isClioOnly(string const& method)
{
return handlerTable.isClioOnly(method);
}
bool
shouldSuppressValidatedFlag(RPC::Context const& context)
{
return boost::iequals(context.method, "subscribe") ||
boost::iequals(context.method, "unsubscribe");
}
Status
getLimit(RPC::Context const& context, uint32_t& limit)
{
if (!handlerTable.getHandler(context.method))
return Status{RippledError::rpcUNKNOWN_COMMAND};
if (!handlerTable.getLimitRange(context.method))
return Status{
RippledError::rpcINVALID_PARAMS, "rpcDoesNotRequireLimit"};
auto [lo, def, hi] = *handlerTable.getLimitRange(context.method);
if (context.params.contains(JS(limit)))
{
string errMsg = "Invalid field 'limit', not unsigned integer.";
if (!context.params.at(JS(limit)).is_int64())
return Status{RippledError::rpcINVALID_PARAMS, errMsg};
int input = context.params.at(JS(limit)).as_int64();
if (input <= 0)
return Status{RippledError::rpcINVALID_PARAMS, errMsg};
limit = clamp(static_cast<uint32_t>(input), lo, hi);
}
else
{
limit = def;
}
return {};
}
bool
shouldForwardToRippled(Context const& ctx)
{
auto request = ctx.params;
if (isClioOnly(ctx.method))
return false;
if (forwardCommands.find(ctx.method) != forwardCommands.end())
return true;
if (specifiesCurrentOrClosedLedger(request))
return true;
if (ctx.method == "account_info" && request.contains("queue") &&
request.at("queue").as_bool())
return true;
return false;
}
Result
buildResponse(Context const& ctx)
{
if (shouldForwardToRippled(ctx))
{
boost::json::object toForward = ctx.params;
toForward["command"] = ctx.method;
auto res =
ctx.balancer->forwardToRippled(toForward, ctx.clientIp, ctx.yield);
ctx.counters.rpcForwarded(ctx.method);
if (!res)
return Status{RippledError::rpcFAILED_TO_FORWARD};
return *res;
}
if (ctx.method == "ping")
return boost::json::object{};
if (ctx.backend->isTooBusy())
{
gLog.error() << "Database is too busy. Rejecting request";
return Status{RippledError::rpcTOO_BUSY};
}
auto method = handlerTable.getHandler(ctx.method);
if (!method)
return Status{RippledError::rpcUNKNOWN_COMMAND};
try
{
gPerfLog.debug() << ctx.tag() << " start executing rpc `" << ctx.method
<< '`';
auto v = (*method)(ctx);
gPerfLog.debug() << ctx.tag() << " finish executing rpc `" << ctx.method
<< '`';
if (auto object = get_if<boost::json::object>(&v);
object && not shouldSuppressValidatedFlag(ctx))
{
(*object)[JS(validated)] = true;
}
return v;
}
catch (InvalidParamsError const& err)
{
return Status{RippledError::rpcINVALID_PARAMS, err.what()};
}
catch (AccountNotFoundError const& err)
{
return Status{RippledError::rpcACT_NOT_FOUND, err.what()};
}
catch (Backend::DatabaseTimeout const& t)
{
gLog.error() << "Database timeout";
return Status{RippledError::rpcTOO_BUSY};
}
catch (exception const& err)
{
gLog.error() << ctx.tag() << " caught exception: " << err.what();
return Status{RippledError::rpcINTERNAL};
}
}
} // namespace RPC

166
src/rpc/RPC.h Normal file
View File

@@ -0,0 +1,166 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <rpc/Counters.h>
#include <rpc/Errors.h>
#include <util/Taggable.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <chrono>
#include <optional>
#include <sstream>
#include <string>
#include <variant>
/*
* This file contains various classes necessary for executing RPC handlers.
* Context gives the handlers access to various other parts of the application.
* Status is used to report errors.
* And lastly, there are various functions for making Contexts, Statuses and
* serializing Status to JSON.
* This file is meant to contain any class or function that code outside of the
* rpc folder needs to use. For helper functions or classes used within the rpc
* folder, use RPCHelpers.h.
*/
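// As a quick illustration of the last point above, an error Status can be
// serialized into a JSON error object via makeError from rpc/Errors.h (the
// values here are hypothetical):
//
//     RPC::Status const status{RPC::RippledError::rpcINVALID_PARAMS, "bad field"};
//     boost::json::object const error = RPC::makeError(status);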
class WsBase;
class SubscriptionManager;
class ETLLoadBalancer;
class ReportingETL;
namespace RPC {
struct Context : public util::Taggable
{
clio::Logger perfLog_{"Performance"};
boost::asio::yield_context& yield;
std::string method;
std::uint32_t version;
boost::json::object const& params;
std::shared_ptr<BackendInterface const> const& backend;
// this needs to be an actual shared_ptr, not a reference. The above
// references refer to shared_ptr members of WsBase, but WsBase contains
// SubscriptionManager as a weak_ptr, to prevent a shared_ptr cycle.
std::shared_ptr<SubscriptionManager> subscriptions;
std::shared_ptr<ETLLoadBalancer> const& balancer;
std::shared_ptr<ReportingETL const> const& etl;
std::shared_ptr<WsBase> session;
Backend::LedgerRange const& range;
Counters& counters;
std::string clientIp;
Context(
boost::asio::yield_context& yield_,
std::string const& command_,
std::uint32_t version_,
boost::json::object const& params_,
std::shared_ptr<BackendInterface const> const& backend_,
std::shared_ptr<SubscriptionManager> const& subscriptions_,
std::shared_ptr<ETLLoadBalancer> const& balancer_,
std::shared_ptr<ReportingETL const> const& etl_,
std::shared_ptr<WsBase> const& session_,
util::TagDecoratorFactory const& tagFactory_,
Backend::LedgerRange const& range_,
Counters& counters_,
std::string const& clientIp_);
};
struct AccountCursor
{
ripple::uint256 index;
std::uint32_t hint;
std::string
toString() const
{
return ripple::strHex(index) + "," + std::to_string(hint);
}
bool
isNonZero() const
{
return index.isNonZero() || hint != 0;
}
};
using Result = std::variant<Status, boost::json::object>;
std::optional<Context>
make_WsContext(
boost::asio::yield_context& yc,
boost::json::object const& request,
std::shared_ptr<BackendInterface const> const& backend,
std::shared_ptr<SubscriptionManager> const& subscriptions,
std::shared_ptr<ETLLoadBalancer> const& balancer,
std::shared_ptr<ReportingETL const> const& etl,
std::shared_ptr<WsBase> const& session,
util::TagDecoratorFactory const& tagFactory,
Backend::LedgerRange const& range,
Counters& counters,
std::string const& clientIp);
std::optional<Context>
make_HttpContext(
boost::asio::yield_context& yc,
boost::json::object const& request,
std::shared_ptr<BackendInterface const> const& backend,
std::shared_ptr<SubscriptionManager> const& subscriptions,
std::shared_ptr<ETLLoadBalancer> const& balancer,
std::shared_ptr<ReportingETL const> const& etl,
util::TagDecoratorFactory const& tagFactory,
Backend::LedgerRange const& range,
Counters& counters,
std::string const& clientIp);
Result
buildResponse(Context const& ctx);
bool
validHandler(std::string const& method);
bool
isClioOnly(std::string const& method);
Status
getLimit(RPC::Context const& context, std::uint32_t& limit);
template <class T>
void
logDuration(Context const& ctx, T const& dur)
{
static clio::Logger log{"RPC"};
std::stringstream ss;
ss << ctx.tag() << "Request processing duration = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(dur).count()
<< " milliseconds. request = " << ctx.params;
auto seconds =
std::chrono::duration_cast<std::chrono::seconds>(dur).count();
if (seconds > 10)
log.error() << ss.str();
else if (seconds > 1)
log.warn() << ss.str();
else
log.info() << ss.str();
}
} // namespace RPC

1766
src/rpc/RPCHelpers.cpp Normal file

File diff suppressed because it is too large Load Diff

308
src/rpc/RPCHelpers.h Normal file
View File

@@ -0,0 +1,308 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
/*
* This file contains a variety of utility functions used when executing
* the handlers
*/
#include <ripple/app/ledger/Ledger.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/jss.h>
#include <backend/BackendInterface.h>
#include <rpc/RPC.h>
// Useful macro for borrowing from ripple::jss
// static strings. (J)son (S)trings
#define JS(x) ripple::jss::x.c_str()
// Access (SF)ield name (S)trings
#define SFS(x) ripple::x.jsonName.c_str()
namespace RPC {
std::optional<ripple::AccountID>
accountFromStringStrict(std::string const& account);
bool
isOwnedByAccount(ripple::SLE const& sle, ripple::AccountID const& accountID);
std::uint64_t
getStartHint(ripple::SLE const& sle, ripple::AccountID const& accountID);
std::optional<AccountCursor>
parseAccountCursor(std::optional<std::string> jsonCursor);
// TODO this function should probably be in a different file and namespace
std::pair<
std::shared_ptr<ripple::STTx const>,
std::shared_ptr<ripple::STObject const>>
deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs);
// TODO this function should probably be in a different file and namespace
std::pair<
std::shared_ptr<ripple::STTx const>,
std::shared_ptr<ripple::TxMeta const>>
deserializeTxPlusMeta(
Backend::TransactionAndMetadata const& blobs,
std::uint32_t seq);
std::pair<boost::json::object, boost::json::object>
toExpandedJson(Backend::TransactionAndMetadata const& blobs);
bool
insertDeliveredAmount(
boost::json::object& metaJson,
std::shared_ptr<ripple::STTx const> const& txn,
std::shared_ptr<ripple::TxMeta const> const& meta,
uint32_t date);
boost::json::object
toJson(ripple::STBase const& obj);
boost::json::object
toJson(ripple::SLE const& sle);
boost::json::object
toJson(ripple::LedgerInfo const& info);
boost::json::object
toJson(ripple::TxMeta const& meta);
using RippledJson = Json::Value;
boost::json::value
toBoostJson(RippledJson const& value);
boost::json::object
generatePubLedgerMessage(
ripple::LedgerInfo const& lgrInfo,
ripple::Fees const& fees,
std::string const& ledgerRange,
std::uint32_t txnCount);
std::variant<Status, ripple::LedgerInfo>
ledgerInfoFromRequest(Context const& ctx);
std::variant<Status, ripple::LedgerInfo>
getLedgerInfoFromHashOrSeq(
BackendInterface const& backend,
boost::asio::yield_context& yield,
std::optional<std::string> ledgerHash,
std::optional<uint32_t> ledgerIndex,
uint32_t maxSeq);
std::variant<Status, AccountCursor>
traverseOwnedNodes(
BackendInterface const& backend,
ripple::AccountID const& accountID,
std::uint32_t sequence,
std::uint32_t limit,
std::optional<std::string> jsonCursor,
boost::asio::yield_context& yield,
std::function<void(ripple::SLE&&)> atOwnedNode);
std::variant<Status, AccountCursor>
traverseOwnedNodes(
BackendInterface const& backend,
ripple::Keylet const& owner,
ripple::uint256 const& hexMarker,
std::uint32_t const startHint,
std::uint32_t sequence,
std::uint32_t limit,
std::optional<std::string> jsonCursor,
boost::asio::yield_context& yield,
std::function<void(ripple::SLE&&)> atOwnedNode);
// Same as traverseOwnedNodes, but without the account existence check:
// that check has already been done by the framework, so it is not repeated
// in this internal function
std::variant<Status, AccountCursor>
ngTraverseOwnedNodes(
BackendInterface const& backend,
ripple::AccountID const& accountID,
std::uint32_t sequence,
std::uint32_t limit,
std::optional<std::string> jsonCursor,
boost::asio::yield_context& yield,
std::function<void(ripple::SLE&&)> atOwnedNode);
std::shared_ptr<ripple::SLE const>
read(
ripple::Keylet const& keylet,
ripple::LedgerInfo const& lgrInfo,
Context const& context);
std::variant<Status, std::pair<ripple::PublicKey, ripple::SecretKey>>
keypairFromRequst(boost::json::object const& request);
std::vector<ripple::AccountID>
getAccountsFromTransaction(boost::json::object const& transaction);
std::vector<unsigned char>
ledgerInfoToBlob(ripple::LedgerInfo const& info, bool includeHash = false);
bool
isGlobalFrozen(
BackendInterface const& backend,
std::uint32_t seq,
ripple::AccountID const& issuer,
boost::asio::yield_context& yield);
bool
isFrozen(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& account,
ripple::Currency const& currency,
ripple::AccountID const& issuer,
boost::asio::yield_context& yield);
ripple::STAmount
accountFunds(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::STAmount const& amount,
ripple::AccountID const& id,
boost::asio::yield_context& yield);
ripple::STAmount
accountHolds(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& account,
ripple::Currency const& currency,
ripple::AccountID const& issuer,
bool zeroIfFrozen,
boost::asio::yield_context& yield);
ripple::Rate
transferRate(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& issuer,
boost::asio::yield_context& yield);
ripple::XRPAmount
xrpLiquid(
BackendInterface const& backend,
std::uint32_t sequence,
ripple::AccountID const& id,
boost::asio::yield_context& yield);
boost::json::array
postProcessOrderBook(
std::vector<Backend::LedgerObject> const& offers,
ripple::Book const& book,
ripple::AccountID const& takerID,
Backend::BackendInterface const& backend,
std::uint32_t ledgerSequence,
boost::asio::yield_context& yield);
std::variant<Status, ripple::Book>
parseBook(boost::json::object const& request);
std::variant<Status, ripple::AccountID>
parseTaker(boost::json::value const& request);
std::optional<std::uint32_t>
getUInt(boost::json::object const& request, std::string const& field);
std::uint32_t
getUInt(
boost::json::object const& request,
std::string const& field,
std::uint32_t dfault);
std::uint32_t
getRequiredUInt(boost::json::object const& request, std::string const& field);
std::optional<bool>
getBool(boost::json::object const& request, std::string const& field);
bool
getBool(
boost::json::object const& request,
std::string const& field,
bool dfault);
bool
getRequiredBool(boost::json::object const& request, std::string const& field);
std::optional<std::string>
getString(boost::json::object const& request, std::string const& field);
std::string
getRequiredString(boost::json::object const& request, std::string const& field);
std::string
getString(
boost::json::object const& request,
std::string const& field,
std::string dfault);
Status
getHexMarker(boost::json::object const& request, ripple::uint256& marker);
Status
getAccount(boost::json::object const& request, ripple::AccountID& accountId);
Status
getAccount(
boost::json::object const& request,
ripple::AccountID& destAccount,
boost::string_view const& field);
Status
getOptionalAccount(
boost::json::object const& request,
std::optional<ripple::AccountID>& account,
boost::string_view const& field);
Status
getTaker(boost::json::object const& request, ripple::AccountID& takerID);
Status
getChannelId(boost::json::object const& request, ripple::uint256& channelId);
bool
specifiesCurrentOrClosedLedger(boost::json::object const& request);
std::variant<ripple::uint256, Status>
getNFTID(boost::json::object const& request);
// This function is the driver for both `account_tx` and `nft_tx` and should
// be used for any future transaction enumeration APIs.
std::variant<Status, boost::json::object>
traverseTransactions(
Context const& context,
std::function<Backend::TransactionsAndCursor(
std::shared_ptr<Backend::BackendInterface const> const& backend,
std::uint32_t const,
bool const,
std::optional<Backend::TransactionsCursor> const&,
boost::asio::yield_context& yield)> transactionFetcher);
[[nodiscard]] boost::json::object const
computeBookChanges(
ripple::LedgerInfo const& lgrInfo,
std::vector<Backend::TransactionAndMetadata> const& transactions);
} // namespace RPC

30
src/rpc/WorkQueue.cpp Normal file
View File

@@ -0,0 +1,30 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <rpc/WorkQueue.h>
WorkQueue::WorkQueue(std::uint32_t numWorkers, uint32_t maxSize)
{
if (maxSize != 0)
maxSize_ = maxSize;
while (numWorkers--)
{
threads_.emplace_back([this] { ioc_.run(); });
}
}

97
src/rpc/WorkQueue.h Normal file
View File

@@ -0,0 +1,97 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <log/Logger.h>
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <atomic>
#include <chrono>
#include <limits>
#include <memory>
#include <optional>
#include <queue>
#include <shared_mutex>
#include <thread>
#include <vector>
class WorkQueue
{
// these are cumulative for the lifetime of the process
std::atomic_uint64_t queued_ = 0;
std::atomic_uint64_t durationUs_ = 0;
std::atomic_uint64_t curSize_ = 0;
uint32_t maxSize_ = std::numeric_limits<uint32_t>::max();
clio::Logger log_{"RPC"};
public:
WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0);
template <typename F>
bool
postCoro(F&& f, bool isWhiteListed)
{
if (curSize_ >= maxSize_ && !isWhiteListed)
{
log_.warn() << "Queue is full. rejecting job. current size = "
<< curSize_ << " max size = " << maxSize_;
return false;
}
++curSize_;
auto start = std::chrono::system_clock::now();
// Each time we enqueue a job, we want to post a symmetrical job that
// will dequeue and run the job at the front of the job queue.
boost::asio::spawn(
ioc_,
[this, f = std::move(f), start](boost::asio::yield_context yield) {
auto run = std::chrono::system_clock::now();
auto wait =
std::chrono::duration_cast<std::chrono::microseconds>(
run - start)
.count();
// increment queued_ here, in the same place we increment
// durationUs_
++queued_;
durationUs_ += wait;
log_.info() << "WorkQueue wait time = " << wait
<< " queue size = " << curSize_;
f(yield);
--curSize_;
});
return true;
}
boost::json::object
report() const
{
boost::json::object obj;
obj["queued"] = queued_;
obj["queued_duration_us"] = durationUs_;
obj["current_queue_size"] = curSize_;
obj["max_queue_size"] = maxSize_;
return obj;
}
private:
std::vector<std::thread> threads_ = {};
boost::asio::io_context ioc_ = {};
std::optional<boost::asio::io_context::work> work_{ioc_};
};

141
src/rpc/common/AnyHandler.h Normal file
View File

@@ -0,0 +1,141 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <rpc/common/Concepts.h>
#include <rpc/common/Types.h>
#include <rpc/common/impl/Processors.h>
namespace RPCng {
/**
* @brief A type-erased Handler that can contain any (NextGen) RPC handler class
*
* This allows storing different handlers in one map/vector etc.
* Support for copying was added in order to allow storing in a
* map/unordered_map using the initializer_list constructor.
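*
* For illustration, a hypothetical usage (SomeHandler stands for any type
* satisfying the @ref Handler concept; the request value is assumed to exist):
* @code
* std::unordered_map<std::string, AnyHandler> handlers;
* handlers.emplace("echo", AnyHandler{SomeHandler{}});
* auto const result = handlers.at("echo").process(request);
* @endcode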
*/
class AnyHandler final
{
public:
/**
* @brief Type-erases any handler class.
*
* @tparam HandlerType The real type of wrapped handler class
* @tparam ProcessingStrategy A strategy that implements how processing of
* JSON is to be done
* @param handler The handler to wrap. Required to fulfil the @ref Handler
* concept.
*/
template <
Handler HandlerType,
typename ProcessingStrategy = detail::DefaultProcessor<HandlerType>>
/* implicit */ AnyHandler(HandlerType&& handler)
: pimpl_{std::make_unique<Model<HandlerType, ProcessingStrategy>>(
std::forward<HandlerType>(handler))}
{
}
~AnyHandler() = default;
AnyHandler(AnyHandler const& other) : pimpl_{other.pimpl_->clone()}
{
}
AnyHandler&
operator=(AnyHandler const& rhs)
{
AnyHandler copy{rhs};
pimpl_.swap(copy.pimpl_);
return *this;
}
AnyHandler(AnyHandler&&) = default;
AnyHandler&
operator=(AnyHandler&&) = default;
/**
* @brief Process incoming JSON by the stored handler
*
* @param value The JSON to process
* @return JSON result or @ref RPC::Status on error
*/
[[nodiscard]] ReturnType
process(boost::json::value const& value) const
{
return pimpl_->process(value);
}
/**
* @brief Process incoming JSON by the stored handler in a provided
* coroutine
*
* @param value The JSON to process
* @param ptrYield The coroutine yield context to run the handler in
* @return JSON result or @ref RPC::Status on error
*/
[[nodiscard]] ReturnType
process(
boost::json::value const& value,
boost::asio::yield_context& ptrYield) const
{
return pimpl_->process(value, &ptrYield);
}
private:
struct Concept
{
virtual ~Concept() = default;
[[nodiscard]] virtual ReturnType
process(
boost::json::value const& value,
boost::asio::yield_context* ptrYield = nullptr) const = 0;
[[nodiscard]] virtual std::unique_ptr<Concept>
clone() const = 0;
};
template <typename HandlerType, typename ProcessorType>
struct Model : Concept
{
HandlerType handler;
ProcessorType processor;
Model(HandlerType&& handler) : handler{std::move(handler)}
{
}
[[nodiscard]] ReturnType
process(
boost::json::value const& value,
boost::asio::yield_context* ptrYield = nullptr) const override
{
return processor(handler, value, ptrYield);
}
[[nodiscard]] std::unique_ptr<Concept>
clone() const override
{
return std::make_unique<Model>(*this);
}
};
private:
std::unique_ptr<Concept> pimpl_;
};
} // namespace RPCng

78
src/rpc/common/Concepts.h Normal file
View File

@@ -0,0 +1,78 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <rpc/common/Types.h>
#include <boost/asio/spawn.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <string>
namespace RPCng {
struct RpcSpec;
/**
* @brief A concept that specifies what a requirement used with @ref FieldSpec
* must provide
*/
// clang-format off
template <typename T>
concept Requirement = requires(T a) {
{ a.verify(boost::json::value{}, std::string{}) } -> std::same_as<MaybeError>;
};
// clang-format on
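// For illustration only, a hypothetical requirement satisfying this concept
// (the NonEmptyString type is invented here; see Validators.h for the
// requirements clio actually uses):
//
//     struct NonEmptyString
//     {
//         [[nodiscard]] MaybeError
//         verify(boost::json::value const& value, std::string_view key) const
//         {
//             if (not value.is_object() or not value.as_object().contains(key.data()))
//                 return {};  // missing field; let a 'Required' requirement report it
//             auto const& field = value.as_object().at(key.data());
//             if (not field.is_string() or field.as_string().empty())
//                 return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}};
//             return {};
//         }
//     };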
/**
* @brief A concept that specifies what a Handler type must provide
*
* Note that value_from and value_to should be implemented using tag_invoke
* as per boost::json documentation for these functions.
*/
// clang-format off
template <typename T>
concept CoroutineProcess = requires(T a, typename T::Input in, typename T::Output out, boost::asio::yield_context& y) {
{ a.process(in, y) } -> std::same_as<HandlerReturnType<decltype(out)>>; };
template <typename T>
concept NonCoroutineProcess = requires(T a, typename T::Input in, typename T::Output out) {
{ a.process(in) } -> std::same_as<HandlerReturnType<decltype(out)>>; };
template <typename T>
concept HandlerWithInput = requires(T a, typename T::Input in, typename T::Output out) {
{ a.spec() } -> std::same_as<RpcSpecConstRef>; }
and (CoroutineProcess<T> or NonCoroutineProcess<T>)
and boost::json::has_value_to<typename T::Input>::value;
template <typename T>
concept HandlerWithoutInput = requires(T a, typename T::Output out) {
{ a.process() } -> std::same_as<HandlerReturnType<decltype(out)>>; };
template <typename T>
concept Handler =
(HandlerWithInput<T>
or
HandlerWithoutInput<T>)
and boost::json::has_value_from<typename T::Output>::value;
// clang-format on
} // namespace RPCng

42
src/rpc/common/Specs.cpp Normal file
View File

@@ -0,0 +1,42 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <rpc/common/Specs.h>
#include <boost/json/value.hpp>
namespace RPCng {
[[nodiscard]] MaybeError
FieldSpec::validate(boost::json::value const& value) const
{
return validator_(value);
}
[[nodiscard]] MaybeError
RpcSpec::validate(boost::json::value const& value) const
{
for (auto const& field : fields_)
if (auto ret = field.validate(value); not ret)
return Error{ret.error()};
return {};
}
} // namespace RPCng

95
src/rpc/common/Specs.h Normal file
View File

@@ -0,0 +1,95 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <rpc/common/Concepts.h>
#include <rpc/common/Types.h>
#include <rpc/common/impl/Factories.h>
#include <string>
#include <vector>
namespace RPCng {
/**
* @brief Represents a Specification for one field of an RPC command
*/
struct FieldSpec final
{
/**
* @brief Construct a field specification out of a set of requirements
*
* @tparam Requirements The types of requirements @ref Requirement
* @param key The key in a JSON object that the field validates
* @param requirements The requirements, each of which has to fulfil
* the @ref Requirement concept
*/
template <Requirement... Requirements>
FieldSpec(std::string const& key, Requirements&&... requirements)
: validator_{detail::makeFieldValidator<Requirements...>(
key,
std::forward<Requirements>(requirements)...)}
{
}
/**
* @brief Validates the passed JSON value using the stored requirements
*
* @param value The JSON value to validate
* @return Nothing on success; @ref RPC::Status on error
*/
[[nodiscard]] MaybeError
validate(boost::json::value const& value) const;
private:
std::function<MaybeError(boost::json::value const&)> validator_;
};
/**
* @brief Represents a Specification of an entire RPC command
*
* Note: this should really be all constexpr and handlers would expose
* static constexpr RpcSpec spec instead. Maybe some day in the future.
*/
struct RpcSpec final
{
/**
* @brief Construct a full RPC request specification
*
* @param fields The fields of the RPC specification @ref FieldSpec
*/
RpcSpec(std::initializer_list<FieldSpec> fields) : fields_{fields}
{
}
/**
* @brief Validates the passed JSON value using the stored field specs
*
* @param value The JSON value to validate
* @return Nothing on success; @ref RPC::Status on error
*/
[[nodiscard]] MaybeError
validate(boost::json::value const& value) const;
private:
std::vector<FieldSpec> fields_;
};
} // namespace RPCng

69
src/rpc/common/Types.h Normal file
View File

@@ -0,0 +1,69 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <rpc/Errors.h>
#include <util/Expected.h>
#include <boost/json/value.hpp>
namespace RPCng {
/**
* @brief Return type used for Validators that can return error but don't have
* specific value to return
*/
using MaybeError = util::Expected<void, RPC::Status>;
/**
* @brief The type that represents just the error part of @ref MaybeError
*/
using Error = util::Unexpected<RPC::Status>;
/**
* @brief Return type for each individual handler
*/
template <typename OutputType>
using HandlerReturnType = util::Expected<OutputType, RPC::Status>;
/**
* @brief The final return type out of RPC engine
*/
using ReturnType = util::Expected<boost::json::value, RPC::Status>;
struct RpcSpec;
struct FieldSpec;
using RpcSpecConstRef = RpcSpec const&;
struct VoidOutput
{
};
inline void
tag_invoke(
boost::json::value_from_tag,
boost::json::value& jv,
VoidOutput const&)
{
jv = boost::json::object{};
}
} // namespace RPCng

203
src/rpc/common/Validators.cpp Normal file
View File

@@ -0,0 +1,203 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/base_uint.h>
#include <rpc/RPCHelpers.h>
#include <rpc/common/Validators.h>
#include <boost/json/value.hpp>
#include <charconv>
#include <string_view>
namespace RPCng::validation {
[[nodiscard]] MaybeError
Section::verify(boost::json::value const& value, std::string_view key) const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail
// instead
auto const& res = value.at(key.data());
// if it is not a json object, let other validators fail
if (!res.is_object())
return {};
for (auto const& spec : specs)
{
if (auto const ret = spec.validate(res); not ret)
return Error{ret.error()};
}
return {};
}
[[nodiscard]] MaybeError
Required::verify(boost::json::value const& value, std::string_view key) const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS,
"Required field '" + std::string{key} + "' missing"}};
return {};
}
[[nodiscard]] MaybeError
ValidateArrayAt::verify(boost::json::value const& value, std::string_view key)
const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail
// instead
if (not value.as_object().at(key.data()).is_array())
return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}};
auto const& arr = value.as_object().at(key.data()).as_array();
if (idx_ >= arr.size())
return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}};
auto const& res = arr.at(idx_);
for (auto const& spec : specs_)
if (auto const ret = spec.validate(res); not ret)
return Error{ret.error()};
return {};
}
[[nodiscard]] MaybeError
CustomValidator::verify(boost::json::value const& value, std::string_view key)
const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail
// instead
return validator_(value.as_object().at(key.data()), key);
}
[[nodiscard]] bool
checkIsU32Numeric(std::string_view sv)
{
uint32_t unused;
auto [_, ec] = std::from_chars(sv.data(), sv.data() + sv.size(), unused);
return ec == std::errc();
}
CustomValidator Uint256HexStringValidator = CustomValidator{
[](boost::json::value const& value, std::string_view key) -> MaybeError {
if (!value.is_string())
{
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS,
std::string(key) + "NotString"}};
}
ripple::uint256 ledgerHash;
if (!ledgerHash.parseHex(value.as_string().c_str()))
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS,
std::string(key) + "Malformed"}};
return MaybeError{};
}};
CustomValidator LedgerIndexValidator = CustomValidator{
[](boost::json::value const& value, std::string_view key) -> MaybeError {
auto err = Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS, "ledgerIndexMalformed"}};
if (!value.is_string() && !(value.is_uint64() || value.is_int64()))
{
return err;
}
if (value.is_string() && value.as_string() != "validated" &&
!checkIsU32Numeric(value.as_string().c_str()))
{
return err;
}
return MaybeError{};
}};
CustomValidator AccountValidator = CustomValidator{
[](boost::json::value const& value, std::string_view key) -> MaybeError {
if (!value.is_string())
{
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS,
std::string(key) + "NotString"}};
}
// TODO: we are using accountFromStringStrict from RPCHelpers; once all
// old handlers are removed, this function can be moved here
if (!RPC::accountFromStringStrict(value.as_string().c_str()))
{
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS,
std::string(key) + "Malformed"}};
}
return MaybeError{};
}};
CustomValidator AccountBase58Validator = CustomValidator{
[](boost::json::value const& value, std::string_view key) -> MaybeError {
if (!value.is_string())
{
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS,
std::string(key) + "NotString"}};
}
auto const account =
ripple::parseBase58<ripple::AccountID>(value.as_string().c_str());
if (!account || account->isZero())
return Error{RPC::Status{RPC::ClioError::rpcMALFORMED_ADDRESS}};
return MaybeError{};
}};
CustomValidator MarkerValidator = CustomValidator{
[](boost::json::value const& value, std::string_view key) -> MaybeError {
if (!value.is_string())
{
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS,
std::string(key) + "NotString"}};
}
// TODO: we are using parseAccountCursor from RPCHelpers; once all
// old handlers are removed, this function can be moved here
if (!RPC::parseAccountCursor(value.as_string().c_str()))
{
// align with the current error message
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS, "Malformed cursor"}};
}
return MaybeError{};
}};
CustomValidator CurrencyValidator = CustomValidator{
[](boost::json::value const& value, std::string_view key) -> MaybeError {
if (!value.is_string())
{
return Error{RPC::Status{
RPC::RippledError::rpcINVALID_PARAMS,
std::string(key) + "NotString"}};
}
ripple::Currency currency;
if (!ripple::to_currency(currency, value.as_string().c_str()))
return Error{RPC::Status{
RPC::ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency"}};
return MaybeError{};
}};
} // namespace RPCng::validation
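// ----------------------------------------------------------------------------
// Illustrative usage sketch only (not part of this commit): invoking one of
// the custom validators above directly. The request content is a placeholder.
namespace RPCng::validation::example {
[[nodiscard]] inline MaybeError
checkLedgerIndexField()
{
    // "validated" passes; a value like "abc" would instead yield
    // rpcINVALID_PARAMS with the "ledgerIndexMalformed" message produced above
    boost::json::object const request{{"ledger_index", "validated"}};
    return LedgerIndexValidator.verify(request, "ledger_index");
}
}  // namespace RPCng::validation::example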

488
src/rpc/common/Validators.h Normal file
View File

@@ -0,0 +1,488 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <rpc/common/Concepts.h>
#include <rpc/common/Specs.h>
#include <rpc/common/Types.h>
namespace RPCng::validation {
/**
* @brief Check that the type is the same as what was expected
*
* @tparam Expected The expected type that value should be convertible to
* @param value The json value to check the type of
* @return true if convertible; false otherwise
*/
template <typename Expected>
[[nodiscard]] bool static checkType(boost::json::value const& value)
{
auto hasError = false;
if constexpr (std::is_same_v<Expected, bool>)
{
if (not value.is_bool())
hasError = true;
}
else if constexpr (std::is_same_v<Expected, std::string>)
{
if (not value.is_string())
hasError = true;
}
else if constexpr (
std::is_same_v<Expected, double> or std::is_same_v<Expected, float>)
{
if (not value.is_double())
hasError = true;
}
else if constexpr (std::is_same_v<Expected, boost::json::array>)
{
if (not value.is_array())
hasError = true;
}
else if constexpr (std::is_same_v<Expected, boost::json::object>)
{
if (not value.is_object())
hasError = true;
}
else if constexpr (
std::is_convertible_v<Expected, uint64_t> or
std::is_convertible_v<Expected, int64_t>)
{
if (not value.is_int64() && not value.is_uint64())
hasError = true;
}
return not hasError;
}
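// e.g. (illustrative): checkType<bool>(boost::json::value{true}) is true,
// while checkType<std::string>(boost::json::value{42}) is false.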
/**
* @brief A meta-validator that acts as a spec for a sub-object/section
*/
class Section final
{
std::vector<FieldSpec> specs;
public:
/**
* @brief Construct new section validator from a list of specs
*
* @param specs List of specs @ref FieldSpec
*/
explicit Section(std::initializer_list<FieldSpec> specs) : specs{specs}
{
}
/**
* @brief Verify that the JSON value representing the section is valid
* according to the given specs
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the section from the outer object
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const;
};
/**
* @brief A validator that simply requires a field to be present
*/
struct Required final
{
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const;
};
/**
* @brief Validates that the type of the value is one of the given types
*/
template <typename... Types>
struct Type final
{
/**
* @brief Verify that the JSON value is one of the specified types
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the tested value from the outer
* object
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail
// instead
auto const& res = value.as_object().at(key.data());
auto const convertible = (checkType<Types>(res) || ...);
if (not convertible)
return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}};
return {};
}
};
/**
* @brief Validate that value is between specified min and max
*/
template <typename Type>
class Between final
{
Type min_;
Type max_;
public:
/**
* @brief Construct the validator storing min and max values
*
* @param min
* @param max
*/
explicit Between(Type min, Type max) : min_{min}, max_{max}
{
}
/**
* @brief Verify that the JSON value is within a certain range
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the tested value from the outer
* object
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail
// instead
using boost::json::value_to;
auto const res = value_to<Type>(value.as_object().at(key.data()));
// todo: may want a way to make this code more generic (e.g. use a free
// function that can be overridden for this comparison)
if (res < min_ || res > max_)
return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}};
return {};
}
};
/**
* @brief Validates that the value is equal to the one passed in
*/
template <typename Type>
class EqualTo final
{
Type original_;
public:
/**
* @brief Construct the validator with stored original value
*
* @param original The original value to store
*/
explicit EqualTo(Type original) : original_{original}
{
}
/**
* @brief Verify that the JSON value is equal to the stored original
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the tested value from the outer
* object
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail
// instead
using boost::json::value_to;
auto const res = value_to<Type>(value.as_object().at(key.data()));
if (res != original_)
return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}};
return {};
}
};
/**
* @brief Deduction guide to help disambiguate what it means to EqualTo a
* "string" without specifying the type.
*/
EqualTo(char const*)->EqualTo<std::string>;
/**
* @brief Validates that the value is one of the values passed in
*/
template <typename Type>
class OneOf final
{
std::vector<Type> options_;
public:
/**
* @brief Construct the validator with stored options
*
* @param options The list of allowed options
*/
explicit OneOf(std::initializer_list<Type> options) : options_{options}
{
}
/**
* @brief Verify that the JSON value is one of the stored options
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the tested value from the outer
* object
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail
// instead
using boost::json::value_to;
auto const res = value_to<Type>(value.as_object().at(key.data()));
if (std::find(std::begin(options_), std::end(options_), res) ==
std::end(options_))
return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}};
return {};
}
};
/**
* @brief Deduction guide to help disambiguate what it means to OneOf a
* few "strings" without specifying the type.
*/
OneOf(std::initializer_list<char const*>)->OneOf<std::string>;
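// e.g. (illustrative): EqualTo{"validated"} and OneOf{"asc", "desc"} deduce
// the std::string specializations via the guides above, so string literals
// are compared as strings rather than as pointers.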
/**
* @brief A meta-validator that specifies a list of specs to run against the
* object at the given index in the array
*/
class ValidateArrayAt final
{
std::size_t idx_;
std::vector<FieldSpec> specs_;
public:
/**
* @brief Constructs a validator that validates the specified element of a
* JSON array
*
* @param idx The index inside the array to validate
* @param specs The specifications to validate against
*/
ValidateArrayAt(std::size_t idx, std::initializer_list<FieldSpec> specs)
: idx_{idx}, specs_{specs}
{
}
/**
* @brief Verify that the JSON array element at given index is valid
* according to the stored specs
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the array from the outer object
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const;
};
/**
* @brief A meta-validator that specifies a list of requirements to run against
* when the type matches the template parameter
*/
template <typename Type>
class IfType final
{
public:
/**
* @brief Constructs a validator that runs the given requirements only when
* the type of the value matches the template parameter
* @param requirements The requirements to validate against
*/
template <Requirement... Requirements>
IfType(Requirements&&... requirements)
{
validator_ = [... r = std::forward<Requirements>(requirements)](
boost::json::value const& j,
std::string_view key) -> MaybeError {
// clang-format off
std::optional<RPC::Status> firstFailure = std::nullopt;
// the check logic is the same as fieldspec
([&j, &key, &firstFailure, req = &r]() {
if (firstFailure)
return;
if (auto const res = req->verify(j, key); not res)
firstFailure = res.error();
}(), ...);
// clang-format on
if (firstFailure)
return Error{firstFailure.value()};
return {};
};
}
/**
* @brief Verify that the element is valid
* according to the stored requirements when the type matches
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the element from the outer object
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail
// instead
if (not checkType<Type>(value.as_object().at(key.data())))
return {}; // ignore if type does not match
return validator_(value, key);
}
private:
std::function<MaybeError(boost::json::value const&, std::string_view)>
validator_;
};
/**
* @brief A meta-validator that wraps another validator and returns a
* customized error in place of the wrapped validator's own error
*/
template <typename Requirement>
class WithCustomError final
{
Requirement requirement;
RPC::Status error;
public:
/**
* @brief Constructs a validator that calls the given validator "req" and,
* on failure, returns the customized error "err"
*/
WithCustomError(Requirement req, RPC::Status err)
: requirement{std::move(req)}, error{err}
{
}
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const
{
if (auto const res = requirement.verify(value, key); not res)
return Error{error};
return {};
}
};
/**
* @brief A meta-validator that allows specifying a custom validation function
*/
class CustomValidator final
{
std::function<MaybeError(boost::json::value const&, std::string_view)>
validator_;
public:
/**
* @brief Constructs a custom validator from any supported callable
*
* @tparam Fn The type of callable
* @param fn The callable/function object
*/
template <typename Fn>
explicit CustomValidator(Fn&& fn) : validator_{std::forward<Fn>(fn)}
{
}
/**
* @brief Verify that the JSON value is valid according to the custom
* validation function stored
*
* @param value The JSON value representing the outer object
* @param key The key used to retrieve the tested value from the outer
* object
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const;
};
/**
* @brief Helper function to check whether sv is a valid uint32 number
*/
[[nodiscard]] bool
checkIsU32Numeric(std::string_view sv);
/**
* @brief Provides a commonly used validator for the ledger index
* LedgerIndex must be a string or an integer.
* If the specified LedgerIndex is a string, its value must be either
* "validated" or a valid integer represented as a string.
*/
extern CustomValidator LedgerIndexValidator;
/**
* @brief Provides a commonly used validator for accounts
* The account must be a string that decodes to a valid account
* (via accountFromStringStrict).
*/
extern CustomValidator AccountValidator;
/**
* @brief Provides a commonly used validator for accounts
* The account must be a string that parses as a base58-encoded account ID.
*/
extern CustomValidator AccountBase58Validator;
/**
* @brief Provides a commonly used validator for markers
* A marker is composed of a comma-separated index and a start hint: the
* former is read as hex and the latter must be castable to uint64.
*/
extern CustomValidator MarkerValidator;
/**
* @brief Provides a commonly used validator for uint256 hex strings
* The value must be a string containing valid hex.
* Transaction indexes and ledger hashes both use this validator.
*/
extern CustomValidator Uint256HexStringValidator;
/**
* @brief Provides a commonly used validator for currencies,
* covering both standard currency codes and token codes
*/
extern CustomValidator CurrencyValidator;
} // namespace RPCng::validation
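// ----------------------------------------------------------------------------
// Illustrative sketch only (not part of this commit): composing the validators
// above into a handler spec. It assumes that RpcSpec/FieldSpec (from
// rpc/common/Specs.h) accept a field key followed by its requirements; the
// field names and bounds below are hypothetical placeholders.
namespace RPCng::validation::example {
inline RpcSpec const kExampleSpec{
    {"account", Required{}, AccountValidator},
    {"destination_account", AccountValidator},
    {"ledger_hash", Uint256HexStringValidator},
    {"ledger_index", LedgerIndexValidator},
    {"limit", Type<uint32_t>{}, Between<uint32_t>{10u, 400u}},
    {"marker", MarkerValidator},
};
// kExampleSpec.validate(request) would then run each requirement per field
// and report the first failure, if any.
}  // namespace RPCng::validation::example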

View File

@@ -0,0 +1,59 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <rpc/common/Concepts.h>
#include <rpc/common/Types.h>
#include <boost/json/value.hpp>
#include <optional>
namespace RPCng::detail {
template <Requirement... Requirements>
[[nodiscard]] auto
makeFieldValidator(std::string const& key, Requirements&&... requirements)
{
return [key, ... r = std::forward<Requirements>(requirements)](
boost::json::value const& j) -> MaybeError {
// clang-format off
std::optional<RPC::Status> firstFailure = std::nullopt;
// This expands in order of Requirements and stops evaluating after
// first failure which is stored in `firstFailure` and can be checked
// later on to see whether the verification failed as a whole or not.
([&j, &key, &firstFailure, req = &r]() {
if (firstFailure)
return; // already failed earlier - skip
if (auto const res = req->verify(j, key); not res)
firstFailure = res.error();
}(), ...);
// clang-format on
if (firstFailure)
return Error{firstFailure.value()};
return {};
};
}
} // namespace RPCng::detail
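// ----------------------------------------------------------------------------
// Illustrative sketch only (not part of this commit): what the factory above
// produces. It assumes <rpc/common/Validators.h> is also included and that the
// requirements below model the Requirement concept; the field name is a
// hypothetical placeholder.
namespace RPCng::detail::example {
inline auto const validateLimit = makeFieldValidator(
    std::string{"limit"},
    validation::Required{},
    validation::Type<uint32_t>{});
// validateLimit(request) runs both requirements in order against the "limit"
// field of the request object and reports the first failure, e.g. a missing
// field fails Required{} and a string value fails Type<uint32_t>{}.
}  // namespace RPCng::detail::example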

View File

@@ -0,0 +1,85 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <rpc/common/Concepts.h>
#include <rpc/common/Types.h>
namespace RPCng::detail {
template <typename>
static constexpr bool unsupported_handler_v = false;
template <Handler HandlerType>
struct DefaultProcessor final
{
[[nodiscard]] ReturnType
operator()(
HandlerType const& handler,
boost::json::value const& value,
boost::asio::yield_context* ptrYield = nullptr) const
{
using boost::json::value_from;
using boost::json::value_to;
if constexpr (HandlerWithInput<HandlerType>)
{
// first we run validation
auto const spec = handler.spec();
if (auto const ret = spec.validate(value); not ret)
return Error{ret.error()}; // forward Status
auto const inData = value_to<typename HandlerType::Input>(value);
if constexpr (NonCoroutineProcess<HandlerType>)
{
auto const ret = handler.process(inData);
// real handler is given expected Input, not json
if (!ret)
return Error{ret.error()}; // forward Status
else
return value_from(ret.value());
}
else
{
auto const ret = handler.process(inData, *ptrYield);
// real handler is given expected Input, not json
if (!ret)
return Error{ret.error()}; // forward Status
else
return value_from(ret.value());
}
}
else if constexpr (HandlerWithoutInput<HandlerType>)
{
// no input to pass, ignore the value
if (auto const ret = handler.process(); not ret)
return Error{ret.error()}; // forward Status
else
return value_from(ret.value());
}
else
{
// triggered when the HandlerWithInput and HandlerWithoutInput concepts
// do not cover every Handler case
static_assert(unsupported_handler_v<HandlerType>);
}
}
};
} // namespace RPCng::detail
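// ----------------------------------------------------------------------------
// Illustrative shape (not part of this commit) of a handler that
// DefaultProcessor can drive, inferred from the calls above; any names beyond
// spec(), process() and Input are hypothetical:
//
//   struct ExampleHandler
//   {
//       struct Input  { /* populated via value_to<Input>(request) */ };
//       struct Output { /* serialized via value_from(output)      */ };
//
//       RpcSpec spec() const;                           // validation spec
//       /* expected-like result */ process(Input input) const;
//       // or, for coroutine handlers:
//       // ... process(Input input, boost::asio::yield_context& yield) const;
//   };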

View File

@@ -0,0 +1,145 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <rpc/RPCHelpers.h>
namespace RPC {
void
addChannel(boost::json::array& jsonLines, ripple::SLE const& line)
{
boost::json::object jDst;
jDst[JS(channel_id)] = ripple::to_string(line.key());
jDst[JS(account)] = ripple::to_string(line.getAccountID(ripple::sfAccount));
jDst[JS(destination_account)] =
ripple::to_string(line.getAccountID(ripple::sfDestination));
jDst[JS(amount)] = line[ripple::sfAmount].getText();
jDst[JS(balance)] = line[ripple::sfBalance].getText();
if (publicKeyType(line[ripple::sfPublicKey]))
{
ripple::PublicKey const pk(line[ripple::sfPublicKey]);
jDst[JS(public_key)] = toBase58(ripple::TokenType::AccountPublic, pk);
jDst[JS(public_key_hex)] = strHex(pk);
}
jDst[JS(settle_delay)] = line[ripple::sfSettleDelay];
if (auto const& v = line[~ripple::sfExpiration])
jDst[JS(expiration)] = *v;
if (auto const& v = line[~ripple::sfCancelAfter])
jDst[JS(cancel_after)] = *v;
if (auto const& v = line[~ripple::sfSourceTag])
jDst[JS(source_tag)] = *v;
if (auto const& v = line[~ripple::sfDestinationTag])
jDst[JS(destination_tag)] = *v;
jsonLines.push_back(jDst);
}
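// Fields read by the handler below:
// {
//   account: <ident>,
//   destination_account: <ident>  // optional, filter by destination
//   ledger_hash: <ledger>         // optional, resolved by ledgerInfoFromRequest
//   ledger_index: <ledger_index>  // optional, resolved by ledgerInfoFromRequest
//   limit: <integer>              // optional
//   marker: <string>              // optional, resume a prior paginated call
// }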
Result
doAccountChannels(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
if (!rawAcct)
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
ripple::AccountID destAccount;
if (auto const status =
getAccount(request, destAccount, JS(destination_account));
status)
return status;
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
std::optional<std::string> marker = {};
if (request.contains(JS(marker)))
{
if (!request.at(JS(marker)).is_string())
return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"};
marker = request.at(JS(marker)).as_string().c_str();
}
response[JS(account)] = ripple::to_string(accountID);
response[JS(channels)] = boost::json::value(boost::json::array_kind);
response[JS(limit)] = limit;
boost::json::array& jsonChannels = response.at(JS(channels)).as_array();
auto const addToResponse = [&](ripple::SLE&& sle) {
if (sle.getType() == ripple::ltPAYCHAN &&
sle.getAccountID(ripple::sfAccount) == accountID &&
(!destAccount ||
destAccount == sle.getAccountID(ripple::sfDestination)))
{
addChannel(jsonChannels, sle);
}
return true;
};
auto next = traverseOwnedNodes(
*context.backend,
accountID,
lgrInfo.seq,
limit,
marker,
context.yield,
addToResponse);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
if (auto status = std::get_if<RPC::Status>(&next))
return *status;
auto nextMarker = std::get<RPC::AccountCursor>(next);
if (nextMarker.isNonZero())
response[JS(marker)] = nextMarker.toString();
return response;
}
} // namespace RPC

View File

@@ -0,0 +1,108 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <rpc/RPCHelpers.h>
namespace RPC {
Result
doAccountCurrencies(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
if (!rawAcct)
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
std::set<std::string> send, receive;
auto const addToResponse = [&](ripple::SLE&& sle) {
if (sle.getType() == ripple::ltRIPPLE_STATE)
{
ripple::STAmount balance = sle.getFieldAmount(ripple::sfBalance);
auto lowLimit = sle.getFieldAmount(ripple::sfLowLimit);
auto highLimit = sle.getFieldAmount(ripple::sfHighLimit);
bool viewLowest = (lowLimit.getIssuer() == accountID);
auto lineLimit = viewLowest ? lowLimit : highLimit;
auto lineLimitPeer = !viewLowest ? lowLimit : highLimit;
if (!viewLowest)
balance.negate();
if (balance < lineLimit)
receive.insert(ripple::to_string(balance.getCurrency()));
if ((-balance) < lineLimitPeer)
send.insert(ripple::to_string(balance.getCurrency()));
}
return true;
};
traverseOwnedNodes(
*context.backend,
accountID,
lgrInfo.seq,
std::numeric_limits<std::uint32_t>::max(),
{},
context.yield,
addToResponse);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
response[JS(receive_currencies)] =
boost::json::value(boost::json::array_kind);
boost::json::array& jsonReceive =
response.at(JS(receive_currencies)).as_array();
for (auto const& currency : receive)
jsonReceive.push_back(currency.c_str());
response[JS(send_currencies)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonSend = response.at(JS(send_currencies)).as_array();
for (auto const& currency : send)
jsonSend.push_back(currency.c_str());
return response;
}
} // namespace RPC

View File

@@ -0,0 +1,120 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/json.hpp>
#include <backend/BackendInterface.h>
#include <rpc/RPCHelpers.h>
// {
// account: <ident>,
// strict: <bool> // optional (default false)
// // if true only allow public keys and addresses.
// ledger_hash : <ledger>
// ledger_index : <ledger_index>
// signer_lists : <bool> // optional (default false)
// // if true return SignerList(s).
// queue : <bool> // optional (default false)
// // if true return information about transactions
// // in the current TxQ, only if the requested
// // ledger is open. Otherwise if true, returns an
// // error.
// }
namespace RPC {
Result
doAccountInfo(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
std::string strIdent;
if (request.contains(JS(account)))
strIdent = request.at(JS(account)).as_string().c_str();
else if (request.contains(JS(ident)))
strIdent = request.at(JS(ident)).as_string().c_str();
else
return Status{RippledError::rpcACT_MALFORMED};
// We only need to fetch the ledger header because the ledger hash is
// supposed to be included in the response. The ledger sequence is specified
// in the request
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
// Get info on account.
auto accountID = accountFromStringStrict(strIdent);
if (!accountID)
return Status{RippledError::rpcACT_MALFORMED};
auto key = ripple::keylet::account(accountID.value());
std::optional<std::vector<unsigned char>> dbResponse =
context.backend->fetchLedgerObject(key.key, lgrInfo.seq, context.yield);
if (!dbResponse)
return Status{RippledError::rpcACT_NOT_FOUND};
ripple::STLedgerEntry sle{
ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key.key};
if (!key.check(sle))
return Status{RippledError::rpcDB_DESERIALIZATION};
response[JS(account_data)] = toJson(sle);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
// Return SignerList(s) if that is requested.
if (request.contains(JS(signer_lists)) &&
request.at(JS(signer_lists)).as_bool())
{
// We put the SignerList in an array because of an anticipated
// future when we support multiple signer lists on one account.
boost::json::array signerList;
auto signersKey = ripple::keylet::signers(*accountID);
// This code will need to be revisited if in the future we
// support multiple SignerLists on one account.
auto const signers = context.backend->fetchLedgerObject(
signersKey.key, lgrInfo.seq, context.yield);
if (signers)
{
ripple::STLedgerEntry sleSigners{
ripple::SerialIter{signers->data(), signers->size()},
signersKey.key};
if (!signersKey.check(sleSigners))
return Status{RippledError::rpcDB_DESERIALIZATION};
signerList.push_back(toJson(sleSigners));
}
response[JS(account_data)].as_object()[JS(signer_lists)] =
std::move(signerList);
}
return response;
}
} // namespace RPC

View File

@@ -0,0 +1,208 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <rpc/RPCHelpers.h>
namespace RPC {
void
addLine(
boost::json::array& jsonLines,
ripple::SLE const& line,
ripple::AccountID const& account,
std::optional<ripple::AccountID> const& peerAccount)
{
auto flags = line.getFieldU32(ripple::sfFlags);
auto lowLimit = line.getFieldAmount(ripple::sfLowLimit);
auto highLimit = line.getFieldAmount(ripple::sfHighLimit);
auto lowID = lowLimit.getIssuer();
auto highID = highLimit.getIssuer();
auto lowQualityIn = line.getFieldU32(ripple::sfLowQualityIn);
auto lowQualityOut = line.getFieldU32(ripple::sfLowQualityOut);
auto highQualityIn = line.getFieldU32(ripple::sfHighQualityIn);
auto highQualityOut = line.getFieldU32(ripple::sfHighQualityOut);
auto balance = line.getFieldAmount(ripple::sfBalance);
bool viewLowest = (lowID == account);
auto lineLimit = viewLowest ? lowLimit : highLimit;
auto lineLimitPeer = !viewLowest ? lowLimit : highLimit;
auto lineAccountIDPeer = !viewLowest ? lowID : highID;
auto lineQualityIn = viewLowest ? lowQualityIn : highQualityIn;
auto lineQualityOut = viewLowest ? lowQualityOut : highQualityOut;
if (peerAccount && peerAccount != lineAccountIDPeer)
return;
if (!viewLowest)
balance.negate();
bool lineAuth =
flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth);
bool lineAuthPeer =
flags & (!viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth);
bool lineNoRipple =
flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple);
bool lineDefaultRipple = flags & ripple::lsfDefaultRipple;
bool lineNoRipplePeer = flags &
(!viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple);
bool lineFreeze =
flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze);
bool lineFreezePeer =
flags & (!viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze);
ripple::STAmount const& saBalance(balance);
ripple::STAmount const& saLimit(lineLimit);
ripple::STAmount const& saLimitPeer(lineLimitPeer);
boost::json::object jPeer;
jPeer[JS(account)] = ripple::to_string(lineAccountIDPeer);
jPeer[JS(balance)] = saBalance.getText();
jPeer[JS(currency)] = ripple::to_string(saBalance.issue().currency);
jPeer[JS(limit)] = saLimit.getText();
jPeer[JS(limit_peer)] = saLimitPeer.getText();
jPeer[JS(quality_in)] = lineQualityIn;
jPeer[JS(quality_out)] = lineQualityOut;
if (lineAuth)
jPeer[JS(authorized)] = true;
if (lineAuthPeer)
jPeer[JS(peer_authorized)] = true;
if (lineNoRipple || !lineDefaultRipple)
jPeer[JS(no_ripple)] = lineNoRipple;
if (lineNoRipplePeer || !lineDefaultRipple)
jPeer[JS(no_ripple_peer)] = lineNoRipplePeer;
if (lineFreeze)
jPeer[JS(freeze)] = true;
if (lineFreezePeer)
jPeer[JS(freeze_peer)] = true;
jsonLines.push_back(jPeer);
}
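// Fields read by the handler below:
// {
//   account: <ident>,
//   peer: <ident>                 // optional, filter by peer account
//   ledger_hash: <ledger>         // optional, resolved by ledgerInfoFromRequest
//   ledger_index: <ledger_index>  // optional, resolved by ledgerInfoFromRequest
//   limit: <integer>              // optional
//   marker: <string>              // optional
//   ignore_default: <bool>        // optional, skip lines left in their default state
// }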
Result
doAccountLines(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
if (!rawAcct)
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
std::optional<ripple::AccountID> peerAccount;
if (auto const status = getOptionalAccount(request, peerAccount, JS(peer));
status)
return status;
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
std::optional<std::string> marker = {};
if (request.contains(JS(marker)))
{
if (not request.at(JS(marker)).is_string())
return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"};
marker = request.at(JS(marker)).as_string().c_str();
}
auto ignoreDefault = false;
if (request.contains(JS(ignore_default)))
{
if (not request.at(JS(ignore_default)).is_bool())
return Status{
RippledError::rpcINVALID_PARAMS, "ignoreDefaultNotBool"};
ignoreDefault = request.at(JS(ignore_default)).as_bool();
}
response[JS(account)] = ripple::to_string(accountID);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
response[JS(limit)] = limit;
response[JS(lines)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonLines = response.at(JS(lines)).as_array();
auto const addToResponse = [&](ripple::SLE&& sle) -> void {
if (sle.getType() == ripple::ltRIPPLE_STATE)
{
auto ignore = false;
if (ignoreDefault)
{
if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() ==
accountID)
ignore =
!(sle.getFieldU32(ripple::sfFlags) &
ripple::lsfLowReserve);
else
ignore =
!(sle.getFieldU32(ripple::sfFlags) &
ripple::lsfHighReserve);
}
if (!ignore)
addLine(jsonLines, sle, accountID, peerAccount);
}
};
auto next = traverseOwnedNodes(
*context.backend,
accountID,
lgrInfo.seq,
limit,
marker,
context.yield,
addToResponse);
if (auto status = std::get_if<RPC::Status>(&next))
return *status;
auto nextMarker = std::get<RPC::AccountCursor>(next);
if (nextMarker.isNonZero())
response[JS(marker)] = nextMarker.toString();
return response;
}
} // namespace RPC

View File

@@ -0,0 +1,231 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <ripple/protocol/nftPageMask.h>
#include <boost/json.hpp>
#include <algorithm>
#include <rpc/RPCHelpers.h>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
namespace RPC {
std::unordered_map<std::string, ripple::LedgerEntryType> types{
{"state", ripple::ltRIPPLE_STATE},
{"ticket", ripple::ltTICKET},
{"signer_list", ripple::ltSIGNER_LIST},
{"payment_channel", ripple::ltPAYCHAN},
{"offer", ripple::ltOFFER},
{"escrow", ripple::ltESCROW},
{"deposit_preauth", ripple::ltDEPOSIT_PREAUTH},
{"check", ripple::ltCHECK},
{"nft_page", ripple::ltNFTOKEN_PAGE},
{"nft_offer", ripple::ltNFTOKEN_OFFER}};
Result
doAccountNFTs(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
if (!accountID)
return Status{RippledError::rpcINVALID_PARAMS, "malformedAccount"};
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
if (!rawAcct)
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
ripple::uint256 marker;
if (auto const status = getHexMarker(request, marker); status)
return status;
response[JS(account)] = ripple::toBase58(accountID);
response[JS(validated)] = true;
response[JS(limit)] = limit;
std::uint32_t numPages = 0;
response[JS(account_nfts)] = boost::json::value(boost::json::array_kind);
auto& nfts = response.at(JS(account_nfts)).as_array();
// if a marker was passed, start at the page specified in marker. Else,
// start at the max page
auto const pageKey =
marker.isZero() ? ripple::keylet::nftpage_max(accountID).key : marker;
auto const blob =
context.backend->fetchLedgerObject(pageKey, lgrInfo.seq, context.yield);
if (!blob)
return response;
std::optional<ripple::SLE const> page{
ripple::SLE{ripple::SerialIter{blob->data(), blob->size()}, pageKey}};
// Continue iteration from the current page
while (page)
{
auto arr = page->getFieldArray(ripple::sfNFTokens);
for (auto const& o : arr)
{
ripple::uint256 const nftokenID = o[ripple::sfNFTokenID];
{
nfts.push_back(
toBoostJson(o.getJson(ripple::JsonOptions::none)));
auto& obj = nfts.back().as_object();
// Pull out the components of the nft ID.
obj[SFS(sfFlags)] = ripple::nft::getFlags(nftokenID);
obj[SFS(sfIssuer)] =
to_string(ripple::nft::getIssuer(nftokenID));
obj[SFS(sfNFTokenTaxon)] =
ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID));
obj[JS(nft_serial)] = ripple::nft::getSerial(nftokenID);
if (std::uint16_t xferFee = {
ripple::nft::getTransferFee(nftokenID)})
obj[SFS(sfTransferFee)] = xferFee;
}
}
++numPages;
if (auto npm = (*page)[~ripple::sfPreviousPageMin])
{
auto const nextKey = ripple::Keylet(ripple::ltNFTOKEN_PAGE, *npm);
if (numPages == limit)
{
response[JS(marker)] = to_string(nextKey.key);
response[JS(limit)] = numPages;
return response;
}
auto const nextBlob = context.backend->fetchLedgerObject(
nextKey.key, lgrInfo.seq, context.yield);
page.emplace(ripple::SLE{
ripple::SerialIter{nextBlob->data(), nextBlob->size()},
nextKey.key});
}
else
page.reset();
}
return response;
}
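// Fields read by the handler below:
// {
//   account: <ident>,
//   ledger_hash: <ledger>         // optional, resolved by ledgerInfoFromRequest
//   ledger_index: <ledger_index>  // optional, resolved by ledgerInfoFromRequest
//   limit: <integer>              // optional
//   marker: <string>              // optional
//   type: <string>                // optional, one of the keys of `types` above
// }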
Result
doAccountObjects(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
std::optional<std::string> marker = {};
if (request.contains("marker"))
{
if (!request.at("marker").is_string())
return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"};
marker = request.at("marker").as_string().c_str();
}
std::optional<ripple::LedgerEntryType> objectType = {};
if (request.contains(JS(type)))
{
if (!request.at(JS(type)).is_string())
return Status{RippledError::rpcINVALID_PARAMS, "typeNotString"};
std::string typeAsString = request.at(JS(type)).as_string().c_str();
if (types.find(typeAsString) == types.end())
return Status{RippledError::rpcINVALID_PARAMS, "typeInvalid"};
objectType = types[typeAsString];
}
response[JS(account)] = ripple::to_string(accountID);
response[JS(account_objects)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonObjects =
response.at(JS(account_objects)).as_array();
auto const addToResponse = [&](ripple::SLE&& sle) {
if (!objectType || objectType == sle.getType())
{
jsonObjects.push_back(toJson(sle));
}
};
auto next = traverseOwnedNodes(
*context.backend,
accountID,
lgrInfo.seq,
limit,
marker,
context.yield,
addToResponse);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
if (auto status = std::get_if<RPC::Status>(&next))
return *status;
auto const& nextMarker = std::get<RPC::AccountCursor>(next);
if (nextMarker.isNonZero())
response[JS(marker)] = nextMarker.toString();
return response;
}
} // namespace RPC

View File

@@ -0,0 +1,157 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <rpc/RPCHelpers.h>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
namespace RPC {
void
addOffer(boost::json::array& offersJson, ripple::SLE const& offer)
{
auto quality = getQuality(offer.getFieldH256(ripple::sfBookDirectory));
ripple::STAmount rate = ripple::amountFromQuality(quality);
ripple::STAmount takerPays = offer.getFieldAmount(ripple::sfTakerPays);
ripple::STAmount takerGets = offer.getFieldAmount(ripple::sfTakerGets);
boost::json::object obj;
if (!takerPays.native())
{
obj[JS(taker_pays)] = boost::json::value(boost::json::object_kind);
boost::json::object& takerPaysJson = obj.at(JS(taker_pays)).as_object();
takerPaysJson[JS(value)] = takerPays.getText();
takerPaysJson[JS(currency)] =
ripple::to_string(takerPays.getCurrency());
takerPaysJson[JS(issuer)] = ripple::to_string(takerPays.getIssuer());
}
else
{
obj[JS(taker_pays)] = takerPays.getText();
}
if (!takerGets.native())
{
obj[JS(taker_gets)] = boost::json::value(boost::json::object_kind);
boost::json::object& takerGetsJson = obj.at(JS(taker_gets)).as_object();
takerGetsJson[JS(value)] = takerGets.getText();
takerGetsJson[JS(currency)] =
ripple::to_string(takerGets.getCurrency());
takerGetsJson[JS(issuer)] = ripple::to_string(takerGets.getIssuer());
}
else
{
obj[JS(taker_gets)] = takerGets.getText();
}
obj[JS(seq)] = offer.getFieldU32(ripple::sfSequence);
obj[JS(flags)] = offer.getFieldU32(ripple::sfFlags);
obj[JS(quality)] = rate.getText();
if (offer.isFieldPresent(ripple::sfExpiration))
obj[JS(expiration)] = offer.getFieldU32(ripple::sfExpiration);
offersJson.push_back(obj);
};
Result
doAccountOffers(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
if (!rawAcct)
return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"};
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
std::optional<std::string> marker = {};
if (request.contains(JS(marker)))
{
if (!request.at(JS(marker)).is_string())
return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"};
marker = request.at(JS(marker)).as_string().c_str();
}
response[JS(account)] = ripple::to_string(accountID);
response[JS(limit)] = limit;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
response[JS(offers)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonLines = response.at(JS(offers)).as_array();
auto const addToResponse = [&](ripple::SLE&& sle) {
if (sle.getType() == ripple::ltOFFER)
{
addOffer(jsonLines, sle);
}
return true;
};
auto next = traverseOwnedNodes(
*context.backend,
accountID,
lgrInfo.seq,
limit,
marker,
context.yield,
addToResponse);
if (auto status = std::get_if<RPC::Status>(&next))
return *status;
auto nextMarker = std::get<RPC::AccountCursor>(next);
if (nextMarker.isNonZero())
response[JS(marker)] = nextMarker.toString();
return response;
}
} // namespace RPC

View File

@@ -0,0 +1,67 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"RPC"};
} // namespace
namespace RPC {
Result
doAccountTx(Context const& context)
{
ripple::AccountID accountID;
if (auto const status = getAccount(context.params, accountID); status)
return status;
constexpr std::string_view outerFuncName = __func__;
auto const maybeResponse = traverseTransactions(
context,
[&accountID, &outerFuncName](
std::shared_ptr<Backend::BackendInterface const> const& backend,
std::uint32_t const limit,
bool const forward,
std::optional<Backend::TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) {
auto [txnsAndCursor, timeDiff] = util::timed([&]() {
return backend->fetchAccountTransactions(
accountID, limit, forward, cursorIn, yield);
});
gLog.info() << outerFuncName << " db fetch took " << timeDiff
<< " milliseconds - num blobs = "
<< txnsAndCursor.txns.size();
return txnsAndCursor;
});
if (auto const status = std::get_if<Status>(&maybeResponse); status)
return *status;
auto response = std::get<boost::json::object>(maybeResponse);
response[JS(account)] = ripple::to_string(accountID);
return response;
}
} // namespace RPC

View File

@@ -0,0 +1,275 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/ToString.h>
#include <backend/BackendInterface.h>
#include <rpc/RPCHelpers.h>
#include <boost/json.hpp>
#include <algorithm>
namespace json = boost::json;
using namespace ripple;
namespace RPC {
/**
* @brief Represents an entry in the book_changes' changes array.
*/
struct BookChange
{
STAmount sideAVolume;
STAmount sideBVolume;
STAmount highRate;
STAmount lowRate;
STAmount openRate;
STAmount closeRate;
};
/**
* @brief Encapsulates the book_changes computations and transformations.
*/
class BookChanges final
{
public:
BookChanges() = delete;  // only accessed via the static compute function
/**
* @brief Computes all book_changes for the given transactions.
*
* @param transactions The transactions to compute book changes for
* @return std::vector<BookChange> Book changes
*/
[[nodiscard]] static std::vector<BookChange>
compute(std::vector<Backend::TransactionAndMetadata> const& transactions)
{
return HandlerImpl{}(transactions);
}
private:
class HandlerImpl final
{
std::map<std::string, BookChange> tally_ = {};
std::optional<uint32_t> offerCancel_ = {};
public:
[[nodiscard]] std::vector<BookChange>
operator()(
std::vector<Backend::TransactionAndMetadata> const& transactions)
{
for (auto const& tx : transactions)
handleBookChange(tx);
// TODO: rewrite this with std::ranges when compilers catch up
std::vector<BookChange> changes;
std::transform(
std::make_move_iterator(std::begin(tally_)),
std::make_move_iterator(std::end(tally_)),
std::back_inserter(changes),
[](auto obj) { return obj.second; });
return changes;
}
private:
void
handleAffectedNode(STObject const& node)
{
auto const& metaType = node.getFName();
auto const nodeType = node.getFieldU16(sfLedgerEntryType);
// we only care about ltOFFER objects being modified or
// deleted
if (nodeType != ltOFFER || metaType == sfCreatedNode)
return;
// if either FF or PF are missing we can't compute
// but generally these are cancelled rather than crossed
// so skipping them is consistent
if (!node.isFieldPresent(sfFinalFields) ||
!node.isFieldPresent(sfPreviousFields))
return;
auto const& finalFields =
node.peekAtField(sfFinalFields).downcast<STObject>();
auto const& previousFields =
node.peekAtField(sfPreviousFields).downcast<STObject>();
// defensive case that should never be hit
if (!finalFields.isFieldPresent(sfTakerGets) ||
!finalFields.isFieldPresent(sfTakerPays) ||
!previousFields.isFieldPresent(sfTakerGets) ||
!previousFields.isFieldPresent(sfTakerPays))
return;
// filter out any offers deleted by explicit offer cancels
if (metaType == sfDeletedNode && offerCancel_ &&
finalFields.getFieldU32(sfSequence) == *offerCancel_)
return;
// compute the difference in gets and pays actually
// affected onto the offer
auto const deltaGets = finalFields.getFieldAmount(sfTakerGets) -
previousFields.getFieldAmount(sfTakerGets);
auto const deltaPays = finalFields.getFieldAmount(sfTakerPays) -
previousFields.getFieldAmount(sfTakerPays);
transformAndStore(deltaGets, deltaPays);
}
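// Worked example (illustrative): if an offer's TakerGets moved from 100 USD
// to 60 USD and its TakerPays from 50 XRP to 30 XRP within one transaction,
// then deltaGets = -40 USD and deltaPays = -20 XRP, i.e. 40 USD traded for
// 20 XRP at a rate of 0.5; transformAndStore() folds that pair into the
// per-book tally below.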
void
transformAndStore(
ripple::STAmount const& deltaGets,
ripple::STAmount const& deltaPays)
{
auto const g = to_string(deltaGets.issue());
auto const p = to_string(deltaPays.issue());
auto const noswap =
isXRP(deltaGets) ? true : (isXRP(deltaPays) ? false : (g < p));
auto first = noswap ? deltaGets : deltaPays;
auto second = noswap ? deltaPays : deltaGets;
// defensively programmed, should (probably) never happen
if (second == beast::zero)
return;
auto const rate = divide(first, second, noIssue());
if (first < beast::zero)
first = -first;
if (second < beast::zero)
second = -second;
auto const key = noswap ? (g + '|' + p) : (p + '|' + g);
if (tally_.contains(key))
{
auto& entry = tally_.at(key);
entry.sideAVolume += first;
entry.sideBVolume += second;
if (entry.highRate < rate)
entry.highRate = rate;
if (entry.lowRate > rate)
entry.lowRate = rate;
entry.closeRate = rate;
}
else
{
// TODO: use parenthesized initialization when clang catches up
tally_[key] = {
first, // sideAVolume
second, // sideBVolume
rate, // highRate
rate, // lowRate
rate, // openRate
rate, // closeRate
};
}
}
void
handleBookChange(Backend::TransactionAndMetadata const& blob)
{
auto const [tx, meta] = deserializeTxPlusMeta(blob);
if (!tx || !meta || !tx->isFieldPresent(sfTransactionType))
return;
offerCancel_ = shouldCancelOffer(tx);
for (auto const& node : meta->getFieldArray(sfAffectedNodes))
handleAffectedNode(node);
}
std::optional<uint32_t>
shouldCancelOffer(std::shared_ptr<ripple::STTx const> const& tx) const
{
switch (tx->getFieldU16(sfTransactionType))
{
// in future if any other ways emerge to cancel an offer
// this switch makes them easy to add
case ttOFFER_CANCEL:
case ttOFFER_CREATE:
if (tx->isFieldPresent(sfOfferSequence))
return tx->getFieldU32(sfOfferSequence);
default:
return std::nullopt;
}
}
};
};
void
tag_invoke(json::value_from_tag, json::value& jv, BookChange const& change)
{
auto amountStr = [](STAmount const& amount) -> std::string {
return isXRP(amount) ? to_string(amount.xrp())
: to_string(amount.iou());
};
auto currencyStr = [](STAmount const& amount) -> std::string {
return isXRP(amount) ? "XRP_drops" : to_string(amount.issue());
};
jv = {
{JS(currency_a), currencyStr(change.sideAVolume)},
{JS(currency_b), currencyStr(change.sideBVolume)},
{JS(volume_a), amountStr(change.sideAVolume)},
{JS(volume_b), amountStr(change.sideBVolume)},
{JS(high), to_string(change.highRate.iou())},
{JS(low), to_string(change.lowRate.iou())},
{JS(open), to_string(change.openRate.iou())},
{JS(close), to_string(change.closeRate.iou())},
};
}
json::object const
computeBookChanges(
ripple::LedgerInfo const& lgrInfo,
std::vector<Backend::TransactionAndMetadata> const& transactions)
{
return {
{JS(type), "bookChanges"},
{JS(ledger_index), lgrInfo.seq},
{JS(ledger_hash), to_string(lgrInfo.hash)},
{JS(ledger_time), lgrInfo.closeTime.time_since_epoch().count()},
{JS(changes), json::value_from(BookChanges::compute(transactions))},
};
}
Result
doBookChanges(Context const& context)
{
auto const request = context.params;
auto const info = ledgerInfoFromRequest(context);
if (auto const status = std::get_if<Status>(&info))
return *status;
auto const lgrInfo = std::get<ripple::LedgerInfo>(info);
auto const transactions = context.backend->fetchAllTransactionsInLedger(
lgrInfo.seq, context.yield);
return computeBookChanges(lgrInfo, transactions);
}
} // namespace RPC
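// ----------------------------------------------------------------------------
// Illustrative response shape, summarizing the fields produced above:
//
//   {
//     "type": "bookChanges",
//     "ledger_index": <sequence>,
//     "ledger_hash": <hash>,
//     "ledger_time": <close time>,
//     "changes": [
//       {
//         "currency_a": <"XRP_drops" or issue>,
//         "currency_b": <issue>,
//         "volume_a": <amount>, "volume_b": <amount>,
//         "high": <rate>, "low": <rate>, "open": <rate>, "close": <rate>
//       },
//       ...
//     ]
//   }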

Some files were not shown because too many files have changed in this diff.