commit 9ef14f919e310df0a4ac42815e111227c742f5ad Author: CJ Cobb Date: Mon Dec 14 20:39:54 2020 -0500 Initial Commit diff --git a/.clang-format b/.clang-format new file mode 100644 index 00000000..ba409869 --- /dev/null +++ b/.clang-format @@ -0,0 +1,87 @@ +--- +Language: Cpp +AccessModifierOffset: -4 +AlignAfterOpenBracket: AlwaysBreak +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: true +AlignOperands: false +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: false +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: All +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BraceWrapping: + AfterClass: true + AfterControlStatement: true + AfterEnum: false + AfterFunction: true + AfterNamespace: false + AfterObjCDeclaration: true + AfterStruct: true + AfterUnion: true + BeforeCatch: true + BeforeElse: true + IndentBraces: false +BreakBeforeBinaryOperators: false +BreakBeforeBraces: Custom +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ] +IncludeCategories: + - Regex: '^<(BeastConfig)' + Priority: 0 + - Regex: '^<(ripple)/' + Priority: 2 + - Regex: '^<(boost)/' + Priority: 3 + - Regex: '.*' + Priority: 4 +IncludeIsMainRegex: '$' +IndentCaseLabels: true +IndentFunctionDeclarationAfterType: false +IndentWidth: 4 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: 
false +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: false +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Left +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 8 +UseTab: Never diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..567609b1 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +build/ diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..beaec158 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,9 @@ +# These might help too +# Mark any lines that have had a commit skipped using --ignore-rev with a `?` +#git config --global blame.markIgnoredLines true +# Mark any lines that were added in a skipped commit and can not be attributed with a `*` +#git config --global blame.markUnblamableLines true + +# clang-format +e41150248a97e4bdc1cf21b54650c4bb7c63928e +2e542e7b0d94451a933c88778461cc8d3d7e6417 diff --git a/.githooks/ensure_release_tag b/.githooks/ensure_release_tag new file mode 100755 index 00000000..7d195756 --- /dev/null +++ b/.githooks/ensure_release_tag @@ -0,0 +1,20 @@ +#!/bin/bash + +# Pushing a release branch requires an annotated tag at the released commit +branch=$(git rev-parse --abbrev-ref HEAD) + +if [[ $branch =~ master ]]; then + # check if HEAD commit is tagged + if ! 
git describe --exact-match HEAD; then + echo "Commits to master must be tagged" + exit 1 + fi +elif [[ $branch =~ release/* ]]; then + IFS=/ read -r branch rel_ver <<< ${branch} + tag=$(git describe --tags --abbrev=0) + if [[ "${rel_ver}" != "${tag}" ]]; then + echo "release/${rel_ver} branches must have annotated tag ${rel_ver}" + echo "git tag -am\"${rel_ver}\" ${rel_ver}" + exit 1 + fi +fi diff --git a/.githooks/pre-commit b/.githooks/pre-commit new file mode 100755 index 00000000..c53c897d --- /dev/null +++ b/.githooks/pre-commit @@ -0,0 +1,27 @@ +#!/bin/bash + +exec 1>&2 + +# paths to check and re-format +sources="src unittests" +formatter="clang-format -i" + +first=$(git diff $sources) +find $sources -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 $formatter +second=$(git diff $sources) +changes=$(diff <(echo "$first") <(echo "$second") | wc -l | sed -e 's/^[[:space:]]*//') + +if [ "$changes" != "0" ]; then + cat <<\EOF + + WARNING +----------------------------------------------------------------------------- + Automatically re-formatted code with `clang-format` - commit was aborted. + Please manually add any updated files and commit again. 
+----------------------------------------------------------------------------- + +EOF + exit 1 +fi + +.githooks/ensure_release_tag diff --git a/.github/actions/lint/action.yml b/.github/actions/lint/action.yml new file mode 100644 index 00000000..87cec73f --- /dev/null +++ b/.github/actions/lint/action.yml @@ -0,0 +1,13 @@ +runs: + using: composite + steps: + # Github's ubuntu-20.04 image already has clang-format-11 installed + - run: | + find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i + shell: bash + + - name: Check for differences + id: assert + shell: bash + run: | + git diff --color --exit-code | tee "clang-format.patch" diff --git a/.github/actions/test/Dockerfile b/.github/actions/test/Dockerfile new file mode 100644 index 00000000..a2c10da4 --- /dev/null +++ b/.github/actions/test/Dockerfile @@ -0,0 +1,6 @@ +FROM cassandra:4.0.4 + +RUN apt-get update && apt-get install -y postgresql +COPY entrypoint.sh /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/.github/actions/test/entrypoint.sh b/.github/actions/test/entrypoint.sh new file mode 100755 index 00000000..86d5fadd --- /dev/null +++ b/.github/actions/test/entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +pg_ctlcluster 12 main start +su postgres -c"psql -c\"alter user postgres with password 'postgres'\"" +su cassandra -c "/opt/cassandra/bin/cassandra -R" +sleep 90 +chmod +x ./clio_tests +./clio_tests diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..f66ab689 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,196 @@ +name: Build Clio +on: + push: + branches: [master, release/*, develop, develop-next] + pull_request: + branches: [master, release/*, develop, develop-next] + workflow_dispatch: + +jobs: + lint: + name: Lint + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v3 + - name: Run clang-format + uses: ./.github/actions/lint + + build_clio: + name: Build 
Clio + runs-on: [self-hosted, Linux, heavy] + needs: lint + strategy: + fail-fast: false + matrix: + type: + - suffix: deb + image: rippleci/clio-dpkg-builder:2022-09-17 + script: dpkg + - suffix: rpm + image: rippleci/clio-rpm-builder:2022-09-17 + script: rpm + + container: + image: ${{ matrix.type.image }} + steps: + - uses: actions/checkout@v3 + with: + path: clio + fetch-depth: 0 + + - name: Clone Clio packaging repo + uses: actions/checkout@v3 + with: + path: clio-packages + repository: XRPLF/clio-packages + ref: main + + - name: Build + shell: bash + run: | + export CLIO_ROOT=$(realpath clio) + if [ ${{ matrix.type.suffix }} == "rpm" ]; then + source /opt/rh/devtoolset-11/enable + fi + cmake -S clio-packages -B clio-packages/build -DCLIO_ROOT=$CLIO_ROOT + cmake --build clio-packages/build --parallel $(nproc) + cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests . + mv ./clio-packages/build/*.${{ matrix.type.suffix }} . + - name: Artifact packages + uses: actions/upload-artifact@v3 + with: + name: clio_${{ matrix.type.suffix }}_packages + path: ${{ github.workspace }}/*.${{ matrix.type.suffix }} + + - name: Artifact clio_tests + uses: actions/upload-artifact@v3 + with: + name: clio_tests-${{ matrix.type.suffix }} + path: ${{ github.workspace }}/clio_tests + + build_dev: + name: Build on Mac/Clang14 and run tests + needs: lint + continue-on-error: false + runs-on: macos-12 + + steps: + - uses: actions/checkout@v3 + with: + path: clio + + - name: Check Boost cache + id: boost + uses: actions/cache@v3 + with: + path: boost + key: ${{ runner.os }}-boost + + - name: Build boost + if: steps.boost.outputs.cache-hit != 'true' + run: | + curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz" + tar zxf boost_1_77_0.tar.gz + mv boost_1_77_0 boost + cd boost + ./bootstrap.sh + ./b2 cxxflags="-std=c++14" + - name: install deps + run: | + brew install pkg-config protobuf openssl ninja cassandra-cpp-driver bison + - 
name: Build clio + run: | + export BOOST_ROOT=$(pwd)/boost + cd clio + cmake -B build + if ! cmake --build build -j$(nproc); then + echo '# 🔥🔥 MacOS AppleClang build failed!💥' >> $GITHUB_STEP_SUMMARY + exit 1 + fi + - name: Run Test + run: | + cd clio/build + ./clio_tests --gtest_filter="-Backend*" + + test_clio: + name: Test Clio + runs-on: [self-hosted, Linux] + needs: build_clio + strategy: + fail-fast: false + matrix: + suffix: [rpm, deb] + steps: + - uses: actions/checkout@v3 + + - name: Get clio_tests artifact + uses: actions/download-artifact@v3 + with: + name: clio_tests-${{ matrix.suffix }} + + - name: Run tests + timeout-minutes: 10 + uses: ./.github/actions/test + + code_coverage: + name: Build on Linux and code coverage + needs: lint + continue-on-error: false + runs-on: ubuntu-22.04 + + steps: + - uses: actions/checkout@v3 + with: + path: clio + + - name: Check Boost cache + id: boost + uses: actions/cache@v3 + with: + path: boost + key: ${{ runner.os }}-boost + + - name: Build boost + if: steps.boost.outputs.cache-hit != 'true' + run: | + curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz" + tar zxf boost_1_77_0.tar.gz + mv boost_1_77_0 boost + cd boost + ./bootstrap.sh + ./b2 + + - name: install deps + run: | + sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential doxygen bison flex autoconf clang-format gcovr + + - name: Build clio + run: | + export BOOST_ROOT=$(pwd)/boost + cd clio + cmake -B build -DCODE_COVERAGE=on -DTEST_PARAMETER='--gtest_filter="-Backend*"' + if ! 
cmake --build build -j$(nproc); then + echo '# 🔥Ubuntu build🔥 failed!💥' >> $GITHUB_STEP_SUMMARY + exit 1 + fi + cd build + make clio_tests-ccov + + - name: Code Coverage Summary Report + uses: irongut/CodeCoverageSummary@v1.2.0 + with: + filename: clio/build/clio_tests-gcc-cov/out.xml + badge: true + output: both + format: markdown + + - name: Save PR number and ccov report + run: | + mkdir -p ./UnitTestCoverage + echo ${{ github.event.number }} > ./UnitTestCoverage/NR + cp clio/build/clio_tests-gcc-cov/report.html ./UnitTestCoverage/report.html + cp code-coverage-results.md ./UnitTestCoverage/out.md + - uses: actions/upload-artifact@v2 + with: + name: UnitTestCoverage + path: UnitTestCoverage/ diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..251e4981 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +*clio*.log +build*/ +.vscode +.python-version +config.json +src/main/impl/Build.cpp diff --git a/CMake/Build.cpp.in b/CMake/Build.cpp.in new file mode 100644 index 00000000..44f5b833 --- /dev/null +++ b/CMake/Build.cpp.in @@ -0,0 +1,39 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include
+ +namespace Build { +static constexpr char versionString[] = "@VERSION@"; + +std::string const& +getClioVersionString() +{ + static std::string const value = versionString; + return value; +} + +std::string const& +getClioFullVersionString() +{ + static std::string const value = "clio-" + getClioVersionString(); + return value; +} + +} // namespace Build diff --git a/CMake/ClioVersion.cmake b/CMake/ClioVersion.cmake new file mode 100644 index 00000000..177ed33e --- /dev/null +++ b/CMake/ClioVersion.cmake @@ -0,0 +1,33 @@ +#[===================================================================[ + write version to source +#]===================================================================] + +find_package(Git REQUIRED) + +set(GIT_COMMAND rev-parse --short HEAD) +execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE) + +set(GIT_COMMAND branch --show-current) +execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE) + +if(BRANCH STREQUAL "") + set(BRANCH "dev") +endif() + +if(NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-- + execute_process(COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE) + set(VERSION "${DATE}-${BRANCH}-${REV}") +else() + set(GIT_COMMAND describe --tags) + execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE) + set(VERSION "${TAG_VERSION}-${REV}") +endif() + +if(CMAKE_BUILD_TYPE MATCHES Debug) + set(VERSION "${VERSION}+DEBUG") +endif() + +message(STATUS "Build version: ${VERSION}") +set(clio_version "${VERSION}") + +configure_file(CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp) diff --git a/CMake/coverage.cmake b/CMake/coverage.cmake new file mode 100644 index 00000000..52285ca6 --- /dev/null +++ b/CMake/coverage.cmake @@ -0,0 +1,126 @@ +# call 
add_converage(module_name) to add coverage targets for the given module +function(add_converage module) + if("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang" + OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang") + message("[Coverage] Building with llvm Code Coverage Tools") + # Using llvm gcov ; llvm install by xcode + set(LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin) + if(NOT EXISTS ${LLVM_COV_PATH}/llvm-cov) + message(FATAL_ERROR "llvm-cov not found! Aborting.") + endif() + + # set Flags + target_compile_options(${module} PRIVATE -fprofile-instr-generate + -fcoverage-mapping) + target_link_options(${module} PUBLIC -fprofile-instr-generate + -fcoverage-mapping) + + target_compile_options(clio PRIVATE -fprofile-instr-generate + -fcoverage-mapping) + target_link_options(clio PUBLIC -fprofile-instr-generate + -fcoverage-mapping) + + # llvm-cov + add_custom_target( + ${module}-ccov-preprocessing + COMMAND LLVM_PROFILE_FILE=${module}.profraw $ + COMMAND ${LLVM_COV_PATH}/llvm-profdata merge -sparse ${module}.profraw -o + ${module}.profdata + DEPENDS ${module}) + + add_custom_target( + ${module}-ccov-show + COMMAND ${LLVM_COV_PATH}/llvm-cov show $ + -instr-profile=${module}.profdata -show-line-counts-or-regions + DEPENDS ${module}-ccov-preprocessing) + + # add summary for CI parse + add_custom_target( + ${module}-ccov-report + COMMAND + ${LLVM_COV_PATH}/llvm-cov report $ + -instr-profile=${module}.profdata + -ignore-filename-regex=".*_makefiles|.*unittests" + -show-region-summary=false + DEPENDS ${module}-ccov-preprocessing) + + # exclude libs and unittests self + add_custom_target( + ${module}-ccov + COMMAND + ${LLVM_COV_PATH}/llvm-cov show $ + -instr-profile=${module}.profdata -show-line-counts-or-regions + -output-dir=${module}-llvm-cov -format="html" + -ignore-filename-regex=".*_makefiles|.*unittests" > /dev/null 2>&1 + DEPENDS ${module}-ccov-preprocessing) + + add_custom_command( + TARGET ${module}-ccov + POST_BUILD + COMMENT + "Open 
${module}-llvm-cov/index.html in your browser to view the coverage report." + ) + elseif("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}" + MATCHES "GNU") + message("[Coverage] Building with Gcc Code Coverage Tools") + + find_program(GCOV_PATH gcov) + if(NOT GCOV_PATH) + message(FATAL_ERROR "gcov not found! Aborting...") + endif() # NOT GCOV_PATH + find_program(GCOVR_PATH gcovr) + if(NOT GCOVR_PATH) + message(FATAL_ERROR "gcovr not found! Aborting...") + endif() # NOT GCOVR_PATH + + set(COV_OUTPUT_PATH ${module}-gcc-cov) + target_compile_options(${module} PRIVATE -fprofile-arcs -ftest-coverage + -fPIC) + target_link_libraries(${module} PRIVATE gcov) + + target_compile_options(clio PRIVATE -fprofile-arcs -ftest-coverage + -fPIC) + target_link_libraries(clio PRIVATE gcov) + # this target is used for CI as well generate the summary out.xml will send + # to github action to generate markdown, we can paste it to comments or + # readme + add_custom_target( + ${module}-ccov + COMMAND ${module} ${TEST_PARAMETER} + COMMAND rm -rf ${COV_OUTPUT_PATH} + COMMAND mkdir ${COV_OUTPUT_PATH} + COMMAND + gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR} -x + ${COV_OUTPUT_PATH}/out.xml --exclude='${CMAKE_SOURCE_DIR}/unittests/' + --exclude='${PROJECT_BINARY_DIR}/' + COMMAND + gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR} + --html ${COV_OUTPUT_PATH}/report.html + --exclude='${CMAKE_SOURCE_DIR}/unittests/' + --exclude='${PROJECT_BINARY_DIR}/' + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + COMMENT "Running gcovr to produce Cobertura code coverage report.") + + # generate the detail report + add_custom_target( + ${module}-ccov-report + COMMAND ${module} ${TEST_PARAMETER} + COMMAND rm -rf ${COV_OUTPUT_PATH} + COMMAND mkdir ${COV_OUTPUT_PATH} + COMMAND + gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR} + --html-details ${COV_OUTPUT_PATH}/index.html + --exclude='${CMAKE_SOURCE_DIR}/unittests/' + 
--exclude='${PROJECT_BINARY_DIR}/' + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + COMMENT "Running gcovr to produce Cobertura code coverage report.") + add_custom_command( + TARGET ${module}-ccov-report + POST_BUILD + COMMENT + "Open ${COV_OUTPUT_PATH}/index.html in your browser to view the coverage report." + ) + else() + message(FATAL_ERROR "Complier not support yet") + endif() +endfunction() diff --git a/CMake/deps/Boost.cmake b/CMake/deps/Boost.cmake new file mode 100644 index 00000000..01495eb6 --- /dev/null +++ b/CMake/deps/Boost.cmake @@ -0,0 +1,6 @@ +set(Boost_USE_STATIC_LIBS ON) +set(Boost_USE_STATIC_RUNTIME ON) + +find_package(Boost 1.75 COMPONENTS filesystem log_setup log thread system REQUIRED) + +target_link_libraries(clio PUBLIC ${Boost_LIBRARIES}) diff --git a/CMake/deps/Remove-bitset-operator.patch b/CMake/deps/Remove-bitset-operator.patch new file mode 100644 index 00000000..80dce64a --- /dev/null +++ b/CMake/deps/Remove-bitset-operator.patch @@ -0,0 +1,24 @@ +From 5cd9d09d960fa489a0c4379880cd7615b1c16e55 Mon Sep 17 00:00:00 2001 +From: CJ Cobb +Date: Wed, 10 Aug 2022 12:30:01 -0400 +Subject: [PATCH] Remove bitset operator != + +--- + src/ripple/protocol/Feature.h | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h +index b3ecb099b..6424be411 100644 +--- a/src/ripple/protocol/Feature.h ++++ b/src/ripple/protocol/Feature.h +@@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset + public: + using base::bitset; + using base::operator==; +- using base::operator!=; + + using base::all; + using base::any; +-- +2.32.0 + diff --git a/CMake/deps/SourceLocation.cmake b/CMake/deps/SourceLocation.cmake new file mode 100644 index 00000000..afc9835b --- /dev/null +++ b/CMake/deps/SourceLocation.cmake @@ -0,0 +1,11 @@ +include(CheckIncludeFileCXX) + +check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE) +if(SOURCE_LOCATION_AVAILABLE) + target_compile_definitions(clio PUBLIC 
"HAS_SOURCE_LOCATION") +endif() + +check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE) +if(EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE) + target_compile_definitions(clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION") +endif() diff --git a/CMake/deps/cassandra.cmake b/CMake/deps/cassandra.cmake new file mode 100644 index 00000000..63991411 --- /dev/null +++ b/CMake/deps/cassandra.cmake @@ -0,0 +1,153 @@ +find_package(ZLIB REQUIRED) + +find_library(cassandra NAMES cassandra) +if(NOT cassandra) + message("System installed Cassandra cpp driver not found. Will build") + find_library(zlib NAMES zlib1g-dev zlib-devel zlib z) + if(NOT zlib) + message("zlib not found. will build") + add_library(zlib STATIC IMPORTED GLOBAL) + ExternalProject_Add(zlib_src + PREFIX ${nih_cache_path} + GIT_REPOSITORY https://github.com/madler/zlib.git + GIT_TAG v1.2.12 + INSTALL_COMMAND "" + BUILD_BYPRODUCTS /${CMAKE_STATIC_LIBRARY_PREFIX}z.a + ) + ExternalProject_Get_Property (zlib_src SOURCE_DIR) + ExternalProject_Get_Property (zlib_src BINARY_DIR) + set (zlib_src_SOURCE_DIR "${SOURCE_DIR}") + file (MAKE_DIRECTORY ${zlib_src_SOURCE_DIR}/include) + set_target_properties (zlib PROPERTIES + IMPORTED_LOCATION + ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}z.a + INTERFACE_INCLUDE_DIRECTORIES + ${SOURCE_DIR}/include) + add_dependencies(zlib zlib_src) + file(TO_CMAKE_PATH "${zlib_src_SOURCE_DIR}" zlib_src_SOURCE_DIR) + endif() + find_library(krb5 NAMES krb5-dev libkrb5-dev) + if(NOT krb5) + message("krb5 not found. 
will build") + add_library(krb5 STATIC IMPORTED GLOBAL) + ExternalProject_Add(krb5_src + PREFIX ${nih_cache_path} + GIT_REPOSITORY https://github.com/krb5/krb5.git + GIT_TAG krb5-1.20 + UPDATE_COMMAND "" + CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared + BUILD_IN_SOURCE 1 + BUILD_COMMAND make + INSTALL_COMMAND "" + BUILD_BYPRODUCTS /lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a + ) + message(${ep_lib_prefix}/krb5.a) + message(${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a) + ExternalProject_Get_Property (krb5_src SOURCE_DIR) + ExternalProject_Get_Property (krb5_src BINARY_DIR) + set (krb5_src_SOURCE_DIR "${SOURCE_DIR}") + file (MAKE_DIRECTORY ${krb5_src_SOURCE_DIR}/include) + set_target_properties (krb5 PROPERTIES + IMPORTED_LOCATION + ${SOURCE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a + INTERFACE_INCLUDE_DIRECTORIES + ${SOURCE_DIR}/include) + add_dependencies(krb5 krb5_src) + file(TO_CMAKE_PATH "${krb5_src_SOURCE_DIR}" krb5_src_SOURCE_DIR) + endif() + + + find_library(libuv1 NAMES uv1 libuv1 liubuv1-dev libuv1:amd64) + + + if(NOT libuv1) + message("libuv1 not found, will build") + add_library(libuv1 STATIC IMPORTED GLOBAL) + ExternalProject_Add(libuv_src + PREFIX ${nih_cache_path} + GIT_REPOSITORY https://github.com/libuv/libuv.git + GIT_TAG v1.44.1 + INSTALL_COMMAND "" + BUILD_BYPRODUCTS /${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a + ) + + ExternalProject_Get_Property (libuv_src SOURCE_DIR) + ExternalProject_Get_Property (libuv_src BINARY_DIR) + set (libuv_src_SOURCE_DIR "${SOURCE_DIR}") + file (MAKE_DIRECTORY ${libuv_src_SOURCE_DIR}/include) + + set_target_properties (libuv1 PROPERTIES + IMPORTED_LOCATION + ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a + INTERFACE_INCLUDE_DIRECTORIES + ${SOURCE_DIR}/include) + add_dependencies(libuv1 libuv_src) + + file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR) + endif() + add_library (cassandra STATIC IMPORTED GLOBAL) + ExternalProject_Add(cassandra_src + PREFIX 
${nih_cache_path} + GIT_REPOSITORY https://github.com/datastax/cpp-driver.git + GIT_TAG 2.16.2 + CMAKE_ARGS + -DLIBUV_ROOT_DIR=${BINARY_DIR} + -DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include + -DCASS_BUILD_STATIC=ON + -DCASS_BUILD_SHARED=OFF + INSTALL_COMMAND "" + BUILD_BYPRODUCTS /${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a + ) + + ExternalProject_Get_Property (cassandra_src SOURCE_DIR) + ExternalProject_Get_Property (cassandra_src BINARY_DIR) + set (cassandra_src_SOURCE_DIR "${SOURCE_DIR}") + file (MAKE_DIRECTORY ${cassandra_src_SOURCE_DIR}/include) + + set_target_properties (cassandra PROPERTIES + IMPORTED_LOCATION + ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a + INTERFACE_INCLUDE_DIRECTORIES + ${SOURCE_DIR}/include) + message("cass dirs") + message(${BINARY_DIR}) + message(${SOURCE_DIR}) + message(${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a) + add_dependencies(cassandra cassandra_src) + + if(NOT libuv1) + ExternalProject_Add_StepDependencies(cassandra_src build libuv1) + target_link_libraries(cassandra INTERFACE libuv1) + else() + target_link_libraries(cassandra INTERFACE ${libuv1}) + endif() + if(NOT krb5) + + ExternalProject_Add_StepDependencies(cassandra_src build krb5) + target_link_libraries(cassandra INTERFACE krb5) + else() + target_link_libraries(cassandra INTERFACE ${krb5}) + endif() + + if(NOT zlib) + ExternalProject_Add_StepDependencies(cassandra_src build zlib) + target_link_libraries(cassandra INTERFACE zlib) + else() + target_link_libraries(cassandra INTERFACE ${zlib}) + endif() + set(OPENSSL_USE_STATIC_LIBS TRUE) + find_package(OpenSSL REQUIRED) + target_link_libraries(cassandra INTERFACE OpenSSL::SSL) + + file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR) + target_link_libraries(clio PUBLIC cassandra) +else() + message("Found system installed cassandra cpp driver") + message(${cassandra}) + find_path(cassandra_includes NAMES cassandra.h REQUIRED) + message(${cassandra_includes}) + 
get_filename_component(CASSANDRA_HEADER ${cassandra_includes}/cassandra.h REALPATH) + get_filename_component(CASSANDRA_HEADER_DIR ${CASSANDRA_HEADER} DIRECTORY) + target_link_libraries (clio PUBLIC ${cassandra}) + target_include_directories(clio PUBLIC ${CASSANDRA_HEADER_DIR}) +endif() diff --git a/CMake/deps/clio.cmake b/CMake/deps/clio.cmake new file mode 100644 index 00000000..9dd11c09 --- /dev/null +++ b/CMake/deps/clio.cmake @@ -0,0 +1,48 @@ +set(CLIO_REPO "https://github.com/XRPLF/clio.git") +set(CLIO_BRANCH "1.0.4") + +add_library(clio STATIC IMPORTED GLOBAL) +add_library(xrpl_core STATIC IMPORTED GLOBAL) + +ExternalProject_Add(clio_src + GIT_REPOSITORY "${CLIO_REPO}" + GIT_TAG "${CLIO_BRANCH}" + GIT_SHALLOW ON + INSTALL_COMMAND "" + CMAKE_ARGS + -DBUILD_TESTS=OFF + -DPACKAGING=OFF + ) + +ExternalProject_Get_Property(clio_src SOURCE_DIR) +ExternalProject_Get_Property(clio_src BINARY_DIR) + +file(MAKE_DIRECTORY ${SOURCE_DIR}/src) +set_target_properties(clio PROPERTIES + IMPORTED_LOCATION + ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}clio_static.a + INTERFACE_INCLUDE_DIRECTORIES + ${SOURCE_DIR}/src + ) + +file(MAKE_DIRECTORY ${BINARY_DIR}/_deps/rippled-build) +file(MAKE_DIRECTORY ${BINARY_DIR}/_deps/rippled-src/src) +set_target_properties(xrpl_core PROPERTIES + IMPORTED_LOCATION + ${BINARY_DIR}/_deps/rippled-build/${CMAKE_STATIC_LIBRARY_PREFIX}xrpl_core.a + INTERFACE_INCLUDE_DIRECTORIES + ${BINARY_DIR}/_deps/rippled-src/src + ) + +add_dependencies(clio clio_src) +add_dependencies(xrpl_core clio_src) + +add_library(date STATIC IMPORTED GLOBAL) +file(MAKE_DIRECTORY + ${BINARY_DIR}/unix_makefiles/AppleClang_14.0.0.14000029/Release/hh_date_src-src/include) +set_target_properties(date PROPERTIES + IMPORTED_LOCATION + ${BINARY_DIR}/unix_makefiles/AppleClang_14.0.0.14000029/Release/hh_date_src-src/include/date/date.h + INTERFACE_INCLUDE_DIRECTORIES + ${BINARY_DIR}/unix_makefiles/AppleClang_14.0.0.14000029/Release/hh_date_src-src/include + ) diff --git 
a/CMake/deps/gtest.cmake b/CMake/deps/gtest.cmake new file mode 100644 index 00000000..b5760740 --- /dev/null +++ b/CMake/deps/gtest.cmake @@ -0,0 +1,20 @@ +FetchContent_Declare( + googletest + URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip +) + +FetchContent_GetProperties(googletest) + +if(NOT googletest_POPULATED) + FetchContent_Populate(googletest) + add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL) +endif() + +target_link_libraries(clio_tests PUBLIC clio gmock_main) +target_include_directories(clio_tests PRIVATE unittests) + +enable_testing() + +include(GoogleTest) + +gtest_discover_tests(clio_tests) diff --git a/CMake/deps/libfmt.cmake b/CMake/deps/libfmt.cmake new file mode 100644 index 00000000..0449bf4a --- /dev/null +++ b/CMake/deps/libfmt.cmake @@ -0,0 +1,14 @@ +FetchContent_Declare( + libfmt + URL https://github.com/fmtlib/fmt/releases/download/9.1.0/fmt-9.1.0.zip +) + +FetchContent_GetProperties(libfmt) + +if(NOT libfmt_POPULATED) + FetchContent_Populate(libfmt) + add_subdirectory(${libfmt_SOURCE_DIR} ${libfmt_BINARY_DIR} EXCLUDE_FROM_ALL) +endif() + +target_link_libraries(clio PUBLIC fmt) + diff --git a/CMake/deps/rippled.cmake b/CMake/deps/rippled.cmake new file mode 100644 index 00000000..ef91eb7f --- /dev/null +++ b/CMake/deps/rippled.cmake @@ -0,0 +1,20 @@ +set(RIPPLED_REPO "https://github.com/ripple/rippled.git") +set(RIPPLED_BRANCH "1.9.2") +set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "") +set(patch_command ! 
grep operator!= src/ripple/protocol/Feature.h || git apply < ${CMAKE_CURRENT_SOURCE_DIR}/CMake/deps/Remove-bitset-operator.patch) +message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}") +FetchContent_Declare(rippled + GIT_REPOSITORY "${RIPPLED_REPO}" + GIT_TAG "${RIPPLED_BRANCH}" + GIT_SHALLOW ON + PATCH_COMMAND "${patch_command}" +) + +FetchContent_GetProperties(rippled) +if(NOT rippled_POPULATED) + FetchContent_Populate(rippled) + add_subdirectory(${rippled_SOURCE_DIR} ${rippled_BINARY_DIR} EXCLUDE_FROM_ALL) +endif() + +target_link_libraries(clio PUBLIC xrpl_core grpc_pbufs) +target_include_directories(clio PUBLIC ${rippled_SOURCE_DIR}/src ) # TODO: Seems like this shouldn't be needed? diff --git a/CMake/install/clio.service.in b/CMake/install/clio.service.in new file mode 100644 index 00000000..1ecdddb6 --- /dev/null +++ b/CMake/install/clio.service.in @@ -0,0 +1,17 @@ +[Unit] +Description=Clio XRPL API server +Documentation=https://github.com/XRPLF/clio.git + +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json +Restart=on-failure +User=clio +Group=clio +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/CMake/install/install.cmake b/CMake/install/install.cmake new file mode 100644 index 00000000..f4f8a5b1 --- /dev/null +++ b/CMake/install/install.cmake @@ -0,0 +1,16 @@ +set(CLIO_INSTALL_DIR "/opt/clio") +set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR}) + +install(TARGETS clio_server DESTINATION bin) +# install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests? 
+ +#install(FILES example-config.json DESTINATION etc RENAME config.json) +file(READ example-config.json config) +string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}") +file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}") +install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json) + +configure_file("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service") + +install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system) + diff --git a/CMake/settings.cmake b/CMake/settings.cmake new file mode 100644 index 00000000..255d0526 --- /dev/null +++ b/CMake/settings.cmake @@ -0,0 +1,6 @@ +target_compile_options(clio + PUBLIC -Wall + -Werror + -Wno-narrowing + -Wno-deprecated-declarations + -Wno-dangling-else) diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 00000000..fc978625 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,98 @@ +cmake_minimum_required(VERSION 3.16.3) + +project(clio_migrator) + +if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11) + message(FATAL_ERROR "GCC 11+ required for building clio_migrator") +endif() + +option(VERBOSE "Verbose build" TRUE) +if(VERBOSE) + set(CMAKE_VERBOSE_MAKEFILE TRUE) + set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()") +endif() + +add_library(clio) +target_compile_features(clio PUBLIC cxx_std_20) +target_include_directories(clio PUBLIC src) + +include(FetchContent) +include(ExternalProject) +include(CMake/settings.cmake) +include(CMake/ClioVersion.cmake) +include(CMake/deps/rippled.cmake) +include(CMake/deps/libfmt.cmake) +include(CMake/deps/Boost.cmake) +include(CMake/deps/cassandra.cmake) +include(CMake/deps/SourceLocation.cmake) + +target_sources(clio PRIVATE + ## Main + src/main/impl/Build.cpp + ## Backend + src/backend/BackendInterface.cpp + src/backend/CassandraBackend.cpp + src/backend/SimpleCache.cpp + ## ETL + src/etl/ETLSource.cpp + src/etl/ProbingETLSource.cpp + 
src/etl/NFTHelpers.cpp + src/etl/ReportingETL.cpp + ## Subscriptions + src/subscriptions/SubscriptionManager.cpp + ## RPC + src/rpc/Errors.cpp + src/rpc/RPC.cpp + src/rpc/RPCHelpers.cpp + src/rpc/Counters.cpp + src/rpc/WorkQueue.cpp + ## NextGen RPC + src/rpc/common/Specs.cpp + src/rpc/common/Validators.cpp + ## NextGen RPC handler + src/rpc/ngHandlers/AccountChannels.cpp + src/rpc/ngHandlers/AccountCurrencies.cpp + src/rpc/ngHandlers/Tx.cpp + src/rpc/ngHandlers/GatewayBalances.cpp + src/rpc/ngHandlers/LedgerEntry.cpp + ## RPC Methods + # Account + src/rpc/handlers/AccountChannels.cpp + src/rpc/handlers/AccountCurrencies.cpp + src/rpc/handlers/AccountInfo.cpp + src/rpc/handlers/AccountLines.cpp + src/rpc/handlers/AccountOffers.cpp + src/rpc/handlers/AccountObjects.cpp + src/rpc/handlers/GatewayBalances.cpp + src/rpc/handlers/NoRippleCheck.cpp + # NFT + src/rpc/handlers/NFTHistory.cpp + src/rpc/handlers/NFTInfo.cpp + src/rpc/handlers/NFTOffers.cpp + # Ledger + src/rpc/handlers/Ledger.cpp + src/rpc/handlers/LedgerData.cpp + src/rpc/handlers/LedgerEntry.cpp + src/rpc/handlers/LedgerRange.cpp + # Transaction + src/rpc/handlers/Tx.cpp + src/rpc/handlers/TransactionEntry.cpp + src/rpc/handlers/AccountTx.cpp + # Dex + src/rpc/handlers/BookChanges.cpp + src/rpc/handlers/BookOffers.cpp + # Payment Channel + src/rpc/handlers/ChannelAuthorize.cpp + src/rpc/handlers/ChannelVerify.cpp + # Subscribe + src/rpc/handlers/Subscribe.cpp + # Server + src/rpc/handlers/ServerInfo.cpp + # Utilities + src/rpc/handlers/Random.cpp + src/config/Config.cpp + src/log/Logger.cpp + src/util/Taggable.cpp) + +add_executable(clio_migrator src/main/main.cpp) +target_link_libraries(clio_migrator PUBLIC clio) diff --git a/Doxyfile b/Doxyfile new file mode 100644 index 00000000..1da44b70 --- /dev/null +++ b/Doxyfile @@ -0,0 +1,3 @@ +PROJECT_NAME = "Clio" +INPUT = src +RECURSIVE = YES \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..0d077ef5 --- /dev/null 
+++ b/LICENSE @@ -0,0 +1,7 @@ +ISC License + +Copyright (c) 2023, XRPL Foundation + +Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 00000000..52b4e559 --- /dev/null +++ b/README.md @@ -0,0 +1,43 @@ +# CLIO MIGRATOR (ONE OFF!) + +This tool is a (really) hacky way of migrating some data from +[clio](https://github.com/XRPLF/clio) due to the [specific pull request +313](https://github.com/XRPLF/clio/pull/313) in that repo. + +Specifically, it is meant to migrate NFT data such that: + +* The new `nf_token_uris` table is populated with all URIs for all NFTs known +* The new `issuer_nf_tokens_v2` table is populated with all NFTs known +* The old `issuer_nf_tokens` table is dropped. This table was never used prior + to the above-referenced PR, so it is very safe to drop. + +This tool should be used as follows, with regard to the above update: + +1) Stop serving requests from your clio +2) Stop your clio and upgrade it to the version after the after PR +3) Start your clio +4) Now, your clio is writing new data correctly. This tool will update your +old data, while your new clio is running. +5) Run this tool, using the _exact_ same config as what you are using for your +production clio. 
+6) Once this tool terminates successfully, you can resume serving requests +from your clio. + + +## Compiling + +Git-clone this project to your server. Then from the top-level directory: +``` +mkdir build +cd build +cmake .. +cmake --build . -j 4 +``` + +Once this completes, the migrator will be compiled as `clio_migrator`. Then +you should copy your existing clio config somewhere and: +``` +./clio_migrator +``` + +This migration will take a few hours to complete. diff --git a/cloud-example-config.json b/cloud-example-config.json new file mode 100644 index 00000000..e985d764 --- /dev/null +++ b/cloud-example-config.json @@ -0,0 +1,38 @@ +{ + "database": + { + "type":"cassandra", + "cassandra": + { + "secure_connect_bundle":"[path/to/zip. ignore if using contact_points]", + "contact_points":"[ip. ignore if using secure_connect_bundle]", + "port":"[port. ignore if using_secure_connect_bundle]", + "keyspace":"clio", + "username":"[username, if any]", + "password":"[password, if any]", + "max_requests_outstanding":25000, + "threads":8 + } + }, + "etl_sources": + [ + { + "ip":"[rippled ip]", + "ws_port":"6006", + "grpc_port":"50051" + } + ], + "dos_guard": + { + "whitelist":["127.0.0.1"] + }, + "server":{ + "ip":"0.0.0.0", + "port":8080 + }, + "log_level":"debug", + "log_file":"./clio.log", + "online_delete":0, + "extractor_threads":8, + "read_only":false +} diff --git a/docker/centos/Dockerfile b/docker/centos/Dockerfile new file mode 100644 index 00000000..763a336e --- /dev/null +++ b/docker/centos/Dockerfile @@ -0,0 +1,49 @@ +# FROM centos:7 as deps +FROM centos:7 as build + +ENV CLIO_DIR=/opt/clio/ +# ENV OPENSSL_DIR=/opt/openssl + +RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl +RUN yum install -y devtoolset-11 +ENV version=3.16 +ENV build=3 +# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz +COPY docker/shared/install_cmake.sh /install_cmake.sh +RUN /install_cmake.sh 3.16.3 /usr/local +RUN source 
/opt/rh/devtoolset-11/enable +WORKDIR /tmp +# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR +COPY docker/centos/build_git_centos7.sh build_git_centos7.sh + +RUN ./build_git_centos7.sh +RUN git clone https://github.com/openssl/openssl +WORKDIR /tmp/openssl +RUN git checkout OpenSSL_1_1_1q +#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic +RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \ + make -j $(nproc) && \ + make install_sw +WORKDIR /tmp +# FROM centos:7 as build + +RUN git clone https://github.com/xrplf/clio.git +COPY docker/shared/build_boost.sh build_boost.sh +ENV OPENSSL_ROOT=/opt/local/openssl +ENV BOOST_ROOT=/boost +RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0 +RUN yum install -y bison flex +RUN yum install -y rpmdevtools rpmlint +RUN source /opt/rh/devtoolset-11/enable && cd /tmp/clio && \ + cmake -B build -DBUILD_TESTS=1 && \ + cmake --build build --parallel $(nproc) +RUN mkdir output +RUN strip clio/build/clio_server && strip clio/build/clio_tests +RUN cp clio/build/clio_tests output/ && cp clio/build/clio_server output/ +RUN cp clio/example-config.json output/example-config.json + +FROM centos:7 +COPY --from=build /tmp/output /clio +RUN mkdir -p /opt/clio/etc && mv /clio/example-config.json /opt/clio/etc/config.json + +CMD ["/clio/clio_server", "/opt/clio/etc/config.json"] diff --git a/docker/centos/build_git_centos7.sh b/docker/centos/build_git_centos7.sh new file mode 100755 index 00000000..0db195f3 --- /dev/null +++ b/docker/centos/build_git_centos7.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -ex +GIT_VERSION="2.37.1" +curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz +tar zxvf git-${GIT_VERSION}.tar.gz +cd git-${GIT_VERSION} + +yum install -y centos-release-scl epel-release +yum update -y +yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel + +source 
/opt/rh/devtoolset-11/enable +make configure +./configure +make git -j$(nproc) +make install git +git --version | cut -d ' ' -f3 diff --git a/docker/centos/install_cmake.sh b/docker/centos/install_cmake.sh new file mode 100755 index 00000000..6a3cba4f --- /dev/null +++ b/docker/centos/install_cmake.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -eo pipefail + +CMAKE_VERSION=${1:-"3.16.3"} +cd /tmp +URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz" +curl -OJLs $URL +tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz +mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/ +ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake diff --git a/docker/clio_docker/centos/build_boost.sh b/docker/clio_docker/centos/build_boost.sh new file mode 100755 index 00000000..a7253128 --- /dev/null +++ b/docker/clio_docker/centos/build_boost.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -exu + +#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static +# it's either those or link=static that halves the failures. probably link=static +BOOST_VERSION=$1 +BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . 
_) +echo "BOOST_VERSION: ${BOOST_VERSION}" +echo "BOOST_VERSION_: ${BOOST_VERSION_}" +curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz" +tar zxf "boost_${BOOST_VERSION_}.tar.gz" +cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc) +mkdir -p /boost && mv boost /boost && mv stage /boost diff --git a/docker/clio_docker/centos/build_git_centos7.sh b/docker/clio_docker/centos/build_git_centos7.sh new file mode 100755 index 00000000..0db195f3 --- /dev/null +++ b/docker/clio_docker/centos/build_git_centos7.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -ex +GIT_VERSION="2.37.1" +curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz +tar zxvf git-${GIT_VERSION}.tar.gz +cd git-${GIT_VERSION} + +yum install -y centos-release-scl epel-release +yum update -y +yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel + +source /opt/rh/devtoolset-11/enable +make configure +./configure +make git -j$(nproc) +make install git +git --version | cut -d ' ' -f3 diff --git a/docker/clio_docker/centos/dockerfile b/docker/clio_docker/centos/dockerfile new file mode 100644 index 00000000..012ce09e --- /dev/null +++ b/docker/clio_docker/centos/dockerfile @@ -0,0 +1,34 @@ +FROM centos:7 + +ENV CLIO_DIR=/opt/clio/ +# ENV OPENSSL_DIR=/opt/openssl + +RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl +RUN yum install -y devtoolset-11 +ENV version=3.16 +ENV build=3 +# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz +COPY install_cmake.sh /install_cmake.sh +RUN /install_cmake.sh 3.16.3 /usr/local +RUN source /opt/rh/devtoolset-11/enable +WORKDIR /tmp +# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR +COPY build_git_centos7.sh build_git_centos7.sh + +RUN ./build_git_centos7.sh +RUN git clone https://github.com/openssl/openssl +WORKDIR /tmp/openssl +RUN git checkout OpenSSL_1_1_1q 
+#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic +RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \ + make -j $(nproc) && \ + make install_sw +WORKDIR /tmp +RUN git clone https://github.com/xrplf/clio.git +COPY build_boost.sh build_boost.sh +ENV OPENSSL_ROOT=/opt/local/openssl +ENV BOOST_ROOT=/boost +RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0 +RUN yum install -y bison flex +RUN source /opt/rh/devtoolset-11/enable && \ + cd /tmp/clio && cmake -B build -Dtests=0 -Dlocal_libarchive=1 -Dunity=0 -DBUILD_TESTS=0 && cmake --build build --parallel $(nproc) diff --git a/docker/clio_docker/centos/install_cmake.sh b/docker/clio_docker/centos/install_cmake.sh new file mode 100755 index 00000000..6a3cba4f --- /dev/null +++ b/docker/clio_docker/centos/install_cmake.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -eo pipefail + +CMAKE_VERSION=${1:-"3.16.3"} +cd /tmp +URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz" +curl -OJLs $URL +tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz +mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/ +ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake diff --git a/docker/shared/build_boost.sh b/docker/shared/build_boost.sh new file mode 100755 index 00000000..a7253128 --- /dev/null +++ b/docker/shared/build_boost.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -exu + +#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static +# it's either those or link=static that halves the failures. probably link=static +BOOST_VERSION=$1 +BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . 
_) +echo "BOOST_VERSION: ${BOOST_VERSION}" +echo "BOOST_VERSION_: ${BOOST_VERSION_}" +curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz" +tar zxf "boost_${BOOST_VERSION_}.tar.gz" +cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc) +mkdir -p /boost && mv boost /boost && mv stage /boost diff --git a/docker/shared/install_cmake.sh b/docker/shared/install_cmake.sh new file mode 100755 index 00000000..6a3cba4f --- /dev/null +++ b/docker/shared/install_cmake.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -eo pipefail + +CMAKE_VERSION=${1:-"3.16.3"} +cd /tmp +URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz" +curl -OJLs $URL +tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz +mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/ +ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake diff --git a/docker/shared/install_openssl.sh b/docker/shared/install_openssl.sh new file mode 100755 index 00000000..d9628ae7 --- /dev/null +++ b/docker/shared/install_openssl.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +set -e diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile new file mode 100644 index 00000000..9078429f --- /dev/null +++ b/docker/ubuntu/Dockerfile @@ -0,0 +1,24 @@ +FROM ubuntu:20.04 AS boost + +RUN apt-get update && apt-get install -y build-essential +ARG BOOST_VERSION_=1_75_0 +ARG BOOST_VERSION=1.75.0 +COPY docker/shared/build_boost.sh . 
+RUN apt install -y curl +RUN ./build_boost.sh ${BOOST_VERSION} +ENV BOOST_ROOT=/boost + +FROM ubuntu:20.04 AS build +ENV BOOST_ROOT=/boost +COPY --from=boost /boost /boost +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install --no-install-recommends -y build-essential software-properties-common pkg-config libssl-dev wget curl gpg git zlib1g-dev bison flex autoconf lsb-release +RUN apt install -y gpg-agent +RUN wget https://apt.llvm.org/llvm.sh +RUN chmod +x llvm.sh && ./llvm.sh 14 && ./llvm.sh 15 +# COPY . /clio +## Install cmake +ARG CMAKE_VERSION=3.16.3 +COPY docker/shared/install_cmake.sh . +RUN ./install_cmake.sh ${CMAKE_VERSION} +ENV PATH="/opt/local/cmake/bin:$PATH" diff --git a/example-config.json b/example-config.json new file mode 100644 index 00000000..49f83b0e --- /dev/null +++ b/example-config.json @@ -0,0 +1,93 @@ +{ + "database": { + "type": "cassandra", + "cassandra": { + "contact_points": "127.0.0.1", + "port": 9042, + "keyspace": "clio", + "replication_factor": 1, + "table_prefix": "", + "max_write_requests_outstanding": 25000, + "max_read_requests_outstanding": 30000, + "threads": 8 + } + }, + "etl_sources": [ + { + "ip": "127.0.0.1", + "ws_port": "6006", + "grpc_port": "50051" + } + ], + "dos_guard": { + "whitelist": [ + "127.0.0.1" + ], // comma-separated list of ips to exclude from rate limiting + /* The below values are the default values and are only specified here + * for documentation purposes. The rate limiter currently limits + * connections and bandwidth per ip. 
The rate limiter looks at the raw + * ip of a client connection, and so requests routed through a load + * balancer will all have the same ip and be treated as a single client + */ + "max_fetches": 1000000, // max bytes per ip per sweep interval + "max_connections": 20, // max connections per ip + "max_requests": 20, // max connections per ip + "sweep_interval": 1 // time in seconds before resetting bytes per ip count + }, + "cache": { + "peers": [ + { + "ip": "127.0.0.1", + "port": 51234 + } + ] + }, + "server": { + "ip": "0.0.0.0", + "port": 51233, + /* Max number of requests to queue up before rejecting further requests. + * Defaults to 0, which disables the limit + */ + "max_queue_size": 500 + }, + "log_channels": [ + { + "channel": "Backend", + "log_level": "fatal" + }, + { + "channel": "WebServer", + "log_level": "info" + }, + { + "channel": "Subscriptions", + "log_level": "info" + }, + { + "channel": "RPC", + "log_level": "error" + }, + { + "channel": "ETL", + "log_level": "debug" + }, + { + "channel": "Performance", + "log_level": "trace" + } + ], + "log_level": "info", + "log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format + "log_to_console": true, + "log_directory": "./clio_log", + "log_rotation_size": 2048, + "log_directory_max_size": 51200, + "log_rotation_hour_interval": 12, + "log_tag_style": "uint", + "extractor_threads": 8, + "read_only": false, + //"start_sequence": [integer] the ledger index to start from, + //"finish_sequence": [integer] the ledger index to finish at, + //"ssl_cert_file" : "/full/path/to/cert.file", + //"ssl_key_file" : "/full/path/to/key.file" +} diff --git a/metrics.py b/metrics.py new file mode 100644 index 00000000..4fa96bfd --- /dev/null +++ b/metrics.py @@ -0,0 +1,181 @@ +#!/usr/bin/python3 +import argparse + +from datetime import datetime + +def getTime(line): + bracketOpen = line.find("[") + bracketClose = line.find("]") + timestampSub = 
line[bracketOpen+1:bracketClose] + timestamp = datetime.strptime(timestampSub, '%Y-%m-%d %H:%M:%S.%f') + return timestamp.timestamp() + +def parseAccountTx(filename): + + + with open(filename) as f: + totalProcTime = 0.0 + totalTxnTime = 0.0 + numCalls = 0 + for line in f: + if "executed stored_procedure" in line: + idx = line.find("in ") + idx = idx + 3 + idx2 = line.find("num") + procTime = float(line[idx:idx2]) + totalProcTime += procTime + if "fetchTransactions fetched" in line: + idx = line.find("took ") + idx = idx + 5 + txnTime = float(line[idx:]) + totalTxnTime += txnTime + numCalls = numCalls + 1 + print(totalProcTime) + print(totalProcTime/numCalls) + print(totalTxnTime) + print(totalTxnTime/numCalls) + + + + + +def parseLogs(filename, interval): + + with open(filename) as f: + + totalTime = 0 + totalTxns = 0 + totalObjs = 0 + totalLoadTime = 0 + + + start = 0 + end = 0 + totalLedgers = 0 + + intervalTime = 0 + intervalTxns = 0 + intervalObjs = 0 + intervalLoadTime = 0 + + intervalStart = 0 + intervalEnd = 0 + intervalLedgers = 0 + ledgersPerSecond = 0 + + print("ledgers, transactions, objects, loadTime, loadTime/ledger, ledgers/sec, txns/sec, objs/sec") + for line in f: + if "Load phase" in line: + sequenceIdx = line.find("Sequence : ") + hashIdx = line.find(" Hash :") + sequence = line[sequenceIdx + len("Sequence : "):hashIdx] + txnCountSubstr = "txn count = " + objCountSubstr = ". object count = " + loadTimeSubstr = ". load time = " + txnsSubstr = ". load txns per second = " + objsSubstr = ". 
load objs per second = " + txnCountIdx = line.find(txnCountSubstr) + objCountIdx = line.find(objCountSubstr) + loadTimeIdx = line.find(loadTimeSubstr) + txnsIdx = line.find(txnsSubstr) + objsIdx = line.find(objsSubstr) + txnCount = line[txnCountIdx + len(txnCountSubstr):objCountIdx] + objCount = line[objCountIdx + len(objCountSubstr):loadTimeIdx] + loadTime = line[loadTimeIdx + len(loadTimeSubstr):txnsIdx] + txnsPerSecond = line[txnsIdx + len(txnsSubstr):objsIdx] + objsPerSecond = line[objsIdx + len(objsSubstr):-1] + totalTime += float(loadTime); + totalTxns += float(txnCount) + totalObjs += float(objCount) + intervalTime += float(loadTime) + intervalTxns += float(txnCount) + intervalObjs += float(objCount) + + totalLoadTime += float(loadTime) + intervalLoadTime += float(loadTime) + + + if start == 0: + start = getTime(line) + + + prevEnd = end + end = getTime(line) + + if intervalStart == 0: + intervalStart = getTime(line) + + intervalEnd = getTime(line) + + totalLedgers+=1 + intervalLedgers+=1 + ledgersPerSecond = 0 + if end != start: + ledgersPerSecond = float(totalLedgers) / float((end - start)) + intervalLedgersPerSecond = 0 + if intervalEnd != intervalStart: + intervalLedgersPerSecond = float(intervalLedgers) / float((intervalEnd - intervalStart)) + + + + if int(sequence) % interval == 0: + + # print("Sequence = " + sequence + " : [time, txCount, objCount, txPerSec, objsPerSec]") + # print(loadTime + " , " + # + txnCount + " , " + # + objCount + " , " + # + txnsPerSecond + " , " + # + objsPerSecond) + # print("Interval Aggregate ( " + str(interval) + " ) [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]: ") + print(str(intervalLedgers) + " , " + + str(intervalTxns) + " , " + + str(intervalObjs) + " , " + + str(intervalLoadTime) + " , " + + str(intervalLoadTime/intervalLedgers) + " , " + + str(intervalLedgers/intervalLoadTime) + " , " + + str(intervalTxns/intervalLoadTime) + " , " + + str(intervalObjs/intervalLoadTime)) + 
# print("Total Aggregate: [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]") + # print(str(totalLedgers) + " , " + # + str(totalTxns) + " , " + # + str(totalObjs) + " , " + # + str(end-start) + " , " + # + str(ledgersPerSecond) + " , " + # + str(totalLoadTime/totalLedgers) + " , " + # + str(totalTxns/totalTime) + " , " + # + str(totalObjs/totalTime)) + if int(sequence) % interval == 0: + intervalTime = 0 + intervalTxns = 0 + intervalObjs = 0 + intervalStart = 0 + intervalEnd = 0 + intervalLedgers = 0 + intervalLoadTime = 0 + print("Total Aggregate: [ledgers, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]") + print(totalLedgers) + print(totalLoadTime) + print(str(totalLedgers) + " : " + + str(end-start) + " : " + + str(ledgersPerSecond) + " : " + + str(totalLoadTime/totalLedgers) + " : " + + str(totalTxns/totalTime) + " : " + + str(totalObjs/totalTime)) + + + + +parser = argparse.ArgumentParser(description='parses logs') +parser.add_argument("--filename") +parser.add_argument("--interval",default=100000) +parser.add_argument("--account_tx",default=False) + +args = parser.parse_args() + +def run(args): + if args.account_tx: + parseAccountTx(args.filename) + else: + parseLogs(args.filename, int(args.interval)) + +run(args) diff --git a/src/backend/BackendFactory.h b/src/backend/BackendFactory.h new file mode 100644 index 00000000..4e69e3b6 --- /dev/null +++ b/src/backend/BackendFactory.h @@ -0,0 +1,66 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include + +#include + +namespace Backend { +std::shared_ptr +make_Backend(boost::asio::io_context& ioc, clio::Config const& config) +{ + static clio::Logger log{"Backend"}; + log.info() << "Constructing BackendInterface"; + + auto readOnly = config.valueOr("read_only", false); + auto type = config.value("database.type"); + std::shared_ptr backend = nullptr; + + if (boost::iequals(type, "cassandra")) + { + auto cfg = config.section("database." + type); + auto ttl = config.valueOr("online_delete", 0) * 4; + backend = std::make_shared(ioc, cfg, ttl); + } + + if (!backend) + throw std::runtime_error("Invalid database type"); + + backend->open(readOnly); +<<<<<<< HEAD + backend->checkFlagLedgers(); +======= + auto rng = backend->hardFetchLedgerRangeNoThrow(); + if (rng) + { + backend->updateRange(rng->minSequence); + backend->updateRange(rng->maxSequence); + } +>>>>>>> c7e31af... Add state data cache and successor table. 
Remove keys table + + log.info() << "Constructed BackendInterface Successfully"; + + return backend; +} +} // namespace Backend diff --git a/src/backend/BackendIndexer.cpp b/src/backend/BackendIndexer.cpp new file mode 100644 index 00000000..f3ddf8c5 --- /dev/null +++ b/src/backend/BackendIndexer.cpp @@ -0,0 +1,241 @@ +#include +#include + +namespace Backend { +BackendIndexer::BackendIndexer(boost::json::object const& config) + : strand_(ioc_) +{ + if (config.contains("indexer_key_shift")) + keyShift_ = config.at("indexer_key_shift").as_int64(); + work_.emplace(ioc_); + ioThread_ = std::thread{[this]() { ioc_.run(); }}; +}; +BackendIndexer::~BackendIndexer() +{ + work_.reset(); + ioThread_.join(); +} + +void +BackendIndexer::addKey(ripple::uint256&& key) +{ + keys.insert(std::move(key)); +} + +void +BackendIndexer::doKeysRepair( + BackendInterface const& backend, + std::optional sequence) +{ + auto rng = backend.fetchLedgerRangeNoThrow(); + + if (!rng) + return; + + if (!sequence) + sequence = rng->maxSequence; + + if (sequence < rng->minSequence) + sequence = rng->minSequence; + + BOOST_LOG_TRIVIAL(info) + << __func__ << " sequence = " << std::to_string(*sequence); + + std::optional cursor; + while (true) + { + try + { + if (backend.isLedgerIndexed(*sequence)) + { + BOOST_LOG_TRIVIAL(info) + << __func__ << " - " << std::to_string(*sequence) + << " flag ledger already written. returning"; + return; + } + else + { + BOOST_LOG_TRIVIAL(info) + << __func__ << " - " << std::to_string(*sequence) + << " flag ledger not written. recursing.."; + uint32_t lower = (*sequence - 1) >> keyShift_ << keyShift_; + doKeysRepair(backend, lower); + BOOST_LOG_TRIVIAL(info) + << __func__ << " - " + << " sequence = " << std::to_string(*sequence) + << " lower = " << std::to_string(lower) + << " finished recursing. 
submitting repair "; + writeKeyFlagLedger(lower, backend); + return; + } + } + catch (DatabaseTimeout const& e) + { + BOOST_LOG_TRIVIAL(warning) + << __func__ << " Database timeout fetching keys"; + std::this_thread::sleep_for(std::chrono::seconds(2)); + } + } + BOOST_LOG_TRIVIAL(info) + << __func__ << " finished. sequence = " << std::to_string(*sequence); +} +void +BackendIndexer::doKeysRepairAsync( + BackendInterface const& backend, + std::optional sequence) +{ + boost::asio::post(strand_, [this, sequence, &backend]() { + doKeysRepair(backend, sequence); + }); +} +void +BackendIndexer::writeKeyFlagLedger( + uint32_t ledgerSequence, + BackendInterface const& backend) +{ + auto nextFlag = getKeyIndexOfSeq(ledgerSequence + 1); + uint32_t lower = ledgerSequence >> keyShift_ << keyShift_; + BOOST_LOG_TRIVIAL(info) + << "writeKeyFlagLedger - " + << "next flag = " << std::to_string(nextFlag.keyIndex) + << "lower = " << std::to_string(lower) + << "ledgerSequence = " << std::to_string(ledgerSequence) << " starting"; + ripple::uint256 zero = {}; + std::optional cursor; + size_t numKeys = 0; + auto begin = std::chrono::system_clock::now(); + while (true) + { + try + { + { + BOOST_LOG_TRIVIAL(info) + << "writeKeyFlagLedger - checking for complete..."; + if (backend.isLedgerIndexed(nextFlag.keyIndex)) + { + BOOST_LOG_TRIVIAL(warning) + << "writeKeyFlagLedger - " + << "flag ledger already written. 
flag = " + << std::to_string(nextFlag.keyIndex) + << " , ledger sequence = " + << std::to_string(ledgerSequence); + return; + } + BOOST_LOG_TRIVIAL(info) + << "writeKeyFlagLedger - is not complete"; + } + indexing_ = nextFlag.keyIndex; + auto start = std::chrono::system_clock::now(); + auto [objects, curCursor, warning] = + backend.fetchLedgerPage(cursor, lower, 2048); + auto mid = std::chrono::system_clock::now(); + // no cursor means this is the first page + if (!cursor) + { + if (warning) + { + BOOST_LOG_TRIVIAL(error) + << "writeKeyFlagLedger - " + << " prev flag ledger not written " + << std::to_string(nextFlag.keyIndex) << " : " + << std::to_string(ledgerSequence); + assert(false); + throw std::runtime_error("Missing prev flag"); + } + } + + cursor = curCursor; + std::unordered_set keys; + for (auto& obj : objects) + { + keys.insert(obj.key); + } + backend.writeKeys(keys, nextFlag, true); + auto end = std::chrono::system_clock::now(); + BOOST_LOG_TRIVIAL(debug) + << "writeKeyFlagLedger - " << std::to_string(nextFlag.keyIndex) + << " fetched a page " + << " cursor = " + << (cursor.has_value() ? ripple::strHex(*cursor) + : std::string{}) + << " num keys = " << std::to_string(numKeys) << " fetch time = " + << std::chrono::duration_cast( + mid - start) + .count() + << " write time = " + << std::chrono::duration_cast( + end - mid) + .count(); + if (!cursor) + break; + } + catch (DatabaseTimeout const& e) + { + BOOST_LOG_TRIVIAL(warning) + << __func__ << " Database timeout fetching keys"; + std::this_thread::sleep_for(std::chrono::seconds(2)); + } + } + backend.writeKeys({zero}, nextFlag, true); + auto end = std::chrono::system_clock::now(); + BOOST_LOG_TRIVIAL(info) + << "writeKeyFlagLedger - " << std::to_string(nextFlag.keyIndex) + << " finished. 
" + << " num keys = " << std::to_string(numKeys) << " total time = " + << std::chrono::duration_cast(end - begin) + .count(); + indexing_ = 0; +} +void +BackendIndexer::writeKeyFlagLedgerAsync( + uint32_t ledgerSequence, + BackendInterface const& backend) +{ + BOOST_LOG_TRIVIAL(info) + << __func__ + << " starting. sequence = " << std::to_string(ledgerSequence); + + boost::asio::post(strand_, [this, ledgerSequence, &backend]() { + writeKeyFlagLedger(ledgerSequence, backend); + }); + BOOST_LOG_TRIVIAL(info) + << __func__ + << " finished. sequence = " << std::to_string(ledgerSequence); +} + +void +BackendIndexer::finish(uint32_t ledgerSequence, BackendInterface const& backend) +{ + BOOST_LOG_TRIVIAL(debug) + << __func__ + << " starting. sequence = " << std::to_string(ledgerSequence); + auto keyIndex = getKeyIndexOfSeq(ledgerSequence); + if (isFirst_) + { + auto rng = backend.fetchLedgerRangeNoThrow(); + if (rng && rng->minSequence != ledgerSequence) + isFirst_ = false; + else + { + keyIndex = KeyIndex{ledgerSequence}; + } + } + + backend.writeKeys(keys, keyIndex); + if (isFirst_) + { + // write completion record + ripple::uint256 zero = {}; + backend.writeKeys({zero}, keyIndex); + // write next flag sychronously + keyIndex = getKeyIndexOfSeq(ledgerSequence + 1); + backend.writeKeys(keys, keyIndex); + backend.writeKeys({zero}, keyIndex); + } + isFirst_ = false; + keys = {}; + BOOST_LOG_TRIVIAL(debug) + << __func__ + << " finished. sequence = " << std::to_string(ledgerSequence); +} +} // namespace Backend diff --git a/src/backend/BackendInterface.cpp b/src/backend/BackendInterface.cpp new file mode 100644 index 00000000..aa8db2e3 --- /dev/null +++ b/src/backend/BackendInterface.cpp @@ -0,0 +1,346 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"Backend"}; +} // namespace + +namespace Backend { +bool +BackendInterface::finishWrites(std::uint32_t const ledgerSequence) +{ + auto commitRes = doFinishWrites(); + if (commitRes) + { + updateRange(ledgerSequence); + } + return commitRes; +} +void +BackendInterface::writeLedgerObject( + std::string&& key, + std::uint32_t const seq, + std::string&& blob) +{ + assert(key.size() == sizeof(ripple::uint256)); + doWriteLedgerObject(std::move(key), seq, std::move(blob)); +} + +std::optional +BackendInterface::hardFetchLedgerRangeNoThrow( + boost::asio::yield_context& yield) const +{ + gLog.trace() << "called"; + while (true) + { + try + { + return hardFetchLedgerRange(yield); + } + catch (DatabaseTimeout& t) + { + ; + } + } +} + +std::optional +BackendInterface::hardFetchLedgerRangeNoThrow() const +{ + gLog.trace() << "called"; + return retryOnTimeout([&]() { return hardFetchLedgerRange(); }); +} + +// *** state data methods +std::optional +BackendInterface::fetchLedgerObject( + ripple::uint256 const& key, + std::uint32_t const sequence, + 
boost::asio::yield_context& yield) const +{ + auto obj = cache_.get(key, sequence); + if (obj) + { + gLog.trace() << "Cache hit - " << ripple::strHex(key); + return *obj; + } + else + { + gLog.trace() << "Cache miss - " << ripple::strHex(key); + auto dbObj = doFetchLedgerObject(key, sequence, yield); + if (!dbObj) + gLog.trace() << "Missed cache and missed in db"; + else + gLog.trace() << "Missed cache but found in db"; + return dbObj; + } +} + +std::vector +BackendInterface::fetchLedgerObjects( + std::vector const& keys, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const +{ + std::vector results; + results.resize(keys.size()); + std::vector misses; + for (size_t i = 0; i < keys.size(); ++i) + { + auto obj = cache_.get(keys[i], sequence); + if (obj) + results[i] = *obj; + else + misses.push_back(keys[i]); + } + gLog.trace() << "Cache hits = " << keys.size() - misses.size() + << " - cache misses = " << misses.size(); + + if (misses.size()) + { + auto objs = doFetchLedgerObjects(misses, sequence, yield); + for (size_t i = 0, j = 0; i < results.size(); ++i) + { + if (results[i].size() == 0) + { + results[i] = objs[j]; + ++j; + } + } + } + + return results; +} +// Fetches the successor to key/index +std::optional +BackendInterface::fetchSuccessorKey( + ripple::uint256 key, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const +{ + auto succ = cache_.getSuccessor(key, ledgerSequence); + if (succ) + gLog.trace() << "Cache hit - " << ripple::strHex(key); + else + gLog.trace() << "Cache miss - " << ripple::strHex(key); + return succ ? 
succ->key : doFetchSuccessorKey(key, ledgerSequence, yield); +} + +std::optional +BackendInterface::fetchSuccessorObject( + ripple::uint256 key, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const +{ + auto succ = fetchSuccessorKey(key, ledgerSequence, yield); + if (succ) + { + auto obj = fetchLedgerObject(*succ, ledgerSequence, yield); + if (!obj) + return {{*succ, {}}}; + + return {{*succ, *obj}}; + } + return {}; +} + +BookOffersPage +BackendInterface::fetchBookOffers( + ripple::uint256 const& book, + std::uint32_t const ledgerSequence, + std::uint32_t const limit, + std::optional const& cursor, + boost::asio::yield_context& yield) const +{ + // TODO try to speed this up. This can take a few seconds. The goal is + // to get it down to a few hundred milliseconds. + BookOffersPage page; + const ripple::uint256 bookEnd = ripple::getQualityNext(book); + ripple::uint256 uTipIndex = book; + std::vector keys; + auto getMillis = [](auto diff) { + return std::chrono::duration_cast(diff) + .count(); + }; + auto begin = std::chrono::system_clock::now(); + std::uint32_t numSucc = 0; + std::uint32_t numPages = 0; + long succMillis = 0; + long pageMillis = 0; + while (keys.size() < limit) + { + auto mid1 = std::chrono::system_clock::now(); + auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield); + auto mid2 = std::chrono::system_clock::now(); + numSucc++; + succMillis += getMillis(mid2 - mid1); + if (!offerDir || offerDir->key >= bookEnd) + { + gLog.trace() << "offerDir.has_value() " << offerDir.has_value() + << " breaking"; + break; + } + uTipIndex = offerDir->key; + while (keys.size() < limit) + { + ++numPages; + ripple::STLedgerEntry sle{ + ripple::SerialIter{ + offerDir->blob.data(), offerDir->blob.size()}, + offerDir->key}; + auto indexes = sle.getFieldV256(ripple::sfIndexes); + keys.insert(keys.end(), indexes.begin(), indexes.end()); + auto next = sle.getFieldU64(ripple::sfIndexNext); + if (!next) + { + gLog.trace() << "Next 
is empty. breaking"; + break; + } + auto nextKey = ripple::keylet::page(uTipIndex, next); + auto nextDir = + fetchLedgerObject(nextKey.key, ledgerSequence, yield); + assert(nextDir); + offerDir->blob = *nextDir; + offerDir->key = nextKey.key; + } + auto mid3 = std::chrono::system_clock::now(); + pageMillis += getMillis(mid3 - mid2); + } + auto mid = std::chrono::system_clock::now(); + auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); + for (size_t i = 0; i < keys.size() && i < limit; ++i) + { + gLog.trace() << "Key = " << ripple::strHex(keys[i]) + << " blob = " << ripple::strHex(objs[i]) + << " ledgerSequence = " << ledgerSequence; + assert(objs[i].size()); + page.offers.push_back({keys[i], objs[i]}); + } + auto end = std::chrono::system_clock::now(); + gLog.debug() << "Fetching " << std::to_string(keys.size()) + << " offers took " << std::to_string(getMillis(mid - begin)) + << " milliseconds. Fetching next dir took " + << std::to_string(succMillis) + << " milliseonds. Fetched next dir " << std::to_string(numSucc) + << " times" + << " Fetching next page of dir took " + << std::to_string(pageMillis) << " milliseconds" + << ". num pages = " << std::to_string(numPages) + << ". Fetching all objects took " + << std::to_string(getMillis(end - mid)) + << " milliseconds. total time = " + << std::to_string(getMillis(end - begin)) << " milliseconds" + << " book = " << ripple::strHex(book); + + return page; +} + +LedgerPage +BackendInterface::fetchLedgerPage( + std::optional const& cursor, + std::uint32_t const ledgerSequence, + std::uint32_t const limit, + bool outOfOrder, + boost::asio::yield_context& yield) const +{ + LedgerPage page; + + std::vector keys; + bool reachedEnd = false; + while (keys.size() < limit && !reachedEnd) + { + ripple::uint256 const& curCursor = keys.size() ? keys.back() + : cursor ? *cursor + : firstKey; + std::uint32_t const seq = + outOfOrder ? 
range->maxSequence : ledgerSequence; + auto succ = fetchSuccessorKey(curCursor, seq, yield); + if (!succ) + reachedEnd = true; + else + keys.push_back(std::move(*succ)); + } + + auto objects = fetchLedgerObjects(keys, ledgerSequence, yield); + for (size_t i = 0; i < objects.size(); ++i) + { + if (objects[i].size()) + page.objects.push_back({std::move(keys[i]), std::move(objects[i])}); + else if (!outOfOrder) + { + gLog.error() + << "Deleted or non-existent object in successor table. key = " + << ripple::strHex(keys[i]) << " - seq = " << ledgerSequence; + std::stringstream msg; + for (size_t j = 0; j < objects.size(); ++j) + { + msg << " - " << ripple::strHex(keys[j]); + } + gLog.error() << msg.str(); + } + } + if (keys.size() && !reachedEnd) + page.cursor = keys.back(); + + return page; +} + +std::optional +BackendInterface::fetchFees( + std::uint32_t const seq, + boost::asio::yield_context& yield) const +{ + ripple::Fees fees; + + auto key = ripple::keylet::fees().key; + auto bytes = fetchLedgerObject(key, seq, yield); + + if (!bytes) + { + gLog.error() << "Could not find fees"; + return {}; + } + + ripple::SerialIter it(bytes->data(), bytes->size()); + ripple::SLE sle{it, key}; + + if (sle.getFieldIndex(ripple::sfBaseFee) != -1) + fees.base = sle.getFieldU64(ripple::sfBaseFee); + + if (sle.getFieldIndex(ripple::sfReferenceFeeUnits) != -1) + fees.units = sle.getFieldU32(ripple::sfReferenceFeeUnits); + + if (sle.getFieldIndex(ripple::sfReserveBase) != -1) + fees.reserve = sle.getFieldU32(ripple::sfReserveBase); + + if (sle.getFieldIndex(ripple::sfReserveIncrement) != -1) + fees.increment = sle.getFieldU32(ripple::sfReserveIncrement); + + return fees; +} + +} // namespace Backend diff --git a/src/backend/BackendInterface.h b/src/backend/BackendInterface.h new file mode 100644 index 00000000..2634650a --- /dev/null +++ b/src/backend/BackendInterface.h @@ -0,0 +1,663 @@ +//------------------------------------------------------------------------------ +/* + This file 
is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +namespace Backend { + +/** + * @brief Throws an error when database read time limit is exceeded. + * + * This class is throws an error when read time limit is exceeded but + * is also paired with a separate class to retry the connection. + */ +class DatabaseTimeout : public std::exception +{ +public: + const char* + what() const throw() override + { + return "Database read timed out. Please retry the request"; + } +}; + +/** + * @brief Separate class that reattempts connection after time limit. + * + * @tparam F Represents a class of handlers for Cassandra database. + * @param func Instance of Cassandra database handler class. + * @param waitMs Is the arbitrary time limit of 500ms. + * @return auto + */ +template +auto +retryOnTimeout(F func, size_t waitMs = 500) +{ + static clio::Logger log{"Backend"}; + + while (true) + { + try + { + return func(); + } + catch (DatabaseTimeout& t) + { + log.error() + << "Database request timed out. Sleeping and retrying ... 
"; + std::this_thread::sleep_for(std::chrono::milliseconds(waitMs)); + } + } +} + +/** + * @brief Passes in serialized handlers in an asynchronous fashion. + * + * Note that the synchronous auto passes handlers critical to supporting + * the Clio backend. The coroutine types are checked if same/different. + * + * @tparam F Represents a class of handlers for Cassandra database. + * @param f R-value instance of Cassandra handler class. + * @return auto + */ +template +auto +synchronous(F&& f) +{ + /** @brief Serialized handlers and their execution. + * + * The ctx class is converted into a serialized handler, also named + * ctx, and is used to pass a stream of data into the method. + */ + boost::asio::io_context ctx; + boost::asio::io_context::strand strand(ctx); + std::optional work; + + /*! @brief Place the ctx within the vector of serialized handlers. */ + work.emplace(ctx); + + /** + * @brief If/else statements regarding coroutine type matching. + * + * R is the currently executing coroutine that is about to get passed. + * If corountine types do not match, the current one's type is stored. + */ + using R = typename std::result_of::type; + if constexpr (!std::is_same::value) + { + /** + * @brief When the coroutine type is the same + * + * The spawn function enables programs to implement asynchronous logic + * in a synchronous manner. res stores the instance of the currently + * executing coroutine, yield. The different type is returned. + */ + R res; + boost::asio::spawn( + strand, [&f, &work, &res](boost::asio::yield_context yield) { + res = f(yield); + work.reset(); + }); + + ctx.run(); + return res; + } + else + { + /*! @brief When the corutine type is different, run as normal. */ + boost::asio::spawn( + strand, [&f, &work](boost::asio::yield_context yield) { + f(yield); + work.reset(); + }); + + ctx.run(); + } +} + +/** + * @brief Reestablishes synchronous connection on timeout. + * + * @tparam Represents a class of handlers for Cassandra database. 
+ * @param f R-value instance of Cassandra database handler class. + * @return auto + */ +template +auto +synchronousAndRetryOnTimeout(F&& f) +{ + return retryOnTimeout([&]() { return synchronous(f); }); +} + +/*! @brief Handles ledger and transaction backend data. */ +class BackendInterface +{ + /** + * @brief Shared mutexes and a cache for the interface. + * + * rngMutex is a shared mutex. Shared mutexes prevent shared data + * from being accessed by multiple threads and has two levels of + * access: shared and exclusive. + */ +protected: + mutable std::shared_mutex rngMtx_; + std::optional range; + SimpleCache cache_; + + /** + * @brief Public read methods + * + * All of these reads methods can throw DatabaseTimeout. When writing + * code in an RPC handler, this exception does not need to be caught: + * when an RPC results in a timeout, an error is returned to the client. + */ + +public: + BackendInterface(clio::Config const& config) + { + } + virtual ~BackendInterface() + { + } + + /*! @brief LEDGER METHODS */ +public: + /** + * @brief Cache that holds states of the ledger + * + * const version holds the original cache state; the other tracks + * historical changes. + * + * @return SimpleCache const& + */ + SimpleCache const& + cache() const + { + return cache_; + } + + SimpleCache& + cache() + { + return cache_; + } + + /*! @brief Fetches a specific ledger by sequence number. */ + virtual std::optional + fetchLedgerBySequence( + std::uint32_t const sequence, + boost::asio::yield_context& yield) const = 0; + + /*! @brief Fetches a specific ledger by hash. */ + virtual std::optional + fetchLedgerByHash( + ripple::uint256 const& hash, + boost::asio::yield_context& yield) const = 0; + + /*! @brief Fetches the latest ledger sequence. */ + virtual std::optional + fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0; + + /*! 
@brief Fetches the current ledger range while locking that process */ + std::optional + fetchLedgerRange() const + { + std::shared_lock lck(rngMtx_); + return range; + } + + /** + * @brief Updates the range of sequences to be tracked. + * + * Function that continues updating the range sliding window or creates + * a new sliding window once the maxSequence limit has been reached. + * + * @param newMax Unsigned 32-bit integer representing new max of range. + */ + void + updateRange(uint32_t newMax) + { + std::scoped_lock lck(rngMtx_); + assert(!range || newMax >= range->maxSequence); + if (!range) + range = {newMax, newMax}; + else + range->maxSequence = newMax; + } + + /** + * @brief Returns the fees for specific transactions. + * + * @param seq Unsigned 32-bit integer reprsenting sequence. + * @param yield The currently executing coroutine. + * @return std::optional + */ + std::optional + fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const; + + /*! @brief TRANSACTION METHODS */ + /** + * @brief Fetches a specific transaction. + * + * @param hash Unsigned 256-bit integer representing hash. + * @param yield The currently executing coroutine. + * @return std::optional + */ + virtual std::optional + fetchTransaction( + ripple::uint256 const& hash, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Fetches multiple transactions. + * + * @param hashes Unsigned integer value representing a hash. + * @param yield The currently executing coroutine. + * @return std::vector + */ + virtual std::vector + fetchTransactions( + std::vector const& hashes, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Fetches all transactions for a specific account + * + * @param account A specific XRPL Account, speciifed by unique type + * accountID. + * @param limit Paging limit for how many transactions can be returned per + * page. + * @param forward Boolean whether paging happens forwards or backwards. 
+ * @param cursor Important metadata returned every time paging occurs. + * @param yield Currently executing coroutine. + * @return TransactionsAndCursor + */ + virtual TransactionsAndCursor + fetchAccountTransactions( + ripple::AccountID const& account, + std::uint32_t const limit, + bool forward, + std::optional const& cursor, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Fetches all transactions from a specific ledger. + * + * @param ledgerSequence Unsigned 32-bit integer for latest total + * transactions. + * @param yield Currently executing coroutine. + * @return std::vector + */ + virtual std::vector + fetchAllTransactionsInLedger( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Fetches all transaction hashes from a specific ledger. + * + * @param ledgerSequence Standard unsigned integer. + * @param yield Currently executing coroutine. + * @return std::vector + */ + virtual std::vector + fetchAllTransactionHashesInLedger( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const = 0; + + /*! @brief NFT methods */ + /** + * @brief Fetches a specific NFT + * + * @param tokenID Unsigned 256-bit integer. + * @param ledgerSequence Standard unsigned integer. + * @param yield Currently executing coroutine. + * @return std::optional + */ + virtual std::optional + fetchNFT( + ripple::uint256 const& tokenID, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Fetches all transactions for a specific NFT. + * + * @param tokenID Unsigned 256-bit integer. + * @param limit Paging limit as to how many transactions return per page. + * @param forward Boolean whether paging happens forwards or backwards. + * @param cursorIn Represents transaction number and ledger sequence. + * @param yield Currently executing coroutine is passed in as input. 
+ * @return TransactionsAndCursor + */ + virtual TransactionsAndCursor + fetchNFTTransactions( + ripple::uint256 const& tokenID, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield) const = 0; + + /*! @brief STATE DATA METHODS */ + /** + * @brief Fetches a specific ledger object: vector of unsigned chars + * + * @param key Unsigned 256-bit integer. + * @param sequence Unsigned 32-bit integer. + * @param yield Currently executing coroutine. + * @return std::optional + */ + std::optional + fetchLedgerObject( + ripple::uint256 const& key, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const; + + /** + * @brief Fetches all ledger objects: a vector of vectors of unsigned chars. + * + * @param keys Unsigned 256-bit integer. + * @param sequence Unsigned 32-bit integer. + * @param yield Currently executing coroutine. + * @return std::vector + */ + std::vector + fetchLedgerObjects( + std::vector const& keys, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const; + + /*! @brief Virtual function version of fetchLedgerObject */ + virtual std::optional + doFetchLedgerObject( + ripple::uint256 const& key, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const = 0; + + /*! @brief Virtual function version of fetchLedgerObjects */ + virtual std::vector + doFetchLedgerObjects( + std::vector const& keys, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Returns the difference between ledgers: vector of objects + * + * Objects are made of a key value, vector of unsigned chars (blob), + * and a boolean detailing whether keys and blob match. + * + * @param ledgerSequence Standard unsigned integer. + * @param yield Currently executing coroutine. 
+ * @return std::vector + */ + virtual std::vector + fetchLedgerDiff( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Fetches a page of ledger objects, ordered by key/index. + * + * @param cursor Important metadata returned every time paging occurs. + * @param ledgerSequence Standard unsigned integer. + * @param limit Paging limit as to how many transactions returned per page. + * @param outOfOrder Boolean on whether ledger page is out of order. + * @param yield Currently executing coroutine. + * @return LedgerPage + */ + LedgerPage + fetchLedgerPage( + std::optional const& cursor, + std::uint32_t const ledgerSequence, + std::uint32_t const limit, + bool outOfOrder, + boost::asio::yield_context& yield) const; + + /*! @brief Fetches successor object from key/index. */ + std::optional + fetchSuccessorObject( + ripple::uint256 key, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const; + + /*! @brief Fetches successor key from key/index. */ + std::optional + fetchSuccessorKey( + ripple::uint256 key, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const; + + /*! @brief Virtual function version of fetchSuccessorKey. */ + virtual std::optional + doFetchSuccessorKey( + ripple::uint256 key, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Fetches book offers. + * + * @param book Unsigned 256-bit integer. + * @param ledgerSequence Standard unsigned integer. + * @param limit Pagaing limit as to how many transactions returned per page. + * @param cursor Important metadata returned every time paging occurs. + * @param yield Currently executing coroutine. 
+ * @return BookOffersPage + */ + BookOffersPage + fetchBookOffers( + ripple::uint256 const& book, + std::uint32_t const ledgerSequence, + std::uint32_t const limit, + std::optional const& cursor, + boost::asio::yield_context& yield) const; + + /** + * @brief Returns a ledger range + * + * Ledger range is a struct of min and max sequence numbers). Due to + * the use of [&], which denotes a special case of a lambda expression + * where values found outside the scope are passed by reference, wrt the + * currently executing coroutine. + * + * @return std::optional + */ + std::optional + hardFetchLedgerRange() const + { + return synchronous([&](boost::asio::yield_context yield) { + return hardFetchLedgerRange(yield); + }); + } + + /*! @brief Virtual function equivalent of hardFetchLedgerRange. */ + virtual std::optional + hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0; + + /*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */ + std::optional + hardFetchLedgerRangeNoThrow() const; + /*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */ + std::optional + hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const; + + /** + * @brief Writes to a specific ledger. + * + * @param ledgerInfo Const on ledger information. + * @param ledgerHeader r-value string representing ledger header. + */ + virtual void + writeLedger( + ripple::LedgerInfo const& ledgerInfo, + std::string&& ledgerHeader) = 0; + + /** + * @brief Writes a new ledger object. + * + * The key and blob are r-value references and do NOT have memory addresses. + * + * @param key String represented as an r-value. + * @param seq Unsigned integer representing a sequence. + * @param blob r-value vector of unsigned characters (blob). + */ + virtual void + writeLedgerObject( + std::string&& key, + std::uint32_t const seq, + std::string&& blob); + + /** + * @brief Writes a new transaction. + * + * @param hash r-value reference. No memory address. 
+ * @param seq Unsigned 32-bit integer. + * @param date Unsigned 32-bit integer. + * @param transaction r-value reference. No memory address. + * @param metadata r-value refrence. No memory address. + */ + virtual void + writeTransaction( + std::string&& hash, + std::uint32_t const seq, + std::uint32_t const date, + std::string&& transaction, + std::string&& metadata) = 0; + + /** + * @brief Write a new NFT. + * + * @param data Passed in as an r-value reference. + */ + virtual void + writeNFTs(std::vector&& data) = 0; + + /** + * @brief Write a new set of account transactions. + * + * @param data Passed in as an r-value reference. + */ + virtual void + writeAccountTransactions(std::vector&& data) = 0; + + /** + * @brief Write a new transaction for a specific NFT. + * + * @param data Passed in as an r-value reference. + */ + virtual void + writeNFTTransactions(std::vector&& data) = 0; + + /** + * @brief Write a new successor. + * + * @param key Passed in as an r-value reference. + * @param seq Unsigned 32-bit integer. + * @param successor Passed in as an r-value reference. + */ + virtual void + writeSuccessor( + std::string&& key, + std::uint32_t const seq, + std::string&& successor) = 0; + + /*! @brief Tells database we will write data for a specific ledger. */ + virtual void + startWrites() const = 0; + + /** + * @brief Tells database we finished writing all data for a specific ledger. + * + * TODO: change the return value to represent different results: + * Committed, write conflict, errored, successful but not committed + * + * @param ledgerSequence Const unsigned 32-bit integer on ledger sequence. + * @return true + * @return false + */ + bool + finishWrites(std::uint32_t const ledgerSequence); + + /** + * @brief Selectively delets parts of the database. + * + * @param numLedgersToKeep Unsigned 32-bit integer on number of ledgers to + * keep. + * @param yield Currently executing coroutine. 
+ * @return true + * @return false + */ + virtual bool + doOnlineDelete( + std::uint32_t numLedgersToKeep, + boost::asio::yield_context& yield) const = 0; + + /** + * @brief Opens the database + * + * Open the database. Set up all of the necessary objects and + * datastructures. After this call completes, the database is + * ready for use. + * + * @param readOnly Boolean whether ledger is read only. + */ + virtual void + open(bool readOnly) = 0; + + /*! @brief Closes the database, releasing any resources. */ + virtual void + close(){}; + + virtual bool + isTooBusy() const = 0; + +private: + /** + * @brief Private helper method to write ledger object + * + * @param key r-value string representing key. + * @param seq Unsigned 32-bit integer representing sequence. + * @param blob r-value vector of unsigned chars. + */ + virtual void + doWriteLedgerObject( + std::string&& key, + std::uint32_t const seq, + std::string&& blob) = 0; + + virtual bool + doFinishWrites() = 0; +}; + +} // namespace Backend +using BackendInterface = Backend::BackendInterface; diff --git a/src/backend/CassandraBackend.cpp b/src/backend/CassandraBackend.cpp new file mode 100644 index 00000000..1325395b --- /dev/null +++ b/src/backend/CassandraBackend.cpp @@ -0,0 +1,1726 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +#include + +#include +#include + +using namespace clio; + +namespace Backend { + +// Type alias for async completion handlers +using completion_token = boost::asio::yield_context; +using function_type = void(boost::system::error_code); +using result_type = boost::asio::async_result; +using handler_type = typename result_type::completion_handler_type; + +template +void +processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func) +{ + static clio::Logger log{"Backend"}; + + CassandraBackend const& backend = *requestParams.backend; + auto rc = cass_future_error_code(fut); + if (rc != CASS_OK) + { + // exponential backoff with a max wait of 2^10 ms (about 1 second) + auto wait = std::chrono::milliseconds( + lround(std::pow(2, std::min(10u, requestParams.currentRetries)))); + log.error() << "ERROR!!! 
Cassandra write error: " << rc << ", " + << cass_error_desc(rc) + << " id= " << requestParams.toString() + << ", current retries " << requestParams.currentRetries + << ", retrying in " << wait.count() << " milliseconds"; + ++requestParams.currentRetries; + std::shared_ptr timer = + std::make_shared( + backend.getIOContext(), + std::chrono::steady_clock::now() + wait); + timer->async_wait([timer, &requestParams, func]( + const boost::system::error_code& error) { + func(requestParams, true); + }); + } + else + { + log.trace() << "Succesfully inserted a record"; + requestParams.finish(); + } +} + +template +void +processAsyncWrite(CassFuture* fut, void* cbData) +{ + T& requestParams = *static_cast(cbData); + // TODO don't pass in func + processAsyncWriteResponse(requestParams, fut, requestParams.retry); +} + +template +struct WriteCallbackData +{ + CassandraBackend const* backend; + T data; + std::function&, bool)> retry; + std::uint32_t currentRetries; + std::atomic refs = 1; + std::string id; + + WriteCallbackData( + CassandraBackend const* b, + T&& d, + B bind, + std::string const& identifier) + : backend(b), data(std::move(d)), id(identifier) + { + retry = [bind, this](auto& params, bool isRetry) { + auto statement = bind(params); + backend->executeAsyncWrite( + statement, + processAsyncWrite< + typename std::remove_reference::type>, + params, + isRetry); + }; + } + virtual void + start() + { + retry(*this, false); + } + + virtual void + finish() + { + backend->finishAsyncWrite(); + int remaining = --refs; + if (remaining == 0) + delete this; + } + virtual ~WriteCallbackData() + { + } + + std::string + toString() + { + return id; + } +}; + +template +struct BulkWriteCallbackData : public WriteCallbackData +{ + std::atomic_int& numRemaining; + std::mutex& mtx; + std::condition_variable& cv; + BulkWriteCallbackData( + CassandraBackend const* b, + T&& d, + B bind, + std::atomic_int& r, + std::mutex& m, + std::condition_variable& c) + : WriteCallbackData(b, 
std::move(d), bind, "bulk") + , numRemaining(r) + , mtx(m) + , cv(c) + { + } + void + start() override + { + this->retry(*this, true); + } + + void + finish() override + { + // TODO: it would be nice to avoid this lock. + std::lock_guard lck(mtx); + if (--numRemaining == 0) + cv.notify_one(); + } + ~BulkWriteCallbackData() + { + } +}; + +template +void +makeAndExecuteAsyncWrite( + CassandraBackend const* b, + T&& d, + B bind, + std::string const& id) +{ + auto* cb = new WriteCallbackData(b, std::move(d), bind, id); + cb->start(); +} + +template +std::shared_ptr> +makeAndExecuteBulkAsyncWrite( + CassandraBackend const* b, + T&& d, + B bind, + std::atomic_int& r, + std::mutex& m, + std::condition_variable& c) +{ + auto cb = std::make_shared>( + b, std::move(d), bind, r, m, c); + cb->start(); + return cb; +} + +void +CassandraBackend::doWriteLedgerObject( + std::string&& key, + std::uint32_t const seq, + std::string&& blob) +{ + log_.trace() << "Writing ledger object to cassandra"; + if (range) + makeAndExecuteAsyncWrite( + this, + std::make_tuple(seq, key), + [this](auto& params) { + auto& [sequence, key] = params.data; + + CassandraStatement statement{insertDiff_}; + statement.bindNextInt(sequence); + statement.bindNextBytes(key); + return statement; + }, + "ledger_diff"); + makeAndExecuteAsyncWrite( + this, + std::make_tuple(std::move(key), seq, std::move(blob)), + [this](auto& params) { + auto& [key, sequence, blob] = params.data; + + CassandraStatement statement{insertObject_}; + statement.bindNextBytes(key); + statement.bindNextInt(sequence); + statement.bindNextBytes(blob); + return statement; + }, + "ledger_object"); +} + +void +CassandraBackend::writeSuccessor( + std::string&& key, + std::uint32_t const seq, + std::string&& successor) +{ + log_.trace() << "Writing successor. key = " << key.size() << " bytes. 
" + << " seq = " << std::to_string(seq) + << " successor = " << successor.size() << " bytes."; + assert(key.size() != 0); + assert(successor.size() != 0); + makeAndExecuteAsyncWrite( + this, + std::make_tuple(std::move(key), seq, std::move(successor)), + [this](auto& params) { + auto& [key, sequence, successor] = params.data; + + CassandraStatement statement{insertSuccessor_}; + statement.bindNextBytes(key); + statement.bindNextInt(sequence); + statement.bindNextBytes(successor); + return statement; + }, + "successor"); +} +void +CassandraBackend::writeLedger( + ripple::LedgerInfo const& ledgerInfo, + std::string&& header) +{ + makeAndExecuteAsyncWrite( + this, + std::make_tuple(ledgerInfo.seq, std::move(header)), + [this](auto& params) { + auto& [sequence, header] = params.data; + CassandraStatement statement{insertLedgerHeader_}; + statement.bindNextInt(sequence); + statement.bindNextBytes(header); + return statement; + }, + "ledger"); + makeAndExecuteAsyncWrite( + this, + std::make_tuple(ledgerInfo.hash, ledgerInfo.seq), + [this](auto& params) { + auto& [hash, sequence] = params.data; + CassandraStatement statement{insertLedgerHash_}; + statement.bindNextBytes(hash); + statement.bindNextInt(sequence); + return statement; + }, + "ledger_hash"); + ledgerSequence_ = ledgerInfo.seq; +} + +void +CassandraBackend::writeAccountTransactions( + std::vector&& data) +{ + for (auto& record : data) + { + for (auto& account : record.accounts) + { + makeAndExecuteAsyncWrite( + this, + std::make_tuple( + std::move(account), + record.ledgerSequence, + record.transactionIndex, + record.txHash), + [this](auto& params) { + CassandraStatement statement(insertAccountTx_); + auto& [account, lgrSeq, txnIdx, hash] = params.data; + statement.bindNextBytes(account); + statement.bindNextIntTuple(lgrSeq, txnIdx); + statement.bindNextBytes(hash); + return statement; + }, + "account_tx"); + } + } +} + +void +CassandraBackend::writeNFTTransactions(std::vector&& data) +{ + for 
(NFTTransactionsData const& record : data) + { + makeAndExecuteAsyncWrite( + this, + std::make_tuple( + record.tokenID, + record.ledgerSequence, + record.transactionIndex, + record.txHash), + [this](auto const& params) { + CassandraStatement statement(insertNFTTx_); + auto const& [tokenID, lgrSeq, txnIdx, txHash] = params.data; + statement.bindNextBytes(tokenID); + statement.bindNextIntTuple(lgrSeq, txnIdx); + statement.bindNextBytes(txHash); + return statement; + }, + "nf_token_transactions"); + } +} + +void +CassandraBackend::writeTransaction( + std::string&& hash, + std::uint32_t const seq, + std::uint32_t const date, + std::string&& transaction, + std::string&& metadata) +{ + log_.trace() << "Writing txn to cassandra"; + std::string hashCpy = hash; + + makeAndExecuteAsyncWrite( + this, + std::make_pair(seq, hash), + [this](auto& params) { + CassandraStatement statement{insertLedgerTransaction_}; + statement.bindNextInt(params.data.first); + statement.bindNextBytes(params.data.second); + return statement; + }, + "ledger_transaction"); + makeAndExecuteAsyncWrite( + this, + std::make_tuple( + std::move(hash), + seq, + date, + std::move(transaction), + std::move(metadata)), + [this](auto& params) { + CassandraStatement statement{insertTransaction_}; + auto& [hash, sequence, date, transaction, metadata] = params.data; + statement.bindNextBytes(hash); + statement.bindNextInt(sequence); + statement.bindNextInt(date); + statement.bindNextBytes(transaction); + statement.bindNextBytes(metadata); + return statement; + }, + "transaction"); +} + +void +CassandraBackend::writeNFTs(std::vector&& data) +{ + for (NFTsData const& record : data) + { + makeAndExecuteAsyncWrite( + this, + std::make_tuple( + record.tokenID, + record.ledgerSequence, + record.owner, + record.isBurned), + [this](auto const& params) { + CassandraStatement statement{insertNFT_}; + auto const& [tokenID, lgrSeq, owner, isBurned] = params.data; + statement.bindNextBytes(tokenID); + 
statement.bindNextInt(lgrSeq); + statement.bindNextBytes(owner); + statement.bindNextBoolean(isBurned); + return statement; + }, + "nf_tokens"); + + // If `uri` is set (and it can be set to an empty uri), we know this + // is a net-new NFT. That is, this NFT has not been seen before by us + // _OR_ it is in the extreme edge case of a re-minted NFT ID with the + // same NFT ID as an already-burned token. In this case, we need to + // record the URI and link to the issuer_nf_tokens table. + if (record.uri) + { + makeAndExecuteAsyncWrite( + this, + std::make_tuple(record.tokenID), + [this](auto const& params) { + CassandraStatement statement{insertIssuerNFT_}; + auto const& [tokenID] = params.data; + statement.bindNextBytes(ripple::nft::getIssuer(tokenID)); + statement.bindNextInt( + ripple::nft::toUInt32(ripple::nft::getTaxon(tokenID))); + statement.bindNextBytes(tokenID); + return statement; + }, + "issuer_nf_tokens"); + + makeAndExecuteAsyncWrite( + this, + std::make_tuple( + record.tokenID, record.ledgerSequence, record.uri.value()), + [this](auto const& params) { + CassandraStatement statement{insertNFTURI_}; + auto const& [tokenID, lgrSeq, uri] = params.data; + statement.bindNextBytes(tokenID); + statement.bindNextInt(lgrSeq); + statement.bindNextBytes(uri); + return statement; + }, + "nf_token_uris"); + } + } +} + +std::optional +CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const +{ + log_.trace() << "Fetching from cassandra"; + CassandraStatement statement{selectLedgerRange_}; + CassandraResult result = executeAsyncRead(statement, yield); + + if (!result) + { + log_.error() << "No rows"; + return {}; + } + LedgerRange range; + range.maxSequence = range.minSequence = result.getUInt32(); + if (result.nextRow()) + { + range.maxSequence = result.getUInt32(); + } + if (range.minSequence > range.maxSequence) + { + std::swap(range.minSequence, range.maxSequence); + } + return range; +} + +std::vector 
+CassandraBackend::fetchAllTransactionsInLedger( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const +{ + auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield); + return fetchTransactions(hashes, yield); +} + +template +struct ReadCallbackData +{ + using handler_type = typename Result::completion_handler_type; + + std::atomic_int& numOutstanding; + handler_type handler; + std::function onSuccess; + + std::atomic_bool errored = false; + ReadCallbackData( + std::atomic_int& numOutstanding, + handler_type& handler, + std::function onSuccess) + : numOutstanding(numOutstanding), handler(handler), onSuccess(onSuccess) + { + } + + void + finish(CassFuture* fut) + { + CassError rc = cass_future_error_code(fut); + if (rc != CASS_OK) + { + errored = true; + } + else + { + CassandraResult result{cass_future_get_result(fut)}; + onSuccess(result); + } + + if (--numOutstanding == 0) + resume(); + } + + void + resume() + { + boost::asio::post( + boost::asio::get_associated_executor(handler), + [handler = std::move(handler)]() mutable { + handler(boost::system::error_code{}); + }); + } +}; + +void +processAsyncRead(CassFuture* fut, void* cbData) +{ + ReadCallbackData& cb = + *static_cast*>(cbData); + cb.finish(fut); +} + +std::vector +CassandraBackend::fetchTransactions( + std::vector const& hashes, + boost::asio::yield_context& yield) const +{ + if (hashes.size() == 0) + return {}; + numReadRequestsOutstanding_ += hashes.size(); + + handler_type handler(std::forward(yield)); + result_type result(handler); + + std::size_t const numHashes = hashes.size(); + std::atomic_int numOutstanding = numHashes; + std::vector results{numHashes}; + std::vector>> cbs; + cbs.reserve(numHashes); + auto timeDiff = util::timed([&]() { + for (std::size_t i = 0; i < hashes.size(); ++i) + { + CassandraStatement statement{selectTransaction_}; + statement.bindNextBytes(hashes[i]); + + cbs.push_back(std::make_shared>( + numOutstanding, handler, [i, 
&results](auto& result) { + if (result.hasResult()) + results[i] = { + result.getBytes(), + result.getBytes(), + result.getUInt32(), + result.getUInt32()}; + })); + + executeAsyncRead(statement, processAsyncRead, *cbs[i]); + } + assert(results.size() == cbs.size()); + + // suspend the coroutine until completion handler is called. + result.get(); + numReadRequestsOutstanding_ -= hashes.size(); + }); + for (auto const& cb : cbs) + { + if (cb->errored) + throw DatabaseTimeout(); + } + + log_.debug() << "Fetched " << numHashes + << " transactions from Cassandra in " << timeDiff + << " milliseconds"; + return results; +} + +std::vector +CassandraBackend::fetchAllTransactionHashesInLedger( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const +{ + CassandraStatement statement{selectAllTransactionHashesInLedger_}; + statement.bindNextInt(ledgerSequence); + auto start = std::chrono::system_clock::now(); + + CassandraResult result = executeAsyncRead(statement, yield); + + auto end = std::chrono::system_clock::now(); + if (!result) + { + log_.error() << "No rows. 
Ledger = " << std::to_string(ledgerSequence); + return {}; + } + std::vector hashes; + do + { + hashes.push_back(result.getUInt256()); + } while (result.nextRow()); + log_.debug() << "Fetched " << hashes.size() + << " transaction hashes from Cassandra in " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds"; + return hashes; +} + +std::optional +CassandraBackend::fetchNFT( + ripple::uint256 const& tokenID, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const +{ + CassandraStatement nftStatement{selectNFT_}; + nftStatement.bindNextBytes(tokenID); + nftStatement.bindNextInt(ledgerSequence); + CassandraResult nftResponse = executeAsyncRead(nftStatement, yield); + if (!nftResponse) + return {}; + + NFT result; + result.tokenID = tokenID; + result.ledgerSequence = nftResponse.getUInt32(); + result.owner = nftResponse.getBytes(); + result.isBurned = nftResponse.getBool(); + + // now fetch URI. Usually we will have the URI even for burned NFTs, but + // if the first ledger on this clio included NFTokenBurn transactions + // we will not have the URIs for any of those tokens. In any other case + // not having the URI indicates something went wrong with our data. + // + // TODO - in the future would be great for any handlers that use this + // could inject a warning in this case (the case of not having a URI + // because it was burned in the first ledger) to indicate that even though + // we are returning a blank URI, the NFT might have had one. 
+ CassandraStatement uriStatement{selectNFTURI_}; + uriStatement.bindNextBytes(tokenID); + uriStatement.bindNextInt(ledgerSequence); + CassandraResult uriResponse = executeAsyncRead(uriStatement, yield); + if (uriResponse.hasResult()) + result.uri = uriResponse.getBytes(); + + return result; +} + +TransactionsAndCursor +CassandraBackend::fetchNFTTransactions( + ripple::uint256 const& tokenID, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield) const +{ + auto cursor = cursorIn; + auto rng = fetchLedgerRange(); + if (!rng) + return {{}, {}}; + + CassandraStatement statement = forward + ? CassandraStatement(selectNFTTxForward_) + : CassandraStatement(selectNFTTx_); + + statement.bindNextBytes(tokenID); + + if (cursor) + { + statement.bindNextIntTuple( + cursor->ledgerSequence, cursor->transactionIndex); + log_.debug() << "token_id = " << ripple::strHex(tokenID) + << " tuple = " << cursor->ledgerSequence + << cursor->transactionIndex; + } + else + { + int const seq = forward ? rng->minSequence : rng->maxSequence; + int const placeHolder = + forward ? 
0 : std::numeric_limits::max(); + + statement.bindNextIntTuple(placeHolder, placeHolder); + log_.debug() << "token_id = " << ripple::strHex(tokenID) + << " idx = " << seq << " tuple = " << placeHolder; + } + + statement.bindNextUInt(limit); + + CassandraResult result = executeAsyncRead(statement, yield); + + if (!result.hasResult()) + { + log_.debug() << "No rows returned"; + return {}; + } + + std::vector hashes = {}; + auto numRows = result.numRows(); + log_.info() << "num_rows = " << numRows; + do + { + hashes.push_back(result.getUInt256()); + if (--numRows == 0) + { + log_.debug() << "Setting cursor"; + auto const [lgrSeq, txnIdx] = result.getInt64Tuple(); + cursor = { + static_cast(lgrSeq), + static_cast(txnIdx)}; + + // Only modify if forward because forward query + // (selectNFTTxForward_) orders by ledger/tx sequence >= whereas + // reverse query (selectNFTTx_) orders by ledger/tx sequence <. + if (forward) + ++cursor->transactionIndex; + } + } while (result.nextRow()); + + auto txns = fetchTransactions(hashes, yield); + log_.debug() << "Txns = " << txns.size(); + + if (txns.size() == limit) + { + log_.debug() << "Returning cursor"; + return {txns, cursor}; + } + + return {txns, {}}; +} + +TransactionsAndCursor +CassandraBackend::fetchAccountTransactions( + ripple::AccountID const& account, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield) const +{ + auto rng = fetchLedgerRange(); + if (!rng) + return {{}, {}}; + + CassandraStatement statement = [this, forward]() { + if (forward) + return CassandraStatement{selectAccountTxForward_}; + else + return CassandraStatement{selectAccountTx_}; + }(); + + auto cursor = cursorIn; + statement.bindNextBytes(account); + if (cursor) + { + statement.bindNextIntTuple( + cursor->ledgerSequence, cursor->transactionIndex); + log_.debug() << "account = " << ripple::strHex(account) + << " tuple = " << cursor->ledgerSequence + << cursor->transactionIndex; + } 
+ else + { + int const seq = forward ? rng->minSequence : rng->maxSequence; + int const placeHolder = + forward ? 0 : std::numeric_limits::max(); + + statement.bindNextIntTuple(placeHolder, placeHolder); + log_.debug() << "account = " << ripple::strHex(account) + << " idx = " << seq << " tuple = " << placeHolder; + } + statement.bindNextUInt(limit); + + CassandraResult result = executeAsyncRead(statement, yield); + + if (!result.hasResult()) + { + log_.debug() << "No rows returned"; + return {}; + } + + std::vector hashes = {}; + auto numRows = result.numRows(); + log_.info() << "num_rows = " << std::to_string(numRows); + do + { + hashes.push_back(result.getUInt256()); + if (--numRows == 0) + { + log_.debug() << "Setting cursor"; + auto [lgrSeq, txnIdx] = result.getInt64Tuple(); + cursor = { + static_cast(lgrSeq), + static_cast(txnIdx)}; + + // Only modify if forward because forward query + // (selectAccountTxForward_) orders by ledger/tx sequence >= whereas + // reverse query (selectAccountTx_) orders by ledger/tx sequence <. 
+ if (forward) + ++cursor->transactionIndex; + } + } while (result.nextRow()); + + auto txns = fetchTransactions(hashes, yield); + log_.debug() << "Txns = " << txns.size(); + + if (txns.size() == limit) + { + log_.debug() << "Returning cursor"; + return {txns, cursor}; + } + + return {txns, {}}; +} + +std::optional +CassandraBackend::doFetchSuccessorKey( + ripple::uint256 key, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const +{ + log_.trace() << "Fetching from cassandra"; + CassandraStatement statement{selectSuccessor_}; + statement.bindNextBytes(key); + statement.bindNextInt(ledgerSequence); + + CassandraResult result = executeAsyncRead(statement, yield); + + if (!result) + { + log_.debug() << "No rows"; + return {}; + } + auto next = result.getUInt256(); + if (next == lastKey) + return {}; + return next; +} + +std::optional +CassandraBackend::doFetchLedgerObject( + ripple::uint256 const& key, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const +{ + log_.trace() << "Fetching from cassandra"; + CassandraStatement statement{selectObject_}; + statement.bindNextBytes(key); + statement.bindNextInt(sequence); + + CassandraResult result = executeAsyncRead(statement, yield); + + if (!result) + { + log_.debug() << "No rows"; + return {}; + } + auto res = result.getBytes(); + if (res.size()) + return res; + return {}; +} + +std::vector +CassandraBackend::doFetchLedgerObjects( + std::vector const& keys, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const +{ + if (keys.size() == 0) + return {}; + + numReadRequestsOutstanding_ += keys.size(); + + handler_type handler(std::forward(yield)); + result_type result(handler); + + std::size_t const numKeys = keys.size(); + log_.trace() << "Fetching " << numKeys << " records from Cassandra"; + std::atomic_int numOutstanding = numKeys; + std::vector results{numKeys}; + std::vector>> cbs; + cbs.reserve(numKeys); + for (std::size_t i = 0; i < keys.size(); ++i) + 
{ + cbs.push_back(std::make_shared>( + numOutstanding, handler, [i, &results](auto& result) { + if (result.hasResult()) + results[i] = result.getBytes(); + })); + CassandraStatement statement{selectObject_}; + statement.bindNextBytes(keys[i]); + statement.bindNextInt(sequence); + executeAsyncRead(statement, processAsyncRead, *cbs[i]); + } + assert(results.size() == cbs.size()); + + // suspend the coroutine until completion handler is called. + result.get(); + numReadRequestsOutstanding_ -= keys.size(); + + for (auto const& cb : cbs) + { + if (cb->errored) + throw DatabaseTimeout(); + } + + log_.trace() << "Fetched " << numKeys << " records from Cassandra"; + return results; +} + +std::vector +CassandraBackend::fetchLedgerDiff( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const +{ + CassandraStatement statement{selectDiff_}; + statement.bindNextInt(ledgerSequence); + auto start = std::chrono::system_clock::now(); + + CassandraResult result = executeAsyncRead(statement, yield); + + auto end = std::chrono::system_clock::now(); + + if (!result) + { + log_.error() << "No rows. Ledger = " << std::to_string(ledgerSequence); + return {}; + } + std::vector keys; + do + { + keys.push_back(result.getUInt256()); + } while (result.nextRow()); + log_.debug() << "Fetched " << keys.size() + << " diff hashes from Cassandra in " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds"; + auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); + std::vector results; + std::transform( + keys.begin(), + keys.end(), + objs.begin(), + std::back_inserter(results), + [](auto const& k, auto const& o) { + return LedgerObject{k, o}; + }); + return results; +} + +bool +CassandraBackend::doOnlineDelete( + std::uint32_t const numLedgersToKeep, + boost::asio::yield_context& yield) const +{ + // calculate TTL + // ledgers close roughly every 4 seconds. 
We double the TTL so that way + // there is a window of time to update the database, to prevent unchanging + // records from being deleted. + auto rng = fetchLedgerRange(); + if (!rng) + return false; + std::uint32_t minLedger = rng->maxSequence - numLedgersToKeep; + if (minLedger <= rng->minSequence) + return false; + auto bind = [this](auto& params) { + auto& [key, seq, obj] = params.data; + CassandraStatement statement{insertObject_}; + statement.bindNextBytes(key); + statement.bindNextInt(seq); + statement.bindNextBytes(obj); + return statement; + }; + std::condition_variable cv; + std::mutex mtx; + std::vector, + typename std::remove_reference::type>>> + cbs; + std::uint32_t concurrentLimit = 10; + std::atomic_int numOutstanding = 0; + + // iterate through latest ledger, updating TTL + std::optional cursor; + while (true) + { + auto [objects, curCursor] = retryOnTimeout([&]() { + return fetchLedgerPage(cursor, minLedger, 256, false, yield); + }); + + for (auto& obj : objects) + { + ++numOutstanding; + cbs.push_back(makeAndExecuteBulkAsyncWrite( + this, + std::make_tuple( + std::move(obj.key), minLedger, std::move(obj.blob)), + bind, + numOutstanding, + mtx, + cv)); + + std::unique_lock lck(mtx); + log_.trace() << "Got the mutex"; + cv.wait(lck, [&numOutstanding, concurrentLimit]() { + return numOutstanding < concurrentLimit; + }); + } + log_.debug() << "Fetched a page"; + cursor = curCursor; + if (!cursor) + break; + } + std::unique_lock lck(mtx); + cv.wait(lck, [&numOutstanding]() { return numOutstanding == 0; }); + CassandraStatement statement{deleteLedgerRange_}; + statement.bindNextInt(minLedger); + executeSyncWrite(statement); + // update ledger_range + return true; +} + +bool +CassandraBackend::isTooBusy() const +{ + return numReadRequestsOutstanding_ >= maxReadRequestsOutstanding; +} + +void +CassandraBackend::open(bool readOnly) +{ + if (open_) + { + assert(false); + log_.error() << "Database is already open"; + return; + } + + log_.info() << "Opening 
Cassandra Backend"; + + CassCluster* cluster = cass_cluster_new(); + if (!cluster) + throw std::runtime_error("nodestore:: Failed to create CassCluster"); + + std::string secureConnectBundle = + config_.valueOr("secure_connect_bundle", ""); + + if (!secureConnectBundle.empty()) + { + /* Setup driver to connect to the cloud using the secure connection + * bundle */ + if (cass_cluster_set_cloud_secure_connection_bundle( + cluster, secureConnectBundle.c_str()) != CASS_OK) + { + log_.error() << "Unable to configure cloud using the " + "secure connection bundle: " + << secureConnectBundle; + throw std::runtime_error( + "nodestore: Failed to connect using secure connection " + "bundle"); + return; + } + } + else + { + std::string contact_points = config_.valueOrThrow( + "contact_points", + "nodestore: Missing contact_points in Cassandra config"); + CassError rc = + cass_cluster_set_contact_points(cluster, contact_points.c_str()); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error setting Cassandra contact_points: " + << contact_points << ", result: " << rc << ", " + << cass_error_desc(rc); + + throw std::runtime_error(ss.str()); + } + + auto port = config_.maybeValue("port"); + if (port) + { + rc = cass_cluster_set_port(cluster, *port); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error setting Cassandra port: " << *port + << ", result: " << rc << ", " << cass_error_desc(rc); + + throw std::runtime_error(ss.str()); + } + } + } + cass_cluster_set_token_aware_routing(cluster, cass_true); + CassError rc = + cass_cluster_set_protocol_version(cluster, CASS_PROTOCOL_VERSION_V4); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error setting cassandra protocol version: " + << ", result: " << rc << ", " << cass_error_desc(rc); + + throw std::runtime_error(ss.str()); + } + + auto username = config_.maybeValue("username"); + if (username) + { + log_.debug() << "user = " << *username; + auto password = 
config_.value("password"); + cass_cluster_set_credentials( + cluster, username->c_str(), password.c_str()); + } + auto threads = + config_.valueOr("threads", std::thread::hardware_concurrency()); + + rc = cass_cluster_set_num_threads_io(cluster, threads); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error setting Cassandra io threads to " << threads + << ", result: " << rc << ", " << cass_error_desc(rc); + throw std::runtime_error(ss.str()); + } + + maxWriteRequestsOutstanding = config_.valueOr( + "max_write_requests_outstanding", maxWriteRequestsOutstanding); + maxReadRequestsOutstanding = config_.valueOr( + "max_read_requests_outstanding", maxReadRequestsOutstanding); + syncInterval_ = config_.valueOr("sync_interval", syncInterval_); + + log_.info() << "Sync interval is " << syncInterval_ + << ". max write requests outstanding is " + << maxWriteRequestsOutstanding + << ". max read requests outstanding is " + << maxReadRequestsOutstanding; + + cass_cluster_set_request_timeout(cluster, 10000); + + rc = cass_cluster_set_queue_size_io( + cluster, + maxWriteRequestsOutstanding + + maxReadRequestsOutstanding); // This number needs to scale w/ the + // number of request per sec + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error setting Cassandra max core connections per " + "host" + << ", result: " << rc << ", " << cass_error_desc(rc); + log_.error() << ss.str(); + throw std::runtime_error(ss.str()); + } + + if (auto certfile = config_.maybeValue("certfile"); certfile) + { + std::ifstream fileStream( + boost::filesystem::path(*certfile).string(), std::ios::in); + if (!fileStream) + { + std::stringstream ss; + ss << "opening config file " << *certfile; + throw std::system_error(errno, std::generic_category(), ss.str()); + } + std::string cert( + std::istreambuf_iterator{fileStream}, + std::istreambuf_iterator{}); + if (fileStream.bad()) + { + std::stringstream ss; + ss << "reading config file " << *certfile; + throw 
std::system_error(errno, std::generic_category(), ss.str()); + } + + CassSsl* context = cass_ssl_new(); + cass_ssl_set_verify_flags(context, CASS_SSL_VERIFY_NONE); + rc = cass_ssl_add_trusted_cert(context, cert.c_str()); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error setting Cassandra ssl context: " << rc + << ", " << cass_error_desc(rc); + throw std::runtime_error(ss.str()); + } + + cass_cluster_set_ssl(cluster, context); + cass_ssl_free(context); + } + + auto keyspace = config_.valueOr("keyspace", ""); + if (keyspace.empty()) + { + log_.warn() << "No keyspace specified. Using keyspace clio"; + keyspace = "clio"; + } + + auto rf = config_.valueOr("replication_factor", 3); + auto tablePrefix = config_.valueOr("table_prefix", ""); + if (tablePrefix.empty()) + { + log_.warn() << "Table prefix is empty"; + } + + cass_cluster_set_connect_timeout(cluster, 10000); + + auto ttl = ttl_ * 2; + log_.info() << "Setting ttl to " << std::to_string(ttl); + + auto executeSimpleStatement = [this](std::string const& query) { + CassStatement* statement = makeStatement(query.c_str(), 0); + CassFuture* fut = cass_session_execute(session_.get(), statement); + CassError rc = cass_future_error_code(fut); + cass_future_free(fut); + cass_statement_free(statement); + if (rc != CASS_OK && rc != CASS_ERROR_SERVER_INVALID_QUERY) + { + std::stringstream ss; + ss << "nodestore: Error executing simple statement: " << rc << ", " + << cass_error_desc(rc) << " - " << query; + log_.error() << ss.str(); + return false; + } + return true; + }; + CassFuture* fut; + bool setupSessionAndTable = false; + while (!setupSessionAndTable) + { + std::this_thread::sleep_for(std::chrono::seconds(1)); + session_.reset(cass_session_new()); + assert(session_); + + fut = cass_session_connect_keyspace( + session_.get(), cluster, keyspace.c_str()); + rc = cass_future_error_code(fut); + cass_future_free(fut); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error connecting 
Cassandra session keyspace: " + << rc << ", " << cass_error_desc(rc) + << ", trying to create it ourselves"; + log_.error() << ss.str(); + // if the keyspace doesn't exist, try to create it + session_.reset(cass_session_new()); + fut = cass_session_connect(session_.get(), cluster); + rc = cass_future_error_code(fut); + cass_future_free(fut); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error connecting Cassandra session at all: " + << rc << ", " << cass_error_desc(rc); + log_.error() << ss.str(); + } + else + { + std::stringstream query; + query << "CREATE KEYSPACE IF NOT EXISTS " << keyspace + << " WITH replication = {'class': 'SimpleStrategy', " + "'replication_factor': '" + << std::to_string(rf) << "'} AND durable_writes = true"; + if (!executeSimpleStatement(query.str())) + continue; + query.str(""); + query << "USE " << keyspace; + if (!executeSimpleStatement(query.str())) + continue; + } + + continue; + } + + std::stringstream query; + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "objects" + << " ( key blob, sequence bigint, object blob, PRIMARY " + "KEY(key, " + "sequence)) WITH CLUSTERING ORDER BY (sequence DESC) AND" + << " default_time_to_live = " << std::to_string(ttl); + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "objects" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query + << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "transactions" + << " ( hash blob PRIMARY KEY, ledger_sequence bigint, date bigint, " + "transaction blob, metadata blob)" + << " WITH default_time_to_live = " << std::to_string(ttl); + if (!executeSimpleStatement(query.str())) + continue; + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix + << "ledger_transactions" + << " ( ledger_sequence bigint, hash blob, PRIMARY " + "KEY(ledger_sequence, hash))" + << " WITH default_time_to_live = " << 
std::to_string(ttl); + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "transactions" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + query.str(""); + query << "SELECT * FROM " << tablePrefix << "ledger_transactions" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "successor" + << " (key blob, seq bigint, next blob, PRIMARY KEY (key, seq)) " + " WITH default_time_to_live = " + << std::to_string(ttl); + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "successor" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "diff" + << " (seq bigint, key blob, PRIMARY KEY (seq, key)) " + " WITH default_time_to_live = " + << std::to_string(ttl); + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "diff" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "account_tx" + << " ( account blob, seq_idx " + "tuple, " + " hash blob, " + "PRIMARY KEY " + "(account, seq_idx)) WITH " + "CLUSTERING ORDER BY (seq_idx desc)" + << " AND default_time_to_live = " << std::to_string(ttl); + + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "account_tx" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledgers" + << " ( sequence bigint PRIMARY KEY, header blob )" + << " WITH default_time_to_live = " << std::to_string(ttl); + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + 
query << "SELECT * FROM " << tablePrefix << "ledgers" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledger_hashes" + << " (hash blob PRIMARY KEY, sequence bigint)" + << " WITH default_time_to_live = " << std::to_string(ttl); + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "ledger_hashes" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledger_range" + << " (is_latest boolean PRIMARY KEY, sequence bigint)"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "ledger_range" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "nf_tokens" + << " (" + << " token_id blob," + << " sequence bigint," + << " owner blob," + << " is_burned boolean," + << " PRIMARY KEY (token_id, sequence)" + << " )" + << " WITH CLUSTERING ORDER BY (sequence DESC)" + << " AND default_time_to_live = " << ttl; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "nf_tokens" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix + << "issuer_nf_tokens_v2" + << " (" + << " issuer blob," + << " taxon bigint," + << " token_id blob," + << " PRIMARY KEY (issuer, taxon, token_id)" + << " )"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "issuer_nf_tokens_v2" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "nf_token_uris" + << " 
(" + << " token_id blob," + << " sequence bigint," + << " uri blob," + << " PRIMARY KEY (token_id, sequence)" + << " )" + << " WITH CLUSTERING ORDER BY (sequence DESC)" + << " AND default_time_to_live = " << ttl; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "nf_token_uris" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix + << "nf_token_transactions" + << " (" + << " token_id blob," + << " seq_idx tuple," + << " hash blob," + << " PRIMARY KEY (token_id, seq_idx)" + << " )" + << " WITH CLUSTERING ORDER BY (seq_idx DESC)" + << " AND default_time_to_live = " << ttl; + if (!executeSimpleStatement(query.str())) + continue; + + query.str(""); + query << "SELECT * FROM " << tablePrefix << "nf_token_transactions" + << " LIMIT 1"; + if (!executeSimpleStatement(query.str())) + continue; + + setupSessionAndTable = true; + } + + cass_cluster_free(cluster); + + bool setupPreparedStatements = false; + while (!setupPreparedStatements) + { + std::this_thread::sleep_for(std::chrono::seconds(1)); + std::stringstream query; + query << "INSERT INTO " << tablePrefix << "objects" + << " (key, sequence, object) VALUES (?, ?, ?)"; + if (!insertObject_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "INSERT INTO " << tablePrefix << "transactions" + << " (hash, ledger_sequence, date, transaction, metadata) VALUES " + "(?, ?, ?, ?, ?)"; + if (!insertTransaction_.prepareStatement(query, session_.get())) + continue; + query.str(""); + query << "INSERT INTO " << tablePrefix << "ledger_transactions" + << " (ledger_sequence, hash) VALUES " + "(?, ?)"; + if (!insertLedgerTransaction_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "INSERT INTO " << tablePrefix << "successor" + << " (key,seq,next) VALUES (?, ?, ?)"; + if 
(!insertSuccessor_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "INSERT INTO " << tablePrefix << "diff" + << " (seq,key) VALUES (?, ?)"; + if (!insertDiff_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT next FROM " << tablePrefix << "successor" + << " WHERE key = ? AND seq <= ? ORDER BY seq DESC LIMIT 1"; + if (!selectSuccessor_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT key FROM " << tablePrefix << "diff" + << " WHERE seq = ?"; + if (!selectDiff_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT object, sequence FROM " << tablePrefix << "objects" + << " WHERE key = ? AND sequence <= ? ORDER BY sequence DESC " + "LIMIT 1"; + + if (!selectObject_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT transaction, metadata, ledger_sequence, date FROM " + << tablePrefix << "transactions" + << " WHERE hash = ?"; + if (!selectTransaction_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT hash FROM " << tablePrefix << "ledger_transactions" + << " WHERE ledger_sequence = ?"; + if (!selectAllTransactionHashesInLedger_.prepareStatement( + query, session_.get())) + continue; + + query.str(""); + query << "SELECT key FROM " << tablePrefix << "objects " + << " WHERE TOKEN(key) >= ? and sequence <= ? " + << " PER PARTITION LIMIT 1 LIMIT ?" + << " ALLOW FILTERING"; + if (!selectLedgerPageKeys_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT object,key FROM " << tablePrefix << "objects " + << " WHERE TOKEN(key) >= ? and sequence <= ? " + << " PER PARTITION LIMIT 1 LIMIT ? ALLOW FILTERING"; + + if (!selectLedgerPage_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT TOKEN(key) FROM " << tablePrefix << "objects " + << " WHERE key = ? 
LIMIT 1"; + + if (!getToken_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " INSERT INTO " << tablePrefix << "account_tx" + << " (account, seq_idx, hash) " + << " VALUES (?,?,?)"; + if (!insertAccountTx_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " SELECT hash,seq_idx FROM " << tablePrefix << "account_tx" + << " WHERE account = ? " + << " AND seq_idx < ? LIMIT ?"; + if (!selectAccountTx_.prepareStatement(query, session_.get())) + continue; + query.str(""); + query << " SELECT hash,seq_idx FROM " << tablePrefix << "account_tx" + << " WHERE account = ? " + << " AND seq_idx >= ? ORDER BY seq_idx ASC LIMIT ?"; + if (!selectAccountTxForward_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "INSERT INTO " << tablePrefix << "nf_tokens" + << " (token_id,sequence,owner,is_burned)" + << " VALUES (?,?,?,?)"; + if (!insertNFT_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT sequence,owner,is_burned" + << " FROM " << tablePrefix << "nf_tokens WHERE" + << " token_id = ? AND" + << " sequence <= ?" + << " ORDER BY sequence DESC" + << " LIMIT 1"; + if (!selectNFT_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "INSERT INTO " << tablePrefix << "issuer_nf_tokens_v2" + << " (issuer,taxon,token_id)" + << " VALUES (?,?,?)"; + if (!insertIssuerNFT_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "INSERT INTO " << tablePrefix << "nf_token_uris" + << " (token_id,sequence,uri)" + << " VALUES (?,?,?)"; + if (!insertNFTURI_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT uri FROM " << tablePrefix << "nf_token_uris" + << " WHERE token_id = ? AND" + << " sequence <= ?" 
+ << " ORDER BY sequence DESC" + << " LIMIT 1"; + if (!selectNFTURI_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "INSERT INTO " << tablePrefix << "nf_token_transactions" + << " (token_id,seq_idx,hash)" + << " VALUES (?,?,?)"; + if (!insertNFTTx_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT hash,seq_idx" + << " FROM " << tablePrefix << "nf_token_transactions WHERE" + << " token_id = ? AND" + << " seq_idx < ?" + << " ORDER BY seq_idx DESC" + << " LIMIT ?"; + if (!selectNFTTx_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT hash,seq_idx" + << " FROM " << tablePrefix << "nf_token_transactions WHERE" + << " token_id = ? AND" + << " seq_idx >= ?" + << " ORDER BY seq_idx ASC" + << " LIMIT ?"; + if (!selectNFTTxForward_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " INSERT INTO " << tablePrefix << "ledgers " + << " (sequence, header) VALUES(?,?)"; + if (!insertLedgerHeader_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " INSERT INTO " << tablePrefix << "ledger_hashes" + << " (hash, sequence) VALUES(?,?)"; + if (!insertLedgerHash_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << "SELECT sequence FROM " << tablePrefix << "ledger_hashes " + << "WHERE hash = ? LIMIT 1"; + if (!selectLedgerByHash_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " update " << tablePrefix << "ledger_range" + << " set sequence = ? where is_latest = ? if sequence in " + "(?,null)"; + if (!updateLedgerRange_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " update " << tablePrefix << "ledger_range" + << " set sequence = ? 
where is_latest = false"; + if (!deleteLedgerRange_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " select header from " << tablePrefix + << "ledgers where sequence = ?"; + if (!selectLedgerBySeq_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " select sequence from " << tablePrefix + << "ledger_range where is_latest = true"; + if (!selectLatestLedger_.prepareStatement(query, session_.get())) + continue; + + query.str(""); + query << " SELECT sequence FROM " << tablePrefix << "ledger_range"; + if (!selectLedgerRange_.prepareStatement(query, session_.get())) + continue; + setupPreparedStatements = true; + } + + open_ = true; + + log_.info() << "Opened CassandraBackend successfully"; +} +} // namespace Backend diff --git a/src/backend/CassandraBackend.h b/src/backend/CassandraBackend.h new file mode 100644 index 00000000..afea125b --- /dev/null +++ b/src/backend/CassandraBackend.h @@ -0,0 +1,1280 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace Backend { + +class CassandraPreparedStatement +{ +private: + clio::Logger log_{"Backend"}; + CassPrepared const* prepared_ = nullptr; + +public: + CassPrepared const* + get() const + { + return prepared_; + } + + bool + prepareStatement(std::stringstream const& query, CassSession* session) + { + return prepareStatement(query.str().c_str(), session); + } + + bool + prepareStatement(std::string const& query, CassSession* session) + { + return prepareStatement(query.c_str(), session); + } + + bool + prepareStatement(char const* query, CassSession* session) + { + if (!query) + throw std::runtime_error("prepareStatement: null query"); + if (!session) + throw std::runtime_error("prepareStatement: null sesssion"); + CassFuture* prepareFuture = cass_session_prepare(session, query); + /* Wait for the statement to prepare and get the result */ + CassError rc = cass_future_error_code(prepareFuture); + if (rc == CASS_OK) + { + prepared_ = cass_future_get_prepared(prepareFuture); + } + else + { + std::stringstream ss; + ss << "nodestore: Error preparing statement : " << rc << ", " + << cass_error_desc(rc) << ". 
query : " << query; + log_.error() << ss.str(); + } + cass_future_free(prepareFuture); + return rc == CASS_OK; + } + + ~CassandraPreparedStatement() + { + log_.trace() << "called"; + if (prepared_) + { + cass_prepared_free(prepared_); + prepared_ = nullptr; + } + } +}; + +class CassandraStatement +{ + CassStatement* statement_ = nullptr; + size_t curBindingIndex_ = 0; + clio::Logger log_{"Backend"}; + +public: + CassandraStatement(CassandraPreparedStatement const& prepared) + { + statement_ = cass_prepared_bind(prepared.get()); + cass_statement_set_consistency(statement_, CASS_CONSISTENCY_QUORUM); + } + + CassandraStatement(CassandraStatement&& other) + { + statement_ = other.statement_; + other.statement_ = nullptr; + curBindingIndex_ = other.curBindingIndex_; + other.curBindingIndex_ = 0; + } + + CassandraStatement(CassandraStatement const& other) = delete; + + CassStatement* + get() const + { + return statement_; + } + + void + bindNextBoolean(bool val) + { + if (!statement_) + throw std::runtime_error( + "CassandraStatement::bindNextBoolean - statement_ is null"); + CassError rc = cass_statement_bind_bool( + statement_, curBindingIndex_, static_cast(val)); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Error binding boolean to statement: " << rc << ", " + << cass_error_desc(rc); + log_.error() << ss.str(); + throw std::runtime_error(ss.str()); + } + curBindingIndex_++; + } + + void + bindNextBytes(const char* data, std::uint32_t const size) + { + bindNextBytes((unsigned const char*)(data), size); + } + + void + bindNextBytes(ripple::uint256 const& data) + { + bindNextBytes(data.data(), data.size()); + } + void + bindNextBytes(std::vector const& data) + { + bindNextBytes(data.data(), data.size()); + } + void + bindNextBytes(ripple::AccountID const& data) + { + bindNextBytes(data.data(), data.size()); + } + + void + bindNextBytes(std::string const& data) + { + bindNextBytes(data.data(), data.size()); + } + + void + bindNextBytes(void const* key, 
std::uint32_t const size) + { + bindNextBytes(static_cast(key), size); + } + + void + bindNextBytes(const unsigned char* data, std::uint32_t const size) + { + if (!statement_) + throw std::runtime_error( + "CassandraStatement::bindNextBytes - statement_ is null"); + CassError rc = cass_statement_bind_bytes( + statement_, + curBindingIndex_, + static_cast(data), + size); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Error binding bytes to statement: " << rc << ", " + << cass_error_desc(rc); + log_.error() << ss.str(); + throw std::runtime_error(ss.str()); + } + curBindingIndex_++; + } + + void + bindNextUInt(std::uint32_t const value) + { + if (!statement_) + throw std::runtime_error( + "CassandraStatement::bindNextUInt - statement_ is null"); + log_.trace() << std::to_string(curBindingIndex_) << " " + << std::to_string(value); + CassError rc = + cass_statement_bind_int32(statement_, curBindingIndex_, value); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Error binding uint to statement: " << rc << ", " + << cass_error_desc(rc); + log_.error() << ss.str(); + throw std::runtime_error(ss.str()); + } + curBindingIndex_++; + } + + void + bindNextInt(std::uint32_t const value) + { + bindNextInt(static_cast(value)); + } + + void + bindNextInt(int64_t value) + { + if (!statement_) + throw std::runtime_error( + "CassandraStatement::bindNextInt - statement_ is null"); + CassError rc = + cass_statement_bind_int64(statement_, curBindingIndex_, value); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Error binding int to statement: " << rc << ", " + << cass_error_desc(rc); + log_.error() << ss.str(); + throw std::runtime_error(ss.str()); + } + curBindingIndex_++; + } + + void + bindNextIntTuple(std::uint32_t const first, std::uint32_t const second) + { + CassTuple* tuple = cass_tuple_new(2); + CassError rc = cass_tuple_set_int64(tuple, 0, first); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Error binding int to tuple: " << rc << ", " 
+ << cass_error_desc(rc); + log_.error() << ss.str(); + throw std::runtime_error(ss.str()); + } + rc = cass_tuple_set_int64(tuple, 1, second); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Error binding int to tuple: " << rc << ", " + << cass_error_desc(rc); + log_.error() << ss.str(); + throw std::runtime_error(ss.str()); + } + rc = cass_statement_bind_tuple(statement_, curBindingIndex_, tuple); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Error binding tuple to statement: " << rc << ", " + << cass_error_desc(rc); + log_.error() << ss.str(); + throw std::runtime_error(ss.str()); + } + cass_tuple_free(tuple); + curBindingIndex_++; + } + + ~CassandraStatement() + { + if (statement_) + cass_statement_free(statement_); + } +}; + +class CassandraResult +{ + clio::Logger log_{"Backend"}; + CassResult const* result_ = nullptr; + CassRow const* row_ = nullptr; + CassIterator* iter_ = nullptr; + size_t curGetIndex_ = 0; + +public: + CassandraResult() : result_(nullptr), row_(nullptr), iter_(nullptr) + { + } + + CassandraResult& + operator=(CassandraResult&& other) + { + result_ = other.result_; + row_ = other.row_; + iter_ = other.iter_; + curGetIndex_ = other.curGetIndex_; + other.result_ = nullptr; + other.row_ = nullptr; + other.iter_ = nullptr; + other.curGetIndex_ = 0; + return *this; + } + + CassandraResult(CassandraResult const& other) = delete; + CassandraResult& + operator=(CassandraResult const& other) = delete; + + CassandraResult(CassResult const* result) : result_(result) + { + if (!result_) + throw std::runtime_error("CassandraResult - result is null"); + iter_ = cass_iterator_from_result(result_); + if (cass_iterator_next(iter_)) + { + row_ = cass_iterator_get_row(iter_); + } + } + + bool + isOk() + { + return result_ != nullptr; + } + + bool + hasResult() + { + return row_ != nullptr; + } + + bool + operator!() + { + return !hasResult(); + } + + size_t + numRows() + { + return cass_result_row_count(result_); + } + + bool + nextRow() 
+ { + curGetIndex_ = 0; + if (cass_iterator_next(iter_)) + { + row_ = cass_iterator_get_row(iter_); + return true; + } + row_ = nullptr; + return false; + } + + std::vector + getBytes() + { + if (!row_) + throw std::runtime_error("CassandraResult::getBytes - no result"); + cass_byte_t const* buf; + std::size_t bufSize; + CassError rc = cass_value_get_bytes( + cass_row_get_column(row_, curGetIndex_), &buf, &bufSize); + if (rc != CASS_OK) + { + std::stringstream msg; + msg << "CassandraResult::getBytes - error getting value: " << rc + << ", " << cass_error_desc(rc); + log_.error() << msg.str(); + throw std::runtime_error(msg.str()); + } + curGetIndex_++; + return {buf, buf + bufSize}; + } + + ripple::uint256 + getUInt256() + { + if (!row_) + throw std::runtime_error("CassandraResult::uint256 - no result"); + cass_byte_t const* buf; + std::size_t bufSize; + CassError rc = cass_value_get_bytes( + cass_row_get_column(row_, curGetIndex_), &buf, &bufSize); + if (rc != CASS_OK) + { + std::stringstream msg; + msg << "CassandraResult::getuint256 - error getting value: " << rc + << ", " << cass_error_desc(rc); + log_.error() << msg.str(); + throw std::runtime_error(msg.str()); + } + curGetIndex_++; + return ripple::uint256::fromVoid(buf); + } + + int64_t + getInt64() + { + if (!row_) + throw std::runtime_error("CassandraResult::getInt64 - no result"); + cass_int64_t val; + CassError rc = + cass_value_get_int64(cass_row_get_column(row_, curGetIndex_), &val); + if (rc != CASS_OK) + { + std::stringstream msg; + msg << "CassandraResult::getInt64 - error getting value: " << rc + << ", " << cass_error_desc(rc); + log_.error() << msg.str(); + throw std::runtime_error(msg.str()); + } + ++curGetIndex_; + return val; + } + + std::uint32_t + getUInt32() + { + return static_cast(getInt64()); + } + + std::pair + getInt64Tuple() + { + if (!row_) + throw std::runtime_error( + "CassandraResult::getInt64Tuple - no result"); + + CassValue const* tuple = cass_row_get_column(row_, curGetIndex_); 
+ CassIterator* tupleIter = cass_iterator_from_tuple(tuple); + + if (!cass_iterator_next(tupleIter)) + { + cass_iterator_free(tupleIter); + throw std::runtime_error( + "CassandraResult::getInt64Tuple - failed to iterate tuple"); + } + + CassValue const* value = cass_iterator_get_value(tupleIter); + std::int64_t first; + cass_value_get_int64(value, &first); + if (!cass_iterator_next(tupleIter)) + { + cass_iterator_free(tupleIter); + throw std::runtime_error( + "CassandraResult::getInt64Tuple - failed to iterate tuple"); + } + + value = cass_iterator_get_value(tupleIter); + std::int64_t second; + cass_value_get_int64(value, &second); + cass_iterator_free(tupleIter); + + ++curGetIndex_; + return {first, second}; + } + + std::pair + getBytesTuple() + { + cass_byte_t const* buf; + std::size_t bufSize; + + if (!row_) + throw std::runtime_error( + "CassandraResult::getBytesTuple - no result"); + CassValue const* tuple = cass_row_get_column(row_, curGetIndex_); + CassIterator* tupleIter = cass_iterator_from_tuple(tuple); + if (!cass_iterator_next(tupleIter)) + throw std::runtime_error( + "CassandraResult::getBytesTuple - failed to iterate tuple"); + CassValue const* value = cass_iterator_get_value(tupleIter); + cass_value_get_bytes(value, &buf, &bufSize); + Blob first{buf, buf + bufSize}; + + if (!cass_iterator_next(tupleIter)) + throw std::runtime_error( + "CassandraResult::getBytesTuple - failed to iterate tuple"); + value = cass_iterator_get_value(tupleIter); + cass_value_get_bytes(value, &buf, &bufSize); + Blob second{buf, buf + bufSize}; + ++curGetIndex_; + return {first, second}; + } + + // TODO: should be replaced with a templated implementation as is very + // similar to other getters + bool + getBool() + { + if (!row_) + { + std::string msg{"No result"}; + log_.error() << msg; + throw std::runtime_error(msg); + } + cass_bool_t val; + CassError rc = + cass_value_get_bool(cass_row_get_column(row_, curGetIndex_), &val); + if (rc != CASS_OK) + { + std::stringstream 
msg; + msg << "Error getting value: " << rc << ", " << cass_error_desc(rc); + log_.error() << msg.str(); + throw std::runtime_error(msg.str()); + } + ++curGetIndex_; + return val; + } + + ~CassandraResult() + { + if (result_ != nullptr) + cass_result_free(result_); + if (iter_ != nullptr) + cass_iterator_free(iter_); + } +}; + +inline bool +isTimeout(CassError rc) +{ + if (rc == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or + rc == CASS_ERROR_LIB_REQUEST_TIMED_OUT or + rc == CASS_ERROR_SERVER_UNAVAILABLE or + rc == CASS_ERROR_SERVER_OVERLOADED or + rc == CASS_ERROR_SERVER_READ_TIMEOUT) + return true; + return false; +} + +template +CassError +cass_future_error_code(CassFuture* fut, CompletionToken&& token) +{ + using function_type = void(boost::system::error_code, CassError); + using result_type = + boost::asio::async_result; + using handler_type = typename result_type::completion_handler_type; + + handler_type handler(std::forward(token)); + result_type result(handler); + + struct HandlerWrapper + { + handler_type handler; + + HandlerWrapper(handler_type&& handler_) : handler(std::move(handler_)) + { + } + }; + + auto resume = [](CassFuture* fut, void* data) -> void { + HandlerWrapper* hw = (HandlerWrapper*)data; + + boost::asio::post( + boost::asio::get_associated_executor(hw->handler), + [fut, hw, handler = std::move(hw->handler)]() mutable { + delete hw; + + handler( + boost::system::error_code{}, cass_future_error_code(fut)); + }); + }; + + HandlerWrapper* wrapper = new HandlerWrapper(std::move(handler)); + + cass_future_set_callback(fut, resume, wrapper); + + // Suspend the coroutine until completion handler is called. + // The handler will populate rc, the error code describing + // the state of the cassandra future. + auto rc = result.get(); + + return rc; +} + +class CassandraBackend : public BackendInterface +{ +private: + // convenience function for one-off queries. 
For normal reads and writes, + // use the prepared statements insert_ and select_ + CassStatement* + makeStatement(char const* query, std::size_t params) + { + CassStatement* ret = cass_statement_new(query, params); + CassError rc = + cass_statement_set_consistency(ret, CASS_CONSISTENCY_QUORUM); + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "nodestore: Error setting query consistency: " << query + << ", result: " << rc << ", " << cass_error_desc(rc); + throw std::runtime_error(ss.str()); + } + return ret; + } + + clio::Logger log_{"Backend"}; + std::atomic open_{false}; + + std::unique_ptr session_{ + nullptr, + [](CassSession* session) { + // Try to disconnect gracefully. + CassFuture* fut = cass_session_close(session); + cass_future_wait(fut); + cass_future_free(fut); + cass_session_free(session); + }}; + + // Database statements cached server side. Using these is more efficient + // than making a new statement + CassandraPreparedStatement insertObject_; + CassandraPreparedStatement insertTransaction_; + CassandraPreparedStatement insertLedgerTransaction_; + CassandraPreparedStatement selectTransaction_; + CassandraPreparedStatement selectAllTransactionHashesInLedger_; + CassandraPreparedStatement selectObject_; + CassandraPreparedStatement selectLedgerPageKeys_; + CassandraPreparedStatement selectLedgerPage_; + CassandraPreparedStatement upperBound2_; + CassandraPreparedStatement getToken_; + CassandraPreparedStatement insertSuccessor_; + CassandraPreparedStatement selectSuccessor_; + CassandraPreparedStatement insertDiff_; + CassandraPreparedStatement selectDiff_; + CassandraPreparedStatement insertAccountTx_; + CassandraPreparedStatement selectAccountTx_; + CassandraPreparedStatement selectAccountTxForward_; + CassandraPreparedStatement insertNFT_; + CassandraPreparedStatement selectNFT_; + CassandraPreparedStatement insertIssuerNFT_; + CassandraPreparedStatement insertNFTURI_; + CassandraPreparedStatement selectNFTURI_; + CassandraPreparedStatement 
insertNFTTx_; + CassandraPreparedStatement selectNFTTx_; + CassandraPreparedStatement selectNFTTxForward_; + CassandraPreparedStatement insertLedgerHeader_; + CassandraPreparedStatement insertLedgerHash_; + CassandraPreparedStatement updateLedgerRange_; + CassandraPreparedStatement deleteLedgerRange_; + CassandraPreparedStatement updateLedgerHeader_; + CassandraPreparedStatement selectLedgerBySeq_; + CassandraPreparedStatement selectLedgerByHash_; + CassandraPreparedStatement selectLatestLedger_; + CassandraPreparedStatement selectLedgerRange_; + + uint32_t syncInterval_ = 1; + uint32_t lastSync_ = 0; + + // maximum number of concurrent in flight write requests. New requests will + // wait for earlier requests to finish if this limit is exceeded + std::uint32_t maxWriteRequestsOutstanding = 10000; + mutable std::atomic_uint32_t numWriteRequestsOutstanding_ = 0; + + // maximum number of concurrent in flight read requests. isTooBusy() will + // return true if the number of in flight read requests exceeds this limit + std::uint32_t maxReadRequestsOutstanding = 100000; + mutable std::atomic_uint32_t numReadRequestsOutstanding_ = 0; + + // mutex and condition_variable to limit the number of concurrent in flight + // write requests + mutable std::mutex throttleMutex_; + mutable std::condition_variable throttleCv_; + + // writes are asynchronous. 
This mutex and condition_variable is used to + // wait for all writes to finish + mutable std::mutex syncMutex_; + mutable std::condition_variable syncCv_; + + // io_context for read/write retries + mutable boost::asio::io_context ioContext_; + std::optional work_; + std::thread ioThread_; + + clio::Config config_; + uint32_t ttl_ = 0; + + mutable std::uint32_t ledgerSequence_ = 0; + +public: + CassandraBackend( + boost::asio::io_context& ioc, + clio::Config const& config, + uint32_t ttl) + : BackendInterface(config), config_(config), ttl_(ttl) + { + work_.emplace(ioContext_); + ioThread_ = std::thread([this]() { ioContext_.run(); }); + } + + ~CassandraBackend() override + { + work_.reset(); + ioThread_.join(); + + if (open_) + close(); + } + + boost::asio::io_context& + getIOContext() const + { + return ioContext_; + } + + bool + isOpen() + { + return open_; + } + + // Setup all of the necessary components for talking to the database. + // Create the table if it doesn't exist already + // @param createIfMissing ignored + void + open(bool readOnly) override; + + // Close the connection to the database + void + close() override + { + open_ = false; + } + + TransactionsAndCursor + fetchAccountTransactions( + ripple::AccountID const& account, + std::uint32_t const limit, + bool forward, + std::optional const& cursor, + boost::asio::yield_context& yield) const override; + + bool + doFinishWritesSync() + { + assert(syncInterval_ == 1); + // wait for all other writes to finish + sync(); + // write range + if (!range) + { + CassandraStatement statement{updateLedgerRange_}; + statement.bindNextInt(ledgerSequence_); + statement.bindNextBoolean(false); + statement.bindNextInt(ledgerSequence_); + executeSyncWrite(statement); + } + CassandraStatement statement{updateLedgerRange_}; + statement.bindNextInt(ledgerSequence_); + statement.bindNextBoolean(true); + statement.bindNextInt(ledgerSequence_ - 1); + if (!executeSyncUpdate(statement)) + { + log_.warn() << "Update failed for 
ledger " + << std::to_string(ledgerSequence_) << ". Returning"; + return false; + } + log_.info() << "Committed ledger " << std::to_string(ledgerSequence_); + return true; + } + + bool + doFinishWritesAsync() + { + assert(syncInterval_ != 1); + // if db is empty, sync. if sync interval is 1, always sync. + // if we've never synced, sync. if its been greater than the configured + // sync interval since we last synced, sync. + if (!range || lastSync_ == 0 || + ledgerSequence_ - syncInterval_ >= lastSync_) + { + // wait for all other writes to finish + sync(); + // write range + if (!range) + { + CassandraStatement statement{updateLedgerRange_}; + statement.bindNextInt(ledgerSequence_); + statement.bindNextBoolean(false); + statement.bindNextInt(ledgerSequence_); + executeSyncWrite(statement); + } + CassandraStatement statement{updateLedgerRange_}; + statement.bindNextInt(ledgerSequence_); + statement.bindNextBoolean(true); + if (lastSync_ == 0) + statement.bindNextInt(ledgerSequence_ - 1); + else + statement.bindNextInt(lastSync_); + if (!executeSyncUpdate(statement)) + { + log_.warn() << "Update failed for ledger " + << std::to_string(ledgerSequence_) << ". Returning"; + return false; + } + log_.info() << "Committed ledger " + << std::to_string(ledgerSequence_); + lastSync_ = ledgerSequence_; + } + else + { + log_.info() << "Skipping commit. 
sync interval is " + << std::to_string(syncInterval_) << " - last sync is " + << std::to_string(lastSync_) << " - ledger sequence is " + << std::to_string(ledgerSequence_); + } + return true; + } + + bool + doFinishWrites() override + { + if (syncInterval_ == 1) + return doFinishWritesSync(); + else + return doFinishWritesAsync(); + } + void + writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& header) + override; + + std::optional + fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override + { + log_.trace() << "called"; + CassandraStatement statement{selectLatestLedger_}; + CassandraResult result = executeAsyncRead(statement, yield); + if (!result.hasResult()) + { + log_.error() + << "CassandraBackend::fetchLatestLedgerSequence - no rows"; + return {}; + } + return result.getUInt32(); + } + + std::optional + fetchLedgerBySequence( + std::uint32_t const sequence, + boost::asio::yield_context& yield) const override + { + log_.trace() << "called"; + CassandraStatement statement{selectLedgerBySeq_}; + statement.bindNextInt(sequence); + CassandraResult result = executeAsyncRead(statement, yield); + if (!result) + { + log_.error() << "No rows"; + return {}; + } + std::vector header = result.getBytes(); + return deserializeHeader(ripple::makeSlice(header)); + } + + std::optional + fetchLedgerByHash( + ripple::uint256 const& hash, + boost::asio::yield_context& yield) const override + { + CassandraStatement statement{selectLedgerByHash_}; + + statement.bindNextBytes(hash); + + CassandraResult result = executeAsyncRead(statement, yield); + + if (!result.hasResult()) + { + log_.debug() << "No rows returned"; + return {}; + } + + std::uint32_t const sequence = result.getInt64(); + + return fetchLedgerBySequence(sequence, yield); + } + + std::optional + hardFetchLedgerRange(boost::asio::yield_context& yield) const override; + + std::vector + fetchAllTransactionsInLedger( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) 
const override; + + std::vector + fetchAllTransactionHashesInLedger( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const override; + + std::optional + fetchNFT( + ripple::uint256 const& tokenID, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const override; + + TransactionsAndCursor + fetchNFTTransactions( + ripple::uint256 const& tokenID, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield) const override; + + // Synchronously fetch the object with key key, as of ledger with sequence + // sequence + std::optional + doFetchLedgerObject( + ripple::uint256 const& key, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const override; + + std::optional + getToken(void const* key, boost::asio::yield_context& yield) const + { + log_.trace() << "Fetching from cassandra"; + CassandraStatement statement{getToken_}; + statement.bindNextBytes(key, 32); + + CassandraResult result = executeAsyncRead(statement, yield); + + if (!result) + { + log_.error() << "No rows"; + return {}; + } + int64_t token = result.getInt64(); + if (token == INT64_MAX) + return {}; + else + return token + 1; + } + + std::optional + fetchTransaction( + ripple::uint256 const& hash, + boost::asio::yield_context& yield) const override + { + log_.trace() << "called"; + CassandraStatement statement{selectTransaction_}; + statement.bindNextBytes(hash); + CassandraResult result = executeAsyncRead(statement, yield); + + if (!result) + { + log_.error() << "No rows"; + return {}; + } + return { + {result.getBytes(), + result.getBytes(), + result.getUInt32(), + result.getUInt32()}}; + } + + std::optional + doFetchSuccessorKey( + ripple::uint256 key, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const override; + + std::vector + fetchTransactions( + std::vector const& hashes, + boost::asio::yield_context& yield) const override; + + std::vector 
+ doFetchLedgerObjects( + std::vector const& keys, + std::uint32_t const sequence, + boost::asio::yield_context& yield) const override; + + std::vector + fetchLedgerDiff( + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) const override; + + void + doWriteLedgerObject( + std::string&& key, + std::uint32_t const seq, + std::string&& blob) override; + + void + writeSuccessor( + std::string&& key, + std::uint32_t const seq, + std::string&& successor) override; + + void + writeAccountTransactions( + std::vector&& data) override; + + void + writeNFTTransactions(std::vector&& data) override; + + void + writeTransaction( + std::string&& hash, + std::uint32_t const seq, + std::uint32_t const date, + std::string&& transaction, + std::string&& metadata) override; + + void + writeNFTs(std::vector&& data) override; + + void + startWrites() const override + { + } + + void + sync() const + { + std::unique_lock lck(syncMutex_); + + syncCv_.wait(lck, [this]() { return finishedAllRequests(); }); + } + + bool + doOnlineDelete( + std::uint32_t const numLedgersToKeep, + boost::asio::yield_context& yield) const override; + + bool + isTooBusy() const override; + + inline void + incrementOutstandingRequestCount() const + { + { + std::unique_lock lck(throttleMutex_); + if (!canAddRequest()) + { + log_.debug() << "Max outstanding requests reached. 
" + << "Waiting for other requests to finish"; + throttleCv_.wait(lck, [this]() { return canAddRequest(); }); + } + } + ++numWriteRequestsOutstanding_; + } + + inline void + decrementOutstandingRequestCount() const + { + // sanity check + if (numWriteRequestsOutstanding_ == 0) + { + assert(false); + throw std::runtime_error("decrementing num outstanding below 0"); + } + size_t cur = (--numWriteRequestsOutstanding_); + { + // mutex lock required to prevent race condition around spurious + // wakeup + std::lock_guard lck(throttleMutex_); + throttleCv_.notify_one(); + } + if (cur == 0) + { + // mutex lock required to prevent race condition around spurious + // wakeup + std::lock_guard lck(syncMutex_); + syncCv_.notify_one(); + } + } + + inline bool + canAddRequest() const + { + return numWriteRequestsOutstanding_ < maxWriteRequestsOutstanding; + } + + inline bool + finishedAllRequests() const + { + return numWriteRequestsOutstanding_ == 0; + } + + void + finishAsyncWrite() const + { + decrementOutstandingRequestCount(); + } + + template + void + executeAsyncHelper( + CassandraStatement const& statement, + T callback, + S& callbackData) const + { + CassFuture* fut = cass_session_execute(session_.get(), statement.get()); + + cass_future_set_callback( + fut, callback, static_cast(&callbackData)); + + cass_future_free(fut); + } + + template + void + executeAsyncWrite( + CassandraStatement const& statement, + T callback, + S& callbackData, + bool isRetry) const + { + if (!isRetry) + incrementOutstandingRequestCount(); + executeAsyncHelper(statement, callback, callbackData); + } + + template + void + executeAsyncRead( + CassandraStatement const& statement, + T callback, + S& callbackData) const + { + executeAsyncHelper(statement, callback, callbackData); + } + + void + executeSyncWrite(CassandraStatement const& statement) const + { + CassFuture* fut; + CassError rc; + do + { + fut = cass_session_execute(session_.get(), statement.get()); + rc = cass_future_error_code(fut); + 
if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Cassandra sync write error"; + ss << ", retrying"; + ss << ": " << cass_error_desc(rc); + log_.warn() << ss.str(); + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + } + } while (rc != CASS_OK); + cass_future_free(fut); + } + + bool + executeSyncUpdate(CassandraStatement const& statement) const + { + bool timedOut = false; + CassFuture* fut; + CassError rc; + do + { + fut = cass_session_execute(session_.get(), statement.get()); + rc = cass_future_error_code(fut); + if (rc != CASS_OK) + { + timedOut = true; + std::stringstream ss; + ss << "Cassandra sync update error"; + ss << ", retrying"; + ss << ": " << cass_error_desc(rc); + log_.warn() << ss.str(); + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + } + } while (rc != CASS_OK); + CassResult const* res = cass_future_get_result(fut); + cass_future_free(fut); + + CassRow const* row = cass_result_first_row(res); + if (!row) + { + log_.error() << "executeSyncUpdate - no rows"; + cass_result_free(res); + return false; + } + cass_bool_t success; + rc = cass_value_get_bool(cass_row_get_column(row, 0), &success); + if (rc != CASS_OK) + { + cass_result_free(res); + log_.error() << "executeSyncUpdate - error getting result " << rc + << ", " << cass_error_desc(rc); + return false; + } + cass_result_free(res); + if (success != cass_true && timedOut) + { + log_.warn() << "Update failed, but timedOut is true"; + // if there was a timeout, the update may have succeeded in the + // background on the first attempt. To determine if this happened, + // we query the range from the db, making sure the range is what + // we wrote. There's a possibility that another writer actually + // applied the update, but there is no way to detect if that + // happened. So, we just return true as long as what we tried to + // write was what ended up being written. 
+ auto rng = hardFetchLedgerRangeNoThrow(); + return rng && rng->maxSequence == ledgerSequence_; + } + return success == cass_true; + } + + CassandraResult + executeAsyncRead( + CassandraStatement const& statement, + boost::asio::yield_context& yield) const + { + using result = boost::asio::async_result< + boost::asio::yield_context, + void(boost::system::error_code, CassError)>; + + CassFuture* fut; + CassError rc; + do + { + ++numReadRequestsOutstanding_; + fut = cass_session_execute(session_.get(), statement.get()); + + boost::system::error_code ec; + rc = cass_future_error_code(fut, yield[ec]); + --numReadRequestsOutstanding_; + + if (ec) + { + log_.error() << "Cannot read async cass_future_error_code"; + } + if (rc != CASS_OK) + { + std::stringstream ss; + ss << "Cassandra executeAsyncRead error"; + ss << ": " << cass_error_desc(rc); + log_.error() << ss.str(); + } + if (isTimeout(rc)) + { + cass_future_free(fut); + throw DatabaseTimeout(); + } + + if (rc == CASS_ERROR_SERVER_INVALID_QUERY) + { + throw std::runtime_error("invalid query"); + } + } while (rc != CASS_OK); + + // The future should have returned at the earlier cass_future_error_code + // so we can use the sync version of this function. + CassResult const* res = cass_future_get_result(fut); + cass_future_free(fut); + return {res}; + } +}; + +} // namespace Backend diff --git a/src/backend/DBHelpers.h b/src/backend/DBHelpers.h new file mode 100644 index 00000000..6dbcfb7e --- /dev/null +++ b/src/backend/DBHelpers.h @@ -0,0 +1,249 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include + +/// Struct used to keep track of what to write to +/// account_transactions/account_tx tables +struct AccountTransactionsData +{ + boost::container::flat_set accounts; + std::uint32_t ledgerSequence; + std::uint32_t transactionIndex; + ripple::uint256 txHash; + + AccountTransactionsData( + ripple::TxMeta& meta, + ripple::uint256 const& txHash, + beast::Journal& j) + : accounts(meta.getAffectedAccounts()) + , ledgerSequence(meta.getLgrSeq()) + , transactionIndex(meta.getIndex()) + , txHash(txHash) + { + } + + AccountTransactionsData() = default; +}; + +/// Represents a link from a tx to an NFT that was targeted/modified/created +/// by it. Gets written to nf_token_transactions table and the like. +struct NFTTransactionsData +{ + ripple::uint256 tokenID; + std::uint32_t ledgerSequence; + std::uint32_t transactionIndex; + ripple::uint256 txHash; + + NFTTransactionsData( + ripple::uint256 const& tokenID, + ripple::TxMeta const& meta, + ripple::uint256 const& txHash) + : tokenID(tokenID) + , ledgerSequence(meta.getLgrSeq()) + , transactionIndex(meta.getIndex()) + , txHash(txHash) + { + } +}; + +/// Represents an NFT state at a particular ledger. Gets written to nf_tokens +/// table and the like. 
+struct NFTsData +{ + ripple::uint256 tokenID; + std::uint32_t ledgerSequence; + + // The transaction index is only stored because we want to store only the + // final state of an NFT per ledger. Since we pull this from transactions + // we keep track of which tx index created this so we can de-duplicate, as + // it is possible for one ledger to have multiple txs that change the + // state of the same NFT. This field is not applicable when we are loading + // initial NFT state via ledger objects, since we do not have to tiebreak + // NFT state for a given ledger in that case. + std::optional transactionIndex; + ripple::AccountID owner; + // We only set the uri if this is a mint tx, or if we are + // loading initial state from NFTokenPage objects. In other words, + // uri should only be set if the etl process believes this NFT hasn't + // been seen before in our local database. We do this so that we don't + // write to the the nf_token_uris table every + // time the same NFT changes hands. We also can infer if there is a URI + // that we need to write to the issuer_nf_tokens table. + std::optional uri; + bool isBurned = false; + + // This constructor is used when parsing an NFTokenMint tx. + // Unfortunately because of the extreme edge case of being able to + // re-mint an NFT with the same ID, we must explicitly record a null + // URI. For this reason, we _always_ write this field as a result of + // this tx. 
+ NFTsData( + ripple::uint256 const& tokenID, + ripple::AccountID const& owner, + ripple::Blob const& uri, + ripple::TxMeta const& meta) + : tokenID(tokenID) + , ledgerSequence(meta.getLgrSeq()) + , transactionIndex(meta.getIndex()) + , owner(owner) + , uri(uri) + { + } + + // This constructor is used when parsing an NFTokenBurn or + // NFTokenAcceptOffer tx + NFTsData( + ripple::uint256 const& tokenID, + ripple::AccountID const& owner, + ripple::TxMeta const& meta, + bool isBurned) + : tokenID(tokenID) + , ledgerSequence(meta.getLgrSeq()) + , transactionIndex(meta.getIndex()) + , owner(owner) + , isBurned(isBurned) + { + } + + // This constructor is used when parsing an NFTokenPage directly from + // ledger state. + // Unfortunately because of the extreme edge case of being able to + // re-mint an NFT with the same ID, we must explicitly record a null + // URI. For this reason, we _always_ write this field as a result of + // this tx. + NFTsData( + ripple::uint256 const& tokenID, + std::uint32_t const ledgerSequence, + ripple::AccountID const& owner, + ripple::Blob const& uri) + : tokenID(tokenID) + , ledgerSequence(ledgerSequence) + , owner(owner) + , uri(uri) + { + } +}; + +template +inline bool +isOffer(T const& object) +{ + short offer_bytes = (object[1] << 8) | object[2]; + return offer_bytes == 0x006f; +} + +template +inline bool +isOfferHex(T const& object) +{ + auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4); + if (blob) + { + short offer_bytes = ((*blob)[1] << 8) | (*blob)[2]; + return offer_bytes == 0x006f; + } + return false; +} + +template +inline bool +isDirNode(T const& object) +{ + short spaceKey = (object.data()[1] << 8) | object.data()[2]; + return spaceKey == 0x0064; +} + +template +inline bool +isBookDir(T const& key, R const& object) +{ + if (!isDirNode(object)) + return false; + + ripple::STLedgerEntry const sle{ + ripple::SerialIter{object.data(), object.size()}, key}; + return !sle[~ripple::sfOwner].has_value(); +} + 
+template +inline ripple::uint256 +getBook(T const& offer) +{ + ripple::SerialIter it{offer.data(), offer.size()}; + ripple::SLE sle{it, {}}; + ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory); + return book; +} + +template +inline ripple::uint256 +getBookBase(T const& key) +{ + assert(key.size() == ripple::uint256::size()); + ripple::uint256 ret; + for (size_t i = 0; i < 24; ++i) + { + ret.data()[i] = key.data()[i]; + } + return ret; +} + +inline ripple::LedgerInfo +deserializeHeader(ripple::Slice data) +{ + ripple::SerialIter sit(data.data(), data.size()); + + ripple::LedgerInfo info; + + info.seq = sit.get32(); + info.drops = sit.get64(); + info.parentHash = sit.get256(); + info.txHash = sit.get256(); + info.accountHash = sit.get256(); + info.parentCloseTime = + ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}}; + info.closeTime = + ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}}; + info.closeTimeResolution = ripple::NetClock::duration{sit.get8()}; + info.closeFlags = sit.get8(); + + info.hash = sit.get256(); + + return info; +} + +inline std::string +uint256ToString(ripple::uint256 const& uint) +{ + return {reinterpret_cast(uint.data()), uint.size()}; +} + +static constexpr std::uint32_t rippleEpochStart = 946684800; diff --git a/src/backend/Errors.h b/src/backend/Errors.h new file mode 100644 index 00000000..271b0456 --- /dev/null +++ b/src/backend/Errors.h @@ -0,0 +1,38 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +namespace Backend { + +class UnexpectedDataError : public std::exception +{ + std::string msg; + +public: + explicit UnexpectedDataError(std::string const& msg) : msg(msg) + { + } + + const char* + what() const throw() override + { + return msg.c_str(); + } +}; + +} // namespace Backend diff --git a/src/backend/README.md b/src/backend/README.md new file mode 100644 index 00000000..6be3d486 --- /dev/null +++ b/src/backend/README.md @@ -0,0 +1,220 @@ +# Clio Backend +## Background +The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. As of right now, Cassandra and ScyllaDB are the only supported databases that are production-ready. Support for database types can be easily extended by creating new implementations which implements the virtual methods of `BackendInterface.h`. Then, use the Factory Object Design Pattern to simply add logic statements to `BackendFactory.h` that return the new database interface for a specific `type` in Clio's configuration file. + +## Data Model +The data model used by Clio to read and write ledger data is different from what Rippled uses. Rippled uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. 
In a SHAMap, ledger objects are stored in the leaf vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record. Rippled nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other Rippled nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized Rippled nodes. + +Since Clio only extracts past validated ledger data from a group of trusted Rippled nodes, it can be safely assumed that these ledger data are correct without the need to validate with other nodes in the XRP peer-to-peer network. Because of this, Clio is able to use a flattened data model to store the past validated ledger data, which allows for direct record lookup with much faster constant time operations. + +There are three main types of data in each XRP ledger version: they are [Ledger Header](https://xrpl.org/ledger-header.html), [Transaction Set](https://xrpl.org/transaction-formats.html) and [State Data](https://xrpl.org/ledger-object-types.html). Due to the structural differences of the different types of databases, Clio may choose to represent these data using a different schema for each unique database type. + +**Keywords** +*Sequence*: A unique incrementing identification number used to label the different ledger versions. +*Hash*: The SHA512-half (calculate SHA512 and take the first 256 bits) hash of various ledger data like the entire ledger or specific ledger objects. +*Ledger Object*: The [binary-encoded](https://xrpl.org/serialization.html) STObject containing specific data (i.e. metadata, transaction data). 
+*Metadata*: The data containing [detailed information](https://xrpl.org/transaction-metadata.html#transaction-metadata) of the outcome of a specific transaction, regardless of whether the transaction was successful. +*Transaction data*: The data containing the [full details](https://xrpl.org/transaction-common-fields.html) of a specific transaction. +*Object Index*: The pseudo-random unique identifier of a ledger object, created by hashing the data of the object. + +## Cassandra Implementation +Cassandra is a distributed wide-column NoSQL database designed to handle large data throughput with high availability and no single point of failure. By leveraging Cassandra, Clio will be able to quickly and reliably scale up when needed simply by adding more Cassandra nodes to the Cassandra cluster configuration. + +In Cassandra, Clio will be creating 9 tables to store the ledger data, they are `ledger_transactions`, `transactions`, `ledger_hashes`, `ledger_range`, `objects`, `ledgers`, `diff`, `account_tx`, and `successor`. Their schemas and how they work are detailed below. + +*Note, if you would like visually explore the data structure of the Cassandra database, you can first run Clio server with database `type` configured as `cassandra` to fill ledger data from Rippled nodes into Cassandra, then use a GUI database management tool like [Datastax's Opcenter](https://docs.datastax.com/en/install/6.0/install/opscInstallOpsc.html) to interactively view it.* + + +### `ledger_transactions` +``` +CREATE TABLE clio.ledger_transactions ( + ledger_sequence bigint, # The sequence number of the ledger version + hash blob, # Hash of all the transactions on this ledger version + PRIMARY KEY (ledger_sequence, hash) +) WITH CLUSTERING ORDER BY (hash ASC) ... + ``` +This table stores the hashes of all transactions in a given ledger sequence ordered by the hash value in ascending order. 
+ +### `transactions` +``` +CREATE TABLE clio.transactions ( + hash blob PRIMARY KEY, # The transaction hash + date bigint, # Date of the transaction + ledger_sequence bigint, # The sequence that the transaction was validated + metadata blob, # Metadata of the transaction + transaction blob # Data of the transaction +) ... + ``` +This table stores the full transaction and metadata of each ledger version with the transaction hash as the primary key. + +To look up all the transactions that were validated in a ledger version with sequence `n`, one can first get all the transaction hashes in that ledger version by querying `SELECT * FROM ledger_transactions WHERE ledger_sequence = n;`. Then, iterate through the list of hashes and query `SELECT * FROM transactions WHERE hash = one_of_the_hash_from_the_list;` to get the detailed transaction data. + +### `ledger_hashes` +``` +CREATE TABLE clio.ledger_hashes ( + hash blob PRIMARY KEY, # Hash of entire ledger version's data + sequence bigint # The sequence of the ledger version +) ... + ``` +This table stores the hash of all ledger versions by their sequences. +### `ledger_range` +``` +CREATE TABLE clio.ledger_range ( + is_latest boolean PRIMARY KEY, # Whether this sequence is the stopping range + sequence bigint # The sequence number of the starting/stopping range +) ... + ``` +This table marks the range of ledger versions that is stored on this specific Cassandra node. Because of its nature, there are only two records in this table with `false` and `true` values for `is_latest`, marking the starting and ending sequence of the ledger range. + +### `objects` +``` +CREATE TABLE clio.objects ( + key blob, # Object index of the object + sequence bigint, # The sequence this object was last updated + object blob, # Data of the object + PRIMARY KEY (key, sequence) +) WITH CLUSTERING ORDER BY (sequence DESC) ... 
+ ``` +This table stores the specific data of all objects that ever existed on the XRP network, even if they are deleted (which is represented with a special `0x` value). The records are ordered by descending sequence, where the newest validated ledger objects are at the top. + +This table is updated when all data for a given ledger sequence has been written to the various tables in the database. For each ledger, many associated records are written to different tables. This table is used as a synchronization mechanism, to prevent the application from reading data from a ledger for which all data has not yet been fully written. + +### `ledgers` +``` +CREATE TABLE clio.ledgers ( + sequence bigint PRIMARY KEY, # Sequence of the ledger version + header blob # Data of the header +) ... + ``` +This table stores the ledger header data of specific ledger versions by their sequence. + +### `diff` +``` +CREATE TABLE clio.diff ( + seq bigint, # Sequence of the ledger version + key blob, # Hash of changes in the ledger version + PRIMARY KEY (seq, key) +) WITH CLUSTERING ORDER BY (key ASC) ... + ``` +This table stores the object index of all the changes in each ledger version. + +### `account_tx` +``` +CREATE TABLE clio.account_tx ( + account blob, + seq_idx frozen<tuple<bigint, bigint>>, # Tuple of (ledger_index, transaction_index) + hash blob, # Hash of the transaction + PRIMARY KEY (account, seq_idx) +) WITH CLUSTERING ORDER BY (seq_idx DESC) ... + ``` +This table stores the list of transactions affecting a given account. This includes transactions made by the account, as well as transactions received. + + +### `successor` +``` +CREATE TABLE clio.successor ( + key blob, # Object index + seq bigint, # The sequence that this ledger object's predecessor and successor were updated + next blob, # Index of the next object that existed in this sequence + PRIMARY KEY (key, seq) +) WITH CLUSTERING ORDER BY (seq ASC) ... 
+ ``` +This table is the important backbone of how histories of ledger objects are stored in Cassandra. The successor table stores the object index of all ledger objects that were validated on the XRP network along with the ledger sequence that the object was updated on. Due to the unique nature of the table with each key being ordered by the sequence, by tracing through the table with a specific sequence number, Clio can recreate a Linked List data structure that represents all the existing ledger objects at that ledger sequence. The special values of `0x00...00` and `0xFF...FF` are used to label the head and tail of the Linked List in the successor table. The diagram below showcases how tracing through the same table but with different sequence parameter filtering can result in different Linked List data representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` can effectively trace through the successor table and get the Linked List of a specific sequence `n`. + +![Successor Table Trace Diagram](https://raw.githubusercontent.com/Shoukozumi/clio/9b2ea3efb6b164b02e9a5f0ef6717065a70f078c/src/backend/README.png) +*P.S.: The `diff` is `(DELETE 0x00...02, CREATE 0x00...03)` for `seq=1001` and `(CREATE 0x00...04)` for `seq=1002`, which are both accurately reflected in the Linked List trace* + +In each new ledger version with sequence `n`, a ledger object `v` can either be **created**, **modified**, or **deleted**. For all three of these operations, the procedure to update the successor table can be broken down into two steps: + 1. Trace through the Linked List of the previous sequence to find the ledger object `e` with the greatest object index smaller than or equal to `v`'s index. Save `e`'s `next` value (the index of the next ledger object) as `w`. + 2. If `v` is... + 1. 
Being **created**, add two new records of `seq=n` with one being `e` pointing to `v`, and `v` pointing to `w` (Linked List insertion operation). + 2. Being **modified**, do nothing. + 3. Being **deleted**, add a record of `seq=n` with `e` pointing to `v`'s `next` value (Linked List deletion operation). + +### NFT data model +In `rippled` NFTs are stored in NFTokenPage ledger objects. This object is +implemented to save ledger space and has the property that it gives us O(1) +lookup time for an NFT, assuming we know who owns the NFT at a particular +ledger. However, if we do not know who owns the NFT at a specific ledger +height we have no alternative in rippled other than scanning the entire +ledger. Because of this tradeoff, clio implements a special NFT indexing data +structure that allows clio users to query NFTs quickly, while keeping +rippled's space-saving optimizations. + +#### `nf_tokens` +``` +CREATE TABLE clio.nf_tokens ( + token_id blob, # The NFT's ID + sequence bigint, # Sequence of ledger version + owner blob, # The account ID of the owner of this NFT at this ledger + is_burned boolean, # True if token was burned in this ledger + PRIMARY KEY (token_id, sequence) +) WITH CLUSTERING ORDER BY (sequence DESC) ... +``` +This table indexes NFT IDs with their owner at a given ledger. So +``` +SELECT * FROM nf_tokens +WHERE token_id = N AND seq <= Y +ORDER BY seq DESC LIMIT 1; +``` +will give you the owner of token N at ledger Y and whether it was burned. If +the token is burned, the owner field indicates the account that owned the +token at the time it was burned; it does not indicate the person who burned +the token, necessarily. If you need to determine who burned the token you can +use the `nft_history` API, which will give you the NFTokenBurn transaction +that burned this token, along with the account that submitted that +transaction. 
+ +#### `issuer_nf_tokens_v2` +``` +CREATE TABLE clio.issuer_nf_tokens_v2 ( + issuer blob, # The NFT issuer's account ID + taxon bigint, # The NFT's token taxon + token_id blob, # The NFT's ID + PRIMARY KEY (issuer, taxon, token_id) +) +``` +This table indexes token IDs against their issuer and issuer/taxon +combination. This is useful for determining all the NFTs a specific account +issued, or all the NFTs a specific account issued with a specific taxon. It is +not useful to know all the NFTs with a given taxon while excluding issuer, since the +meaning of a taxon is left to an issuer. + +#### `nf_token_uris` +``` +CREATE TABLE clio.nf_token_uris ( + token_id blob, # The NFT's ID + sequence bigint, # Sequence of ledger version + uri blob, # The NFT's URI + PRIMARY KEY (token_id, sequence) +) WITH CLUSTERING ORDER BY (sequence DESC) ... +``` +This table is used to store an NFT's URI. Without storing this here, we would +need to traverse the NFT owner's entire set of NFTs to find the URI, again due +to the way that NFTs are stored in rippled. Furthermore, instead of storing +this in the `nf_tokens` table, we store it here to save space. A given NFT +will have only one entry in this table (see caveat below), written to this +table as soon as clio sees the NFTokenMint transaction, or when clio loads an +NFTokenPage from the initial ledger it downloaded. However, the `nf_tokens` +table is written to every time an NFT changes ownership, or if it is burned. + +Given this, why do we have to store the sequence? Unfortunately there is an +extreme edge case where a given NFT ID can be burned, and then re-minted with +a different URI. This is extremely unlikely, and might be fixed in a future +version to rippled, but just in case we can handle that edge case by allowing +a given NFT ID to have a new URI assigned in this case, without removing the +prior URI. 
+ +#### `nf_token_transactions` +``` +CREATE TABLE clio.nf_token_transactions ( + token_id blob, # The NFT's ID + seq_idx tuple, # Tuple of (ledger_index, transaction_index) + hash blob, # Hash of the transaction + PRIMARY KEY (token_id, seq_idx) +) WITH CLUSTERING ORDER BY (seq_idx DESC) ... +``` +This table is the NFT equivalent of `account_tx`. It's motivated by the exact +same reasons and serves the analogous purpose here. It drives the +`nft_history` API. + diff --git a/src/backend/SimpleCache.cpp b/src/backend/SimpleCache.cpp new file mode 100644 index 00000000..6284f460 --- /dev/null +++ b/src/backend/SimpleCache.cpp @@ -0,0 +1,157 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +namespace Backend { + +uint32_t +SimpleCache::latestLedgerSequence() const +{ + std::shared_lock lck{mtx_}; + return latestSeq_; +} + +void +SimpleCache::update( + std::vector const& objs, + uint32_t seq, + bool isBackground) +{ + if (disabled_) + return; + + { + std::scoped_lock lck{mtx_}; + if (seq > latestSeq_) + { + assert(seq == latestSeq_ + 1 || latestSeq_ == 0); + latestSeq_ = seq; + } + for (auto const& obj : objs) + { + if (obj.blob.size()) + { + if (isBackground && deletes_.count(obj.key)) + continue; + + auto& e = map_[obj.key]; + if (seq > e.seq) + { + e = {seq, obj.blob}; + } + } + else + { + map_.erase(obj.key); + if (!full_ && !isBackground) + deletes_.insert(obj.key); + } + } + } +} + +std::optional +SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const +{ + if (!full_) + return {}; + // NOTE: the lock must be named — an unnamed std::shared_lock{mtx_} is a + // temporary that unlocks immediately, leaving the map read unprotected. + std::shared_lock lck{mtx_}; + successorReqCounter_++; + if (seq != latestSeq_) + return {}; + auto e = map_.upper_bound(key); + if (e == map_.end()) + return {}; + successorHitCounter_++; + return {{e->first, e->second.blob}}; +} + +std::optional +SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const +{ + if (!full_) + return {}; + std::shared_lock lck{mtx_}; + if (seq != latestSeq_) + return {}; + auto e = map_.lower_bound(key); + if (e == map_.begin()) + return {}; + --e; + return {{e->first, e->second.blob}}; +} +std::optional +SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const +{ + if (seq > latestSeq_) + return {}; + std::shared_lock lck{mtx_}; + objectReqCounter_++; + auto e = map_.find(key); + if (e == map_.end()) + return {}; + if (seq < e->second.seq) + return {}; + objectHitCounter_++; + return {e->second.blob}; +} + +void +SimpleCache::setDisabled() +{ + disabled_ = true; +} + +void +SimpleCache::setFull() +{ + if (disabled_) + return; + + full_ = true; + std::scoped_lock lck{mtx_}; + deletes_.clear(); +} + 
+bool +SimpleCache::isFull() const +{ + return full_; +} +size_t +SimpleCache::size() const +{ + std::shared_lock lck{mtx_}; + return map_.size(); +} +float +SimpleCache::getObjectHitRate() const +{ + if (!objectReqCounter_) + return 1; + return ((float)objectHitCounter_) / objectReqCounter_; +} +float +SimpleCache::getSuccessorHitRate() const +{ + if (!successorReqCounter_) + return 1; + return ((float)successorHitCounter_) / successorReqCounter_; +} +} // namespace Backend diff --git a/src/backend/SimpleCache.h b/src/backend/SimpleCache.h new file mode 100644 index 00000000..85d2ab88 --- /dev/null +++ b/src/backend/SimpleCache.h @@ -0,0 +1,98 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +namespace Backend { +class SimpleCache +{ + struct CacheEntry + { + uint32_t seq = 0; + Blob blob; + }; + + // counters for fetchLedgerObject(s) hit rate + mutable std::atomic_uint32_t objectReqCounter_; + mutable std::atomic_uint32_t objectHitCounter_; + // counters for fetchSuccessorKey hit rate + mutable std::atomic_uint32_t successorReqCounter_; + mutable std::atomic_uint32_t successorHitCounter_; + + std::map map_; + mutable std::shared_mutex mtx_; + uint32_t latestSeq_ = 0; + std::atomic_bool full_ = false; + std::atomic_bool disabled_ = false; + // temporary set to prevent background thread from writing already deleted + // data. not used when cache is full + std::unordered_set> deletes_; + +public: + // Update the cache with new ledger objects + // set isBackground to true when writing old data from a background thread + void + update( + std::vector const& blobs, + uint32_t seq, + bool isBackground = false); + + std::optional + get(ripple::uint256 const& key, uint32_t seq) const; + + // always returns empty optional if isFull() is false + std::optional + getSuccessor(ripple::uint256 const& key, uint32_t seq) const; + + // always returns empty optional if isFull() is false + std::optional + getPredecessor(ripple::uint256 const& key, uint32_t seq) const; + + void + setDisabled(); + + void + setFull(); + + uint32_t + latestLedgerSequence() const; + + // whether the cache has all data for the most recent ledger + bool + isFull() const; + + size_t + size() const; + + float + getObjectHitRate() const; + + float + getSuccessorHitRate() const; +}; + +} // namespace Backend diff --git a/src/backend/Types.h b/src/backend/Types.h new file mode 100644 index 00000000..d83ec1ef --- /dev/null +++ b/src/backend/Types.h @@ -0,0 +1,111 @@ 
+//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include + +namespace Backend { + +// *** return types + +using Blob = std::vector; + +struct LedgerObject +{ + ripple::uint256 key; + Blob blob; + bool + operator==(const LedgerObject& other) const + { + return key == other.key && blob == other.blob; + } +}; + +struct LedgerPage +{ + std::vector objects; + std::optional cursor; +}; +struct BookOffersPage +{ + std::vector offers; + std::optional cursor; +}; +struct TransactionAndMetadata +{ + Blob transaction; + Blob metadata; + std::uint32_t ledgerSequence; + std::uint32_t date; + bool + operator==(const TransactionAndMetadata& other) const + { + return transaction == other.transaction && metadata == other.metadata && + ledgerSequence == other.ledgerSequence && date == other.date; + } +}; + +struct TransactionsCursor +{ + std::uint32_t ledgerSequence; + std::uint32_t transactionIndex; +}; + +struct TransactionsAndCursor +{ + std::vector txns; + std::optional cursor; +}; + +struct NFT 
+{ + ripple::uint256 tokenID; + std::uint32_t ledgerSequence; + ripple::AccountID owner; + Blob uri; + bool isBurned; + + // clearly two tokens are the same if they have the same ID, but this + // struct stores the state of a given token at a given ledger sequence, so + // we also need to compare with ledgerSequence + bool + operator==(NFT const& other) const + { + return tokenID == other.tokenID && + ledgerSequence == other.ledgerSequence; + } +}; + +struct LedgerRange +{ + std::uint32_t minSequence; + std::uint32_t maxSequence; +}; +constexpr ripple::uint256 firstKey{ + "0000000000000000000000000000000000000000000000000000000000000000"}; +constexpr ripple::uint256 lastKey{ + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"}; +constexpr ripple::uint256 hi192{ + "0000000000000000000000000000000000000000000000001111111111111111"}; +} // namespace Backend diff --git a/src/config/Config.cpp b/src/config/Config.cpp new file mode 100644 index 00000000..e5d90cdf --- /dev/null +++ b/src/config/Config.cpp @@ -0,0 +1,190 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include + +namespace clio { + +// Note: `store_(store)` MUST use `()` instead of `{}` otherwise gcc +// picks `initializer_list` constructor and anything passed becomes an +// array :-D +Config::Config(boost::json::value store) : store_(std::move(store)) +{ +} + +Config::operator bool() const noexcept +{ + return not store_.is_null(); +} + +bool +Config::contains(key_type key) const +{ + return lookup(key).has_value(); +} + +std::optional +Config::lookup(key_type key) const +{ + if (store_.is_null()) + return std::nullopt; + + std::reference_wrapper cur = std::cref(store_); + auto hasBrokenPath = false; + auto tokenized = detail::Tokenizer{key}; + std::string subkey{}; + + auto maybeSection = tokenized.next(); + while (maybeSection.has_value()) + { + auto section = maybeSection.value(); + subkey += section; + + if (not hasBrokenPath) + { + if (not cur.get().is_object()) + throw detail::StoreException( + "Not an object at '" + subkey + "'"); + if (not cur.get().as_object().contains(section)) + hasBrokenPath = true; + else + cur = std::cref(cur.get().as_object().at(section)); + } + + subkey += Separator; + maybeSection = tokenized.next(); + } + + if (hasBrokenPath) + return std::nullopt; + return std::make_optional(cur); +} + +std::optional +Config::maybeArray(key_type key) const +{ + try + { + auto maybe_arr = lookup(key); + if (maybe_arr && maybe_arr->is_array()) + { + auto& arr = maybe_arr->as_array(); + array_type out; + out.reserve(arr.size()); + + std::transform( + std::begin(arr), + std::end(arr), + std::back_inserter(out), + [](auto&& element) { return Config{std::move(element)}; }); + return std::make_optional(std::move(out)); + } + } + catch (detail::StoreException const&) + { + // ignore store error, but rethrow key errors + } + + return std::nullopt; +} + +Config::array_type +Config::array(key_type key) const +{ + if (auto maybe_arr = 
maybeArray(key); maybe_arr) + return maybe_arr.value(); + throw std::logic_error("No array found at '" + key + "'"); +} + +Config::array_type +Config::arrayOr(key_type key, array_type fallback) const +{ + if (auto maybe_arr = maybeArray(key); maybe_arr) + return maybe_arr.value(); + return fallback; +} + +Config::array_type +Config::arrayOrThrow(key_type key, std::string_view err) const +{ + try + { + return maybeArray(key).value(); + } + catch (std::exception const&) + { + throw std::runtime_error(err.data()); + } +} + +Config +Config::section(key_type key) const +{ + auto maybe_element = lookup(key); + if (maybe_element && maybe_element->is_object()) + return Config{std::move(*maybe_element)}; + throw std::logic_error("No section found at '" + key + "'"); +} + +Config::array_type +Config::array() const +{ + if (not store_.is_array()) + throw std::logic_error("_self_ is not an array"); + + array_type out; + auto const& arr = store_.as_array(); + out.reserve(arr.size()); + + std::transform( + std::cbegin(arr), + std::cend(arr), + std::back_inserter(out), + [](auto const& element) { return Config{element}; }); + return out; +} + +Config +ConfigReader::open(std::filesystem::path path) +{ + try + { + std::ifstream in(path, std::ios::in | std::ios::binary); + if (in) + { + std::stringstream contents; + contents << in.rdbuf(); + auto opts = boost::json::parse_options{}; + opts.allow_comments = true; + return Config{boost::json::parse(contents.str(), {}, opts)}; + } + } + catch (std::exception const& e) + { + LogService::error() << "Could not read configuration file from '" + << path.string() << "': " << e.what(); + } + + return Config{}; +} + +} // namespace clio diff --git a/src/config/Config.h b/src/config/Config.h new file mode 100644 index 00000000..06d51ad4 --- /dev/null +++ b/src/config/Config.h @@ -0,0 +1,405 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + 
Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include + +#include +#include +#include +#include +#include + +namespace clio { + +/** + * @brief Convenience wrapper to query a JSON configuration file. + * + * Any custom data type can be supported by implementing the right `tag_invoke` + * for `boost::json::value_to`. + */ +class Config final +{ + boost::json::value store_; + static constexpr char Separator = '.'; + +public: + using key_type = std::string; /*! The type of key used */ + using array_type = std::vector; /*! The type of array used */ + using write_cursor_type = std::pair< + std::optional>, + key_type>; + + /** + * @brief Construct a new Config object. + * @param store boost::json::value that backs this instance + */ + explicit Config(boost::json::value store = {}); + + // + // Querying the store + // + + /** + * @brief Checks whether underlying store is not null. + * + * @return true If the store is null + * @return false If the store is not null + */ + operator bool() const noexcept; + + /** + * @brief Checks whether something exists under given key. 
+ * + * @param key The key to check + * @return true If something exists under key + * @return false If nothing exists under key + * @throws std::logic_error If the key is of invalid format + */ + [[nodiscard]] bool + contains(key_type key) const; + + // + // Key value access + // + + /** + * @brief Interface for fetching values by key that returns std::optional. + * + * Will attempt to fetch the value under the desired key. If the value + * exists and can be represented by the desired type Result then it will be + * returned wrapped in an optional. If the value exists but the conversion + * to Result is not possible - a runtime_error will be thrown. If the value + * does not exist under the specified key - std::nullopt is returned. + * + * @tparam Result The desired return type + * @param key The key to check + * @return std::optional Optional value of desired type + * @throws std::logic_error Thrown if conversion to Result is not possible + * or key is of invalid format + */ + template + [[nodiscard]] std::optional + maybeValue(key_type key) const + { + auto maybe_element = lookup(key); + if (maybe_element) + return std::make_optional( + checkedAs(key, *maybe_element)); + return std::nullopt; + } + + /** + * @brief Interface for fetching values by key. + * + * Will attempt to fetch the value under the desired key. If the value + * exists and can be represented by the desired type Result then it will be + * returned. If the value exists but the conversion + * to Result is not possible OR the value does not exist - a logic_error + * will be thrown. 
+ * + * @tparam Result The desired return type + * @param key The key to check + * @return Result Value of desired type + * @throws std::logic_error Thrown if conversion to Result is not + * possible, value does not exist under specified key path or the key is of + * invalid format + */ + template + [[nodiscard]] Result + value(key_type key) const + { + return maybeValue(key).value(); + } + + /** + * @brief Interface for fetching values by key with fallback. + * + * Will attempt to fetch the value under the desired key. If the value + * exists and can be represented by the desired type Result then it will be + * returned. If the value exists but the conversion + * to Result is not possible - a logic_error will be thrown. If the value + * does not exist under the specified key - user specified fallback is + * returned. + * + * @tparam Result The desired return type + * @param key The key to check + * @param fallback The fallback value + * @return Result Value of desired type + * @throws std::logic_error Thrown if conversion to Result is not possible + * or the key is of invalid format + */ + template + [[nodiscard]] Result + valueOr(key_type key, Result fallback) const + { + try + { + return maybeValue(key).value_or(fallback); + } + catch (detail::StoreException const&) + { + return fallback; + } + } + + /** + * @brief Interface for fetching values by key with custom error handling. + * + * Will attempt to fetch the value under the desired key. If the value + * exists and can be represented by the desired type Result then it will be + * returned. If the value exists but the conversion + * to Result is not possible OR the value does not exist - a runtime_error + * will be thrown with the user specified message. 
+ * + * @tparam Result The desired return type + * @param key The key to check + * @param err The custom error message + * @return Result Value of desired type + * @throws std::runtime_error Thrown if conversion to Result is not possible + * or value does not exist under key + */ + template + [[nodiscard]] Result + valueOrThrow(key_type key, std::string_view err) const + { + try + { + return maybeValue(key).value(); + } + catch (std::exception const&) + { + throw std::runtime_error(err.data()); + } + } + + /** + * @brief Interface for fetching an array by key that returns std::optional. + * + * Will attempt to fetch an array under the desired key. If the array + * exists then it will be + * returned wrapped in an optional. If the array does not exist under the + * specified key - std::nullopt is returned. + * + * @param key The key to check + * @return std::optional Optional array + * @throws std::logic_error Thrown if the key is of invalid format + */ + [[nodiscard]] std::optional + maybeArray(key_type key) const; + + /** + * @brief Interface for fetching an array by key. + * + * Will attempt to fetch an array under the desired key. If the array + * exists then it will be + * returned. If the array does not exist under the + * specified key an std::logic_error is thrown. + * + * @param key The key to check + * @return array_type The array + * @throws std::logic_error Thrown if there is no array under the desired + * key or the key is of invalid format + */ + [[nodiscard]] array_type + array(key_type key) const; + + /** + * @brief Interface for fetching an array by key with fallback. + * + * Will attempt to fetch an array under the desired key. If the array + * exists then it will be returned. + * If the array does not exist or another type is stored under the desired + * key - user specified fallback is returned. 
+ * + * @param key The key to check + * @param fallback The fallback array + * @return array_type The array + * @throws std::logic_error Thrown if the key is of invalid format + */ + [[nodiscard]] array_type + arrayOr(key_type key, array_type fallback) const; + + /** + * @brief Interface for fetching an array by key with custom error handling. + * + * Will attempt to fetch an array under the desired key. If the array + * exists then it will be returned. + * If the array does not exist or another type is stored under the desired + * key - std::runtime_error is thrown with the user specified error message. + * + * @param key The key to check + * @param err The custom error message + * @return array_type The array + * @throws std::runtime_error Thrown if there is no array under the desired + * key + */ + [[nodiscard]] array_type + arrayOrThrow(key_type key, std::string_view err) const; + + /** + * @brief Interface for fetching a sub section by key. + * + * Will attempt to fetch an entire section under the desired key and return + * it as a Config instance. If the section does not exist or another type is + * stored under the desired key - std::logic_error is thrown. + * + * @param key The key to check + * @return Config Section represented as a separate instance of Config + * @throws std::logic_error Thrown if there is no section under the + * desired key or the key is of invalid format + */ + [[nodiscard]] Config + section(key_type key) const; + + // + // Direct self-value access + // + + /** + * @brief Interface for reading the value directly referred to by the + * instance. Wraps as std::optional. + * + * See @ref maybeValue(key_type) const for how this works. + */ + template + [[nodiscard]] std::optional + maybeValue() const + { + if (store_.is_null()) + return std::nullopt; + return std::make_optional(checkedAs("_self_", store_)); + } + + /** + * @brief Interface for reading the value directly referred to by the + * instance. 
+ * + * See @ref value(key_type) const for how this works. + */ + template + [[nodiscard]] Result + value() const + { + return maybeValue().value(); + } + + /** + * @brief Interface for reading the value directly referred to by the + * instance with user-specified fallback. + * + * See @ref valueOr(key_type, Result) const for how this works. + */ + template + [[nodiscard]] Result + valueOr(Result fallback) const + { + return maybeValue().valueOr(fallback); + } + + /** + * @brief Interface for reading the value directly referred to by the + * instance with user-specified error message. + * + * See @ref valueOrThrow(key_type, std::string_view) const for how this + * works. + */ + template + [[nodiscard]] Result + valueOrThrow(std::string_view err) const + { + try + { + return maybeValue().value(); + } + catch (std::exception const&) + { + throw std::runtime_error(err.data()); + } + } + + /** + * @brief Interface for reading the array directly referred to by the + * instance. + * + * See @ref array(key_type) const for how this works. 
+ */ + [[nodiscard]] array_type + array() const; + +private: + template + [[nodiscard]] Return + checkedAs(key_type key, boost::json::value const& value) const + { + auto has_error = false; + if constexpr (std::is_same_v) + { + if (not value.is_bool()) + has_error = true; + } + else if constexpr (std::is_same_v) + { + if (not value.is_string()) + has_error = true; + } + else if constexpr (std::is_same_v) + { + if (not value.is_number()) + has_error = true; + } + else if constexpr ( + std::is_convertible_v || + std::is_convertible_v) + { + if (not value.is_int64() && not value.is_uint64()) + has_error = true; + } + + if (has_error) + throw std::runtime_error( + "Type for key '" + key + "' is '" + + std::string{to_string(value.kind())} + + "' in JSON but requested '" + detail::typeName() + "'"); + + return boost::json::value_to(value); + } + + std::optional + lookup(key_type key) const; + + write_cursor_type + lookupForWrite(key_type key); +}; + +/** + * @brief Simple configuration file reader. + * + * Reads the JSON file under specified path and creates a @ref Config object + * from its contents. + */ +class ConfigReader final +{ +public: + static Config + open(std::filesystem::path path); +}; + +} // namespace clio diff --git a/src/config/detail/Helpers.h b/src/config/detail/Helpers.h new file mode 100644 index 00000000..33c94838 --- /dev/null +++ b/src/config/detail/Helpers.h @@ -0,0 +1,164 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include + +namespace clio::detail { + +/** + * @brief Thrown when a KeyPath related error occurs + */ +struct KeyException : public ::std::logic_error +{ + KeyException(::std::string msg) : ::std::logic_error{msg} + { + } +}; + +/** + * @brief Thrown when a Store (config's storage) related error occurs. + */ +struct StoreException : public ::std::logic_error +{ + StoreException(::std::string msg) : ::std::logic_error{msg} + { + } +}; + +/** + * @brief Simple string tokenizer. Used by @ref Config. + * + * @tparam KeyType The type of key to use + * @tparam Separator The separator character + */ +template +class Tokenizer final +{ + using opt_key_t = std::optional; + KeyType key_; + KeyType token_{}; + std::queue tokens_{}; + +public: + explicit Tokenizer(KeyType key) : key_{key} + { + if (key.empty()) + throw KeyException("Empty key"); + + for (auto const& c : key) + { + if (c == Separator) + saveToken(); + else + token_ += c; + } + + saveToken(); + } + + [[nodiscard]] opt_key_t + next() + { + if (tokens_.empty()) + return std::nullopt; + auto token = tokens_.front(); + tokens_.pop(); + return std::make_optional(std::move(token)); + } + +private: + void + saveToken() + { + if (token_.empty()) + throw KeyException("Empty token in key '" + key_ + "'."); + tokens_.push(std::move(token_)); + token_ = {}; + } +}; + +template +static constexpr const char* +typeName() +{ + return typeid(T).name(); +} + +template <> +constexpr const char* +typeName() +{ + return "uint64_t"; +} + +template <> +constexpr const char* 
+typeName() +{ + return "int64_t"; +} + +template <> +constexpr const char* +typeName() +{ + return "uint32_t"; +} + +template <> +constexpr const char* +typeName() +{ + return "int32_t"; +} + +template <> +constexpr const char* +typeName() +{ + return "bool"; +} + +template <> +constexpr const char* +typeName() +{ + return "std::string"; +} + +template <> +constexpr const char* +typeName() +{ + return "const char*"; +} + +template <> +constexpr const char* +typeName() +{ + return "double"; +} + +}; // namespace clio::detail diff --git a/src/etl/ETLHelpers.h b/src/etl/ETLHelpers.h new file mode 100644 index 00000000..736cf9f2 --- /dev/null +++ b/src/etl/ETLHelpers.h @@ -0,0 +1,192 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include + +/// This datastructure is used to keep track of the sequence of the most recent +/// ledger validated by the network. There are two methods that will wait until +/// certain conditions are met. 
This datastructure is able to be "stopped". When +/// the datastructure is stopped, any threads currently waiting are unblocked. +/// Any later calls to methods of this datastructure will not wait. Once the +/// datastructure is stopped, the datastructure remains stopped for the rest of +/// its lifetime. +class NetworkValidatedLedgers +{ + // max sequence validated by network + std::optional max_; + + mutable std::mutex m_; + + std::condition_variable cv_; + +public: + static std::shared_ptr + make_ValidatedLedgers() + { + return std::make_shared(); + } + + /// Notify the datastructure that idx has been validated by the network + /// @param idx sequence validated by network + void + push(uint32_t idx) + { + std::lock_guard lck(m_); + if (!max_ || idx > *max_) + max_ = idx; + cv_.notify_all(); + } + + /// Get most recently validated sequence. If no ledgers are known to have + /// been validated, this function waits until the next ledger is validated + /// @return sequence of most recently validated ledger. empty optional if + /// the datastructure has been stopped + std::optional + getMostRecent() + { + std::unique_lock lck(m_); + cv_.wait(lck, [this]() { return max_; }); + return max_; + } + + /// Waits for the sequence to be validated by the network + /// @param sequence to wait for + /// @return true if sequence was validated, false otherwise + /// a return value of false means the datastructure has been stopped + bool + waitUntilValidatedByNetwork( + uint32_t sequence, + std::optional maxWaitMs = {}) + { + std::unique_lock lck(m_); + auto pred = [sequence, this]() -> bool { + return (max_ && sequence <= *max_); + }; + // pass the predicate so spurious wakeups and unrelated notifies + // don't end the timed wait before maxWaitMs has elapsed + if (maxWaitMs) + cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs), pred); + else + cv_.wait(lck, pred); + return pred(); + } +}; + +/// Generic thread-safe queue with an optional maximum size +/// Note, we can't use a lockfree queue here, since we need the ability to wait +/// for an element to be added or removed from the queue.
These waits are +/// blocking calls. +template +class ThreadSafeQueue +{ + std::queue queue_; + + mutable std::mutex m_; + std::condition_variable cv_; + std::optional maxSize_; + +public: + /// @param maxSize maximum size of the queue. Calls that would cause the + /// queue to exceed this size will block until free space is available + ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize) + { + } + + /// Create a queue with no maximum size + ThreadSafeQueue() = default; + + /// @param elt element to push onto queue + /// if maxSize is set, this method will block until free space is available + void + push(T const& elt) + { + std::unique_lock lck(m_); + // if queue has a max size, wait until strictly below the limit so the + // push cannot grow the queue past maxSize + if (maxSize_) + cv_.wait(lck, [this]() { return queue_.size() < *maxSize_; }); + queue_.push(elt); + cv_.notify_all(); + } + + /// @param elt element to push onto queue. elt is moved from + /// if maxSize is set, this method will block until free space is available + void + push(T&& elt) + { + std::unique_lock lck(m_); + // if queue has a max size, wait until strictly below the limit so the + // push cannot grow the queue past maxSize + if (maxSize_) + cv_.wait(lck, [this]() { return queue_.size() < *maxSize_; }); + queue_.push(std::move(elt)); + cv_.notify_all(); + } + + /// @return element popped from queue. Will block until queue is non-empty + T + pop() + { + std::unique_lock lck(m_); + cv_.wait(lck, [this]() { return !queue_.empty(); }); + T ret = std::move(queue_.front()); + queue_.pop(); + // if queue has a max size, unblock any possible pushers + if (maxSize_) + cv_.notify_all(); + return ret; + } + /// @return element popped from queue. Will block until queue is non-empty + std::optional + tryPop() + { + std::scoped_lock lck(m_); + if (queue_.empty()) + return {}; + T ret = std::move(queue_.front()); + queue_.pop(); + // if queue has a max size, unblock any possible pushers + if (maxSize_) + cv_.notify_all(); + return ret; + } +}; + +/// Partitions the uint256 keyspace into numMarkers partitions, each of equal +/// size.
+inline std::vector +getMarkers(size_t numMarkers) +{ + assert(numMarkers <= 256); + + unsigned char incr = 256 / numMarkers; + + std::vector markers; + markers.reserve(numMarkers); + ripple::uint256 base{0}; + for (size_t i = 0; i < numMarkers; ++i) + { + markers.push_back(base); + base.data()[0] += incr; + } + return markers; +} diff --git a/src/etl/ETLSource.cpp b/src/etl/ETLSource.cpp new file mode 100644 index 00000000..b41f04bc --- /dev/null +++ b/src/etl/ETLSource.cpp @@ -0,0 +1,1203 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace clio; + +void +ForwardCache::freshen() +{ + log_.trace() << "Freshening ForwardCache"; + + auto numOutstanding = + std::make_shared(latestForwarded_.size()); + + for (auto const& cacheEntry : latestForwarded_) + { + boost::asio::spawn( + strand_, + [this, numOutstanding, command = cacheEntry.first]( + boost::asio::yield_context yield) { + boost::json::object request = {{"command", command}}; + auto resp = source_.requestFromRippled(request, {}, yield); + + if (!resp || resp->contains("error")) + resp = {}; + + { + std::scoped_lock lk(mtx_); + latestForwarded_[command] = resp; + } + }); + } +} + +void +ForwardCache::clear() +{ + std::scoped_lock lk(mtx_); + for (auto& cacheEntry : latestForwarded_) + latestForwarded_[cacheEntry.first] = {}; +} + +std::optional +ForwardCache::get(boost::json::object const& request) const +{ + std::optional command = {}; + if (request.contains("command") && !request.contains("method") && + request.at("command").is_string()) + command = request.at("command").as_string().c_str(); + else if ( + request.contains("method") && !request.contains("command") && + request.at("method").is_string()) + command = request.at("method").as_string().c_str(); + + if (!command) + return {}; + if (RPC::specifiesCurrentOrClosedLedger(request)) + return {}; + + std::shared_lock lk(mtx_); + if (!latestForwarded_.contains(*command)) + return {}; + + return {latestForwarded_.at(*command)}; +} + +static boost::beast::websocket::stream_base::timeout +make_TimeoutOption() +{ + // See #289 for details. + // TODO: investigate the issue and find if there is a solution other than + // introducing artificial timeouts. 
+ if (true) + { + // The only difference between this and the suggested client role is + // that idle_timeout is set to 20 instead of none() + auto opt = boost::beast::websocket::stream_base::timeout{}; + opt.handshake_timeout = std::chrono::seconds(30); + opt.idle_timeout = std::chrono::seconds(20); + opt.keep_alive_pings = false; + return opt; + } + else + { + return boost::beast::websocket::stream_base::timeout::suggested( + boost::beast::role_type::client); + } +} + +template +void +ETLSourceImpl::reconnect(boost::beast::error_code ec) +{ + if (paused_) + return; + + if (connected_) + hooks_.onDisconnected(ec); + + connected_ = false; + // These are somewhat normal errors. operation_aborted occurs on shutdown, + // when the timer is cancelled. connection_refused will occur repeatedly + std::string err = ec.message(); + // if we cannot connect to the transaction processing process + if (ec.category() == boost::asio::error::get_ssl_category()) + { + err = std::string(" (") + + boost::lexical_cast(ERR_GET_LIB(ec.value())) + "," + + boost::lexical_cast(ERR_GET_REASON(ec.value())) + ") "; + // ERR_PACK /* crypto/err/err.h */ + char buf[128]; + ::ERR_error_string_n(ec.value(), buf, sizeof(buf)); + err += buf; + + std::cout << err << std::endl; + } + + if (ec != boost::asio::error::operation_aborted && + ec != boost::asio::error::connection_refused) + { + log_.error() << "error code = " << ec << " - " << toString(); + } + else + { + log_.warn() << "error code = " << ec << " - " << toString(); + } + + // exponentially increasing timeouts, with a max of 30 seconds + size_t waitTime = std::min(pow(2, numFailures_), 30.0); + numFailures_++; + timer_.expires_after(boost::asio::chrono::seconds(waitTime)); + timer_.async_wait([this](auto ec) { + bool startAgain = (ec != boost::asio::error::operation_aborted); + log_.trace() << "async_wait : ec = " << ec; + derived().close(startAgain); + }); +} + +void +PlainETLSource::close(bool startAgain) +{ + timer_.cancel(); + 
ioc_.post([this, startAgain]() { + if (closing_) + return; + + if (derived().ws().is_open()) + { + // onStop() also calls close(). If the async_close is called twice, + // an assertion fails. Using closing_ makes sure async_close is only + // called once + closing_ = true; + derived().ws().async_close( + boost::beast::websocket::close_code::normal, + [this, startAgain](auto ec) { + if (ec) + { + log_.error() + << " async_close : " + << "error code = " << ec << " - " << toString(); + } + closing_ = false; + if (startAgain) + { + ws_ = std::make_unique>( + boost::asio::make_strand(ioc_)); + + run(); + } + }); + } + else if (startAgain) + { + ws_ = std::make_unique< + boost::beast::websocket::stream>( + boost::asio::make_strand(ioc_)); + + run(); + } + }); +} + +void +SslETLSource::close(bool startAgain) +{ + timer_.cancel(); + ioc_.post([this, startAgain]() { + if (closing_) + return; + + if (derived().ws().is_open()) + { + // onStop() also calls close(). If the async_close is called twice, + // an assertion fails. 
Using closing_ makes sure async_close is only + // called once + closing_ = true; + derived().ws().async_close( + boost::beast::websocket::close_code::normal, + [this, startAgain](auto ec) { + if (ec) + { + log_.error() + << " async_close : " + << "error code = " << ec << " - " << toString(); + } + closing_ = false; + if (startAgain) + { + ws_ = std::make_unique>>( + boost::asio::make_strand(ioc_), *sslCtx_); + + run(); + } + }); + } + else if (startAgain) + { + ws_ = std::make_unique>>( + boost::asio::make_strand(ioc_), *sslCtx_); + + run(); + } + }); +} + +template +void +ETLSourceImpl::onResolve( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type results) +{ + log_.trace() << "ec = " << ec << " - " << toString(); + if (ec) + { + // try again + reconnect(ec); + } + else + { + boost::beast::get_lowest_layer(derived().ws()) + .expires_after(std::chrono::seconds(30)); + boost::beast::get_lowest_layer(derived().ws()) + .async_connect(results, [this](auto ec, auto ep) { + derived().onConnect(ec, ep); + }); + } +} + +void +PlainETLSource::onConnect( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) +{ + log_.trace() << "ec = " << ec << " - " << toString(); + if (ec) + { + // start over + reconnect(ec); + } + else + { + numFailures_ = 0; + // Turn off timeout on the tcp stream, because websocket stream has it's + // own timeout system + boost::beast::get_lowest_layer(derived().ws()).expires_never(); + + // Set a desired timeout for the websocket stream + derived().ws().set_option(make_TimeoutOption()); + + // Set a decorator to change the User-Agent of the handshake + derived().ws().set_option( + boost::beast::websocket::stream_base::decorator( + [](boost::beast::websocket::request_type& req) { + req.set( + boost::beast::http::field::user_agent, "clio-client"); + + req.set("X-User", "clio-client"); + })); + + // Update the host_ string. 
This will provide the value of the + // Host HTTP header during the WebSocket handshake. + // See https://tools.ietf.org/html/rfc7230#section-5.4 + auto host = ip_ + ':' + std::to_string(endpoint.port()); + // Perform the websocket handshake + derived().ws().async_handshake( + host, "/", [this](auto ec) { onHandshake(ec); }); + } +} + +void +SslETLSource::onConnect( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) +{ + log_.trace() << "ec = " << ec << " - " << toString(); + if (ec) + { + // start over + reconnect(ec); + } + else + { + numFailures_ = 0; + // Turn off timeout on the tcp stream, because websocket stream has it's + // own timeout system + boost::beast::get_lowest_layer(derived().ws()).expires_never(); + + // Set a desired timeout for the websocket stream + derived().ws().set_option(make_TimeoutOption()); + + // Set a decorator to change the User-Agent of the handshake + derived().ws().set_option( + boost::beast::websocket::stream_base::decorator( + [](boost::beast::websocket::request_type& req) { + req.set( + boost::beast::http::field::user_agent, "clio-client"); + + req.set("X-User", "clio-client"); + })); + + // Update the host_ string. This will provide the value of the + // Host HTTP header during the WebSocket handshake. 
+ // See https://tools.ietf.org/html/rfc7230#section-5.4 + auto host = ip_ + ':' + std::to_string(endpoint.port()); + // Perform the websocket handshake + ws().next_layer().async_handshake( + boost::asio::ssl::stream_base::client, + [this, endpoint](auto ec) { onSslHandshake(ec, endpoint); }); + } +} + +void +SslETLSource::onSslHandshake( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) +{ + if (ec) + { + reconnect(ec); + } + else + { + // Perform the websocket handshake + auto host = ip_ + ':' + std::to_string(endpoint.port()); + // Perform the websocket handshake + ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); }); + } +} + +template +void +ETLSourceImpl::onHandshake(boost::beast::error_code ec) +{ + log_.trace() << "ec = " << ec << " - " << toString(); + if (auto action = hooks_.onConnected(ec); + action == ETLSourceHooks::Action::STOP) + return; + + if (ec) + { + // start over + reconnect(ec); + } + else + { + boost::json::object jv{ + {"command", "subscribe"}, + {"streams", + {"ledger", "manifests", "validations", "transactions_proposed"}}}; + std::string s = boost::json::serialize(jv); + log_.trace() << "Sending subscribe stream message"; + + derived().ws().set_option( + boost::beast::websocket::stream_base::decorator( + [](boost::beast::websocket::request_type& req) { + req.set( + boost::beast::http::field::user_agent, + std::string(BOOST_BEAST_VERSION_STRING) + + " clio-client"); + + req.set("X-User", "coro-client"); + })); + + // Send the message + derived().ws().async_write( + boost::asio::buffer(s), + [this](auto ec, size_t size) { onWrite(ec, size); }); + } +} + +template +void +ETLSourceImpl::onWrite( + boost::beast::error_code ec, + size_t bytesWritten) +{ + log_.trace() << "ec = " << ec << " - " << toString(); + if (ec) + { + // start over + reconnect(ec); + } + else + { + derived().ws().async_read( + readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); + } +} + 
+template +void +ETLSourceImpl::onRead(boost::beast::error_code ec, size_t size) +{ + log_.trace() << "ec = " << ec << " - " << toString(); + // if error or error reading message, start over + if (ec) + { + reconnect(ec); + } + else + { + handleMessage(); + boost::beast::flat_buffer buffer; + swap(readBuffer_, buffer); + + log_.trace() << "calling async_read - " << toString(); + derived().ws().async_read( + readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); + } +} + +template +bool +ETLSourceImpl::handleMessage() +{ + log_.trace() << toString(); + + setLastMsgTime(); + connected_ = true; + try + { + std::string msg{ + static_cast(readBuffer_.data().data()), + readBuffer_.size()}; + log_.trace() << msg; + boost::json::value raw = boost::json::parse(msg); + log_.trace() << "parsed"; + boost::json::object response = raw.as_object(); + + uint32_t ledgerIndex = 0; + if (response.contains("result")) + { + boost::json::object result = response["result"].as_object(); + if (result.contains("ledger_index")) + { + ledgerIndex = result["ledger_index"].as_int64(); + } + if (result.contains("validated_ledgers")) + { + boost::json::string const& validatedLedgers = + result["validated_ledgers"].as_string(); + + setValidatedRange( + {validatedLedgers.c_str(), validatedLedgers.size()}); + } + log_.info() << "Received a message on ledger " + << " subscription stream. Message : " << response + << " - " << toString(); + } + else if ( + response.contains("type") && response["type"] == "ledgerClosed") + { + log_.info() << "Received a message on ledger " + << " subscription stream. 
Message : " << response + << " - " << toString(); + if (response.contains("ledger_index")) + { + ledgerIndex = response["ledger_index"].as_int64(); + } + if (response.contains("validated_ledgers")) + { + boost::json::string const& validatedLedgers = + response["validated_ledgers"].as_string(); + setValidatedRange( + {validatedLedgers.c_str(), validatedLedgers.size()}); + } + } + else + { + if (balancer_.shouldPropagateTxnStream(this)) + { + if (response.contains("transaction")) + { + forwardCache_.freshen(); + subscriptions_->forwardProposedTransaction(response); + } + else if ( + response.contains("type") && + response["type"] == "validationReceived") + { + subscriptions_->forwardValidation(response); + } + else if ( + response.contains("type") && + response["type"] == "manifestReceived") + { + subscriptions_->forwardManifest(response); + } + } + } + + if (ledgerIndex != 0) + { + log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - " + << toString(); + networkValidatedLedgers_->push(ledgerIndex); + } + return true; + } + catch (std::exception const& e) + { + log_.error() << "Exception in handleMessage : " << e.what(); + return false; + } +} + +class AsyncCallData +{ + clio::Logger log_{"ETL"}; + + std::unique_ptr cur_; + std::unique_ptr next_; + + org::xrpl::rpc::v1::GetLedgerDataRequest request_; + std::unique_ptr context_; + + grpc::Status status_; + unsigned char nextPrefix_; + + std::string lastKey_; + +public: + AsyncCallData( + uint32_t seq, + ripple::uint256 const& marker, + std::optional const& nextMarker) + { + request_.mutable_ledger()->set_sequence(seq); + if (marker.isNonZero()) + { + request_.set_marker(marker.data(), marker.size()); + } + request_.set_user("ETL"); + nextPrefix_ = 0x00; + if (nextMarker) + nextPrefix_ = nextMarker->data()[0]; + + unsigned char prefix = marker.data()[0]; + + log_.debug() << "Setting up AsyncCallData. marker = " + << ripple::strHex(marker) + << " . 
prefix = " << ripple::strHex(std::string(1, prefix)) + << " . nextPrefix_ = " + << ripple::strHex(std::string(1, nextPrefix_)); + + assert(nextPrefix_ > prefix || nextPrefix_ == 0x00); + + cur_ = std::make_unique(); + + next_ = std::make_unique(); + + context_ = std::make_unique(); + } + + enum class CallStatus { MORE, DONE, ERRORED }; + CallStatus + process( + std::unique_ptr& stub, + grpc::CompletionQueue& cq, + BackendInterface& backend, + bool abort, + bool cacheOnly = false) + { + log_.trace() << "Processing response. " + << "Marker prefix = " << getMarkerPrefix(); + if (abort) + { + log_.error() << "AsyncCallData aborted"; + return CallStatus::ERRORED; + } + if (!status_.ok()) + { + log_.error() << "AsyncCallData status_ not ok: " + << " code = " << status_.error_code() + << " message = " << status_.error_message(); + return CallStatus::ERRORED; + } + if (!next_->is_unlimited()) + { + log_.warn() << "AsyncCallData is_unlimited is false. Make sure " + "secure_gateway is set correctly at the ETL source"; + } + + std::swap(cur_, next_); + + bool more = true; + + // if no marker returned, we are done + if (cur_->marker().size() == 0) + more = false; + + // if returned marker is greater than our end, we are done + unsigned char prefix = cur_->marker()[0]; + if (nextPrefix_ != 0x00 && prefix >= nextPrefix_) + more = false; + + // if we are not done, make the next async call + if (more) + { + request_.set_marker(std::move(cur_->marker())); + call(stub, cq); + } + + log_.trace() << "Writing objects"; + std::vector cacheUpdates; + cacheUpdates.reserve(cur_->ledger_objects().objects_size()); + for (int i = 0; i < cur_->ledger_objects().objects_size(); ++i) + { + auto& obj = *(cur_->mutable_ledger_objects()->mutable_objects(i)); + if (!more && nextPrefix_ != 0x00) + { + if (((unsigned char)obj.key()[0]) >= nextPrefix_) + continue; + } + cacheUpdates.push_back( + {*ripple::uint256::fromVoidChecked(obj.key()), + {obj.mutable_data()->begin(), obj.mutable_data()->end()}}); 
+ if (!cacheOnly) + { + if (lastKey_.size()) + backend.writeSuccessor( + std::move(lastKey_), + request_.ledger().sequence(), + std::string{obj.key()}); + lastKey_ = obj.key(); + backend.writeNFTs(getNFTDataFromObj( + request_.ledger().sequence(), obj.key(), obj.data())); + backend.writeLedgerObject( + std::move(*obj.mutable_key()), + request_.ledger().sequence(), + std::move(*obj.mutable_data())); + } + } + backend.cache().update( + cacheUpdates, request_.ledger().sequence(), cacheOnly); + log_.trace() << "Wrote objects"; + + return more ? CallStatus::MORE : CallStatus::DONE; + } + + void + call( + std::unique_ptr& stub, + grpc::CompletionQueue& cq) + { + context_ = std::make_unique(); + + std::unique_ptr> + rpc(stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq)); + + rpc->StartCall(); + + rpc->Finish(next_.get(), &status_, this); + } + + std::string + getMarkerPrefix() + { + if (next_->marker().size() == 0) + return ""; + else + return ripple::strHex(std::string{next_->marker().data()[0]}); + } + + std::string + getLastKey() + { + return lastKey_; + } +}; + +template +bool +ETLSourceImpl::loadInitialLedger( + uint32_t sequence, + uint32_t numMarkers, + bool cacheOnly) +{ + if (!stub_) + return false; + + grpc::CompletionQueue cq; + + void* tag; + + bool ok = false; + + std::vector calls; + auto markers = getMarkers(numMarkers); + + for (size_t i = 0; i < markers.size(); ++i) + { + std::optional nextMarker; + if (i + 1 < markers.size()) + nextMarker = markers[i + 1]; + calls.emplace_back(sequence, markers[i], nextMarker); + } + + log_.debug() << "Starting data download for ledger " << sequence + << ". 
Using source = " << toString(); + + for (auto& c : calls) + c.call(stub_, cq); + + size_t numFinished = 0; + bool abort = false; + size_t incr = 500000; + size_t progress = incr; + std::vector edgeKeys; + while (numFinished < calls.size() && cq.Next(&tag, &ok)) + { + assert(tag); + + auto ptr = static_cast(tag); + + if (!ok) + { + log_.error() << "loadInitialLedger - ok is false"; + return false; + // handle cancelled + } + else + { + log_.trace() << "Marker prefix = " << ptr->getMarkerPrefix(); + auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly); + if (result != AsyncCallData::CallStatus::MORE) + { + numFinished++; + log_.debug() << "Finished a marker. " + << "Current number of finished = " << numFinished; + std::string lastKey = ptr->getLastKey(); + if (lastKey.size()) + edgeKeys.push_back(ptr->getLastKey()); + } + if (result == AsyncCallData::CallStatus::ERRORED) + { + abort = true; + } + if (backend_->cache().size() > progress) + { + log_.info() << "Downloaded " << backend_->cache().size() + << " records from rippled"; + progress += incr; + } + } + } + log_.info() << "Finished loadInitialLedger. 
cache size = " + << backend_->cache().size(); + size_t numWrites = 0; + if (!abort) + { + backend_->cache().setFull(); + if (!cacheOnly) + { + auto seconds = util::timed([&]() { + for (auto& key : edgeKeys) + { + log_.debug() + << "Writing edge key = " << ripple::strHex(key); + auto succ = backend_->cache().getSuccessor( + *ripple::uint256::fromVoidChecked(key), sequence); + if (succ) + backend_->writeSuccessor( + std::move(key), + sequence, + uint256ToString(succ->key)); + } + ripple::uint256 prev = Backend::firstKey; + while (auto cur = + backend_->cache().getSuccessor(prev, sequence)) + { + assert(cur); + if (prev == Backend::firstKey) + { + backend_->writeSuccessor( + uint256ToString(prev), + sequence, + uint256ToString(cur->key)); + } + + if (isBookDir(cur->key, cur->blob)) + { + auto base = getBookBase(cur->key); + // make sure the base is not an actual object + if (!backend_->cache().get(cur->key, sequence)) + { + auto succ = + backend_->cache().getSuccessor(base, sequence); + assert(succ); + if (succ->key == cur->key) + { + log_.debug() << "Writing book successor = " + << ripple::strHex(base) << " - " + << ripple::strHex(cur->key); + + backend_->writeSuccessor( + uint256ToString(base), + sequence, + uint256ToString(cur->key)); + } + } + ++numWrites; + } + prev = std::move(cur->key); + if (numWrites % 100000 == 0 && numWrites != 0) + log_.info() + << "Wrote " << numWrites << " book successors"; + } + + backend_->writeSuccessor( + uint256ToString(prev), + sequence, + uint256ToString(Backend::lastKey)); + + ++numWrites; + }); + log_.info() + << "Looping through cache and submitting all writes took " + << seconds + << " seconds. 
numWrites = " << std::to_string(numWrites); + } + } + return !abort; +} + +template +std::pair +ETLSourceImpl::fetchLedger( + uint32_t ledgerSequence, + bool getObjects, + bool getObjectNeighbors) +{ + org::xrpl::rpc::v1::GetLedgerResponse response; + if (!stub_) + return {{grpc::StatusCode::INTERNAL, "No Stub"}, response}; + + // ledger header with txns and metadata + org::xrpl::rpc::v1::GetLedgerRequest request; + grpc::ClientContext context; + request.mutable_ledger()->set_sequence(ledgerSequence); + request.set_transactions(true); + request.set_expand(true); + request.set_get_objects(getObjects); + request.set_get_object_neighbors(getObjectNeighbors); + request.set_user("ETL"); + grpc::Status status = stub_->GetLedger(&context, request, &response); + if (status.ok() && !response.is_unlimited()) + { + log_.warn() << "ETLSourceImpl::fetchLedger - is_unlimited is " + "false. Make sure secure_gateway is set " + "correctly on the ETL source. source = " + << toString() << " status = " << status.error_message(); + } + return {status, std::move(response)}; +} + +static std::unique_ptr +make_ETLSource( + clio::Config const& config, + boost::asio::io_context& ioContext, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr networkValidatedLedgers, + ETLLoadBalancer& balancer) +{ + auto src = std::make_unique( + config, + ioContext, + backend, + subscriptions, + networkValidatedLedgers, + balancer); + + src->run(); + + return src; +} + +ETLLoadBalancer::ETLLoadBalancer( + clio::Config const& config, + boost::asio::io_context& ioContext, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr nwvl) +{ + if (auto value = config.maybeValue("num_markers"); value) + downloadRanges_ = std::clamp(*value, 1u, 256u); + else if (backend->fetchLedgerRange()) + downloadRanges_ = 4; + + for (auto const& entry : config.array("etl_sources")) + { + std::unique_ptr source = make_ETLSource( + entry, ioContext, backend, subscriptions, nwvl, 
*this); + + sources_.push_back(std::move(source)); + log_.info() << "Added etl source - " << sources_.back()->toString(); + } +} + +void +ETLLoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly) +{ + execute( + [this, &sequence, cacheOnly](auto& source) { + bool res = + source->loadInitialLedger(sequence, downloadRanges_, cacheOnly); + if (!res) + { + log_.error() << "Failed to download initial ledger." + << " Sequence = " << sequence + << " source = " << source->toString(); + } + return res; + }, + sequence); +} + +std::optional +ETLLoadBalancer::fetchLedger( + uint32_t ledgerSequence, + bool getObjects, + bool getObjectNeighbors) +{ + org::xrpl::rpc::v1::GetLedgerResponse response; + bool success = execute( + [&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_]( + auto& source) { + auto [status, data] = source->fetchLedger( + ledgerSequence, getObjects, getObjectNeighbors); + response = std::move(data); + if (status.ok() && response.validated()) + { + log.info() << "Successfully fetched ledger = " << ledgerSequence + << " from source = " << source->toString(); + return true; + } + else + { + log.warn() << "Could not fetch ledger " << ledgerSequence + << ", Reply: " << response.DebugString() + << ", error_code: " << status.error_code() + << ", error_msg: " << status.error_message() + << ", source = " << source->toString(); + return false; + } + }, + ledgerSequence); + if (success) + return response; + else + return {}; +} + +std::optional +ETLLoadBalancer::forwardToRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const +{ + srand((unsigned)time(0)); + auto sourceIdx = rand() % sources_.size(); + auto numAttempts = 0; + while (numAttempts < sources_.size()) + { + if (auto res = + sources_[sourceIdx]->forwardToRippled(request, clientIp, yield)) + return res; + + sourceIdx = (sourceIdx + 1) % sources_.size(); + ++numAttempts; + } + return {}; +} + +template 
+std::optional +ETLSourceImpl::forwardToRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const +{ + if (auto resp = forwardCache_.get(request); resp) + { + log_.debug() << "request hit forwardCache"; + return resp; + } + + return requestFromRippled(request, clientIp, yield); +} + +template +std::optional +ETLSourceImpl::requestFromRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const +{ + log_.trace() << "Attempting to forward request to tx. " + << "request = " << boost::json::serialize(request); + + boost::json::object response; + if (!connected_) + { + log_.error() << "Attempted to proxy but failed to connect to tx"; + return {}; + } + namespace beast = boost::beast; // from + namespace http = beast::http; // from + namespace websocket = beast::websocket; // from + namespace net = boost::asio; // from + using tcp = boost::asio::ip::tcp; // from + try + { + boost::beast::error_code ec; + // These objects perform our I/O + tcp::resolver resolver{ioc_}; + + log_.trace() << "Creating websocket"; + auto ws = std::make_unique>(ioc_); + + // Look up the domain name + auto const results = resolver.async_resolve(ip_, wsPort_, yield[ec]); + if (ec) + return {}; + + ws->next_layer().expires_after(std::chrono::seconds(3)); + + log_.trace() << "Connecting websocket"; + // Make the connection on the IP address we get from a lookup + ws->next_layer().async_connect(results, yield[ec]); + if (ec) + return {}; + + // Set a decorator to change the User-Agent of the handshake + // and to tell rippled to charge the client IP for RPC + // resources. 
See "secure_gateway" in + // + // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg + ws->set_option(websocket::stream_base::decorator( + [&clientIp](websocket::request_type& req) { + req.set( + http::field::user_agent, + std::string(BOOST_BEAST_VERSION_STRING) + + " websocket-client-coro"); + req.set(http::field::forwarded, "for=" + clientIp); + })); + log_.trace() << "client ip: " << clientIp; + + log_.trace() << "Performing websocket handshake"; + // Perform the websocket handshake + ws->async_handshake(ip_, "/", yield[ec]); + if (ec) + return {}; + + log_.trace() << "Sending request"; + // Send the message + ws->async_write( + net::buffer(boost::json::serialize(request)), yield[ec]); + if (ec) + return {}; + + beast::flat_buffer buffer; + ws->async_read(buffer, yield[ec]); + if (ec) + return {}; + + auto begin = static_cast(buffer.data().data()); + auto end = begin + buffer.data().size(); + auto parsed = boost::json::parse(std::string(begin, end)); + + if (!parsed.is_object()) + { + log_.error() << "Error parsing response: " + << std::string{begin, end}; + return {}; + } + log_.trace() << "Successfully forward request"; + + response = parsed.as_object(); + + response["forwarded"] = true; + return response; + } + catch (std::exception const& e) + { + log_.error() << "Encountered exception : " << e.what(); + return {}; + } +} + +template +bool +ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence) +{ + srand((unsigned)time(0)); + auto sourceIdx = rand() % sources_.size(); + auto numAttempts = 0; + + while (true) + { + auto& source = sources_[sourceIdx]; + + log_.debug() << "Attempting to execute func. ledger sequence = " + << ledgerSequence << " - source = " << source->toString(); + // Originally, it was (source->hasLedger(ledgerSequence) || true) + /* Sometimes rippled has ledger but doesn't actually know. 
However, + but this does NOT happen in the normal case and is safe to remove + This || true is only needed when loading full history standalone */ + if (source->hasLedger(ledgerSequence)) + { + bool res = f(source); + if (res) + { + log_.debug() << "Successfully executed func at source = " + << source->toString() + << " - ledger sequence = " << ledgerSequence; + break; + } + else + { + log_.warn() << "Failed to execute func at source = " + << source->toString() + << " - ledger sequence = " << ledgerSequence; + } + } + else + { + log_.warn() << "Ledger not present at source = " + << source->toString() + << " - ledger sequence = " << ledgerSequence; + } + sourceIdx = (sourceIdx + 1) % sources_.size(); + numAttempts++; + if (numAttempts % sources_.size() == 0) + { + log_.info() << "Ledger sequence " << ledgerSequence + << " is not yet available from any configured sources. " + << "Sleeping and trying again"; + std::this_thread::sleep_for(std::chrono::seconds(2)); + } + } + return true; +} diff --git a/src/etl/ETLSource.h b/src/etl/ETLSource.h new file mode 100644 index 00000000..8bff3b8d --- /dev/null +++ b/src/etl/ETLSource.h @@ -0,0 +1,724 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include + +#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h" +#include + +#include +#include +#include +#include +#include +#include + +class ETLLoadBalancer; +class ETLSource; +class ProbingETLSource; +class SubscriptionManager; + +/// This class manages a connection to a single ETL source. This is almost +/// always a rippled node, but really could be another reporting node. This +/// class subscribes to the ledgers and transactions_proposed streams of the +/// associated rippled node, and keeps track of which ledgers the rippled node +/// has. This class also has methods for extracting said ledgers. Lastly this +/// class forwards transactions received on the transactions_proposed streams to +/// any subscribers. 
+class ForwardCache +{ + using response_type = std::optional; + + clio::Logger log_{"ETL"}; + mutable std::atomic_bool stopping_ = false; + mutable std::shared_mutex mtx_; + std::unordered_map latestForwarded_; + + boost::asio::io_context::strand strand_; + boost::asio::steady_timer timer_; + ETLSource const& source_; + std::uint32_t duration_ = 10; + + void + clear(); + +public: + ForwardCache( + clio::Config const& config, + boost::asio::io_context& ioc, + ETLSource const& source) + : strand_(ioc), timer_(strand_), source_(source) + { + if (config.contains("cache")) + { + auto commands = + config.arrayOrThrow("cache", "ETLSource cache must be array"); + + if (config.contains("cache_duration")) + duration_ = config.valueOrThrow( + "cache_duration", + "ETLSource cache_duration must be a number"); + + for (auto const& command : commands) + { + auto key = command.valueOrThrow( + "ETLSource forward command must be array of strings"); + latestForwarded_[key] = {}; + } + } + } + + // This is to be called every freshenDuration_ seconds. + // It will request information from this etlSource, and + // will populate the cache with the latest value. If the + // request fails, it will evict that value from the cache. 
+ void + freshen(); + + std::optional + get(boost::json::object const& command) const; +}; + +class ETLSource +{ +public: + virtual bool + isConnected() const = 0; + + virtual boost::json::object + toJson() const = 0; + + virtual void + run() = 0; + + virtual void + pause() = 0; + + virtual void + resume() = 0; + + virtual std::string + toString() const = 0; + + virtual bool + hasLedger(uint32_t sequence) const = 0; + + virtual std::pair + fetchLedger( + uint32_t ledgerSequence, + bool getObjects = true, + bool getObjectNeighbors = false) = 0; + + virtual bool + loadInitialLedger( + uint32_t sequence, + std::uint32_t numMarkers, + bool cacheOnly = false) = 0; + + virtual std::optional + forwardToRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const = 0; + + virtual ~ETLSource() + { + } + +protected: + clio::Logger log_{"ETL"}; + +private: + friend ForwardCache; + friend ProbingETLSource; + + virtual std::optional + requestFromRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const = 0; +}; + +struct ETLSourceHooks +{ + enum class Action { STOP, PROCEED }; + + std::function onConnected; + std::function onDisconnected; +}; + +template +class ETLSourceImpl : public ETLSource +{ + std::string wsPort_; + + std::string grpcPort_; + + std::unique_ptr stub_; + + boost::asio::ip::tcp::resolver resolver_; + + boost::beast::flat_buffer readBuffer_; + + std::vector> validatedLedgers_; + + std::string validatedLedgersRaw_{"N/A"}; + + std::shared_ptr networkValidatedLedgers_; + + // beast::Journal journal_; + + mutable std::mutex mtx_; + + std::atomic_bool connected_{false}; + + // true if this ETL source is forwarding transactions received on the + // transactions_proposed stream. There are usually multiple ETL sources, + // so to avoid forwarding the same transaction multiple times, we only + // forward from one particular ETL source at a time. 
+ std::atomic_bool forwardingStream_{false}; + + // The last time a message was received on the ledgers stream + std::chrono::system_clock::time_point lastMsgTime_; + mutable std::mutex lastMsgTimeMtx_; + + std::shared_ptr backend_; + std::shared_ptr subscriptions_; + ETLLoadBalancer& balancer_; + + ForwardCache forwardCache_; + + std::optional + requestFromRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const override; + +protected: + Derived& + derived() + { + return static_cast(*this); + } + + std::string ip_; + + size_t numFailures_ = 0; + + boost::asio::io_context& ioc_; + + // used for retrying connections + boost::asio::steady_timer timer_; + + std::atomic_bool closing_{false}; + + std::atomic_bool paused_{false}; + + ETLSourceHooks hooks_; + + void + run() override + { + log_.trace() << toString(); + + auto const host = ip_; + auto const port = wsPort_; + + resolver_.async_resolve(host, port, [this](auto ec, auto results) { + onResolve(ec, results); + }); + } + +public: + ~ETLSourceImpl() + { + derived().close(false); + } + + bool + isConnected() const override + { + return connected_; + } + + std::chrono::system_clock::time_point + getLastMsgTime() const + { + std::lock_guard lck(lastMsgTimeMtx_); + return lastMsgTime_; + } + + void + setLastMsgTime() + { + std::lock_guard lck(lastMsgTimeMtx_); + lastMsgTime_ = std::chrono::system_clock::now(); + } + + /// Create ETL source without gRPC endpoint + /// Fetch ledger and load initial ledger will fail for this source + /// Primarly used in read-only mode, to monitor when ledgers are validated + ETLSourceImpl( + clio::Config const& config, + boost::asio::io_context& ioContext, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr networkValidatedLedgers, + ETLLoadBalancer& balancer, + ETLSourceHooks hooks) + : resolver_(boost::asio::make_strand(ioContext)) + , networkValidatedLedgers_(networkValidatedLedgers) + , 
backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , forwardCache_(config, ioContext, *this) + , ioc_(ioContext) + , timer_(ioContext) + , hooks_(hooks) + { + ip_ = config.valueOr("ip", {}); + wsPort_ = config.valueOr("ws_port", {}); + + if (auto value = config.maybeValue("grpc_port"); value) + { + grpcPort_ = *value; + try + { + boost::asio::ip::tcp::endpoint endpoint{ + boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)}; + std::stringstream ss; + ss << endpoint; + grpc::ChannelArguments chArgs; + chArgs.SetMaxReceiveMessageSize(-1); + stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub( + grpc::CreateCustomChannel( + ss.str(), grpc::InsecureChannelCredentials(), chArgs)); + log_.debug() << "Made stub for remote = " << toString(); + } + catch (std::exception const& e) + { + log_.debug() << "Exception while creating stub = " << e.what() + << " . Remote = " << toString(); + } + } + } + + /// @param sequence ledger sequence to check for + /// @return true if this source has the desired ledger + bool + hasLedger(uint32_t sequence) const override + { + std::lock_guard lck(mtx_); + for (auto& pair : validatedLedgers_) + { + if (sequence >= pair.first && sequence <= pair.second) + { + return true; + } + else if (sequence < pair.first) + { + // validatedLedgers_ is a sorted list of disjoint ranges + // if the sequence comes before this range, the sequence will + // come before all subsequent ranges + return false; + } + } + return false; + } + + /// process the validated range received on the ledgers stream. 
set the + /// appropriate member variable + /// @param range validated range received on ledgers stream + void + setValidatedRange(std::string const& range) + { + std::vector> pairs; + std::vector ranges; + boost::split(ranges, range, boost::is_any_of(",")); + for (auto& pair : ranges) + { + std::vector minAndMax; + + boost::split(minAndMax, pair, boost::is_any_of("-")); + + if (minAndMax.size() == 1) + { + uint32_t sequence = std::stoll(minAndMax[0]); + pairs.push_back(std::make_pair(sequence, sequence)); + } + else + { + assert(minAndMax.size() == 2); + uint32_t min = std::stoll(minAndMax[0]); + uint32_t max = std::stoll(minAndMax[1]); + pairs.push_back(std::make_pair(min, max)); + } + } + std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { + return left.first < right.first; + }); + + // we only hold the lock here, to avoid blocking while string processing + std::lock_guard lck(mtx_); + validatedLedgers_ = std::move(pairs); + validatedLedgersRaw_ = range; + } + + /// @return the validated range of this source + /// @note this is only used by server_info + std::string + getValidatedRange() const + { + std::lock_guard lck(mtx_); + return validatedLedgersRaw_; + } + + /// Fetch the specified ledger + /// @param ledgerSequence sequence of the ledger to fetch + /// @getObjects whether to get the account state diff between this ledger + /// and the prior one + /// @return the extracted data and the result status + std::pair + fetchLedger( + uint32_t ledgerSequence, + bool getObjects = true, + bool getObjectNeighbors = false) override; + + std::string + toString() const override + { + return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ + + ", web socket port: " + wsPort_ + ", grpc port: " + grpcPort_ + "}"; + } + + boost::json::object + toJson() const override + { + boost::json::object res; + res["validated_range"] = getValidatedRange(); + res["is_connected"] = std::to_string(isConnected()); + res["ip"] = ip_; + res["ws_port"] = wsPort_; + 
res["grpc_port"] = grpcPort_; + auto last = getLastMsgTime(); + if (last.time_since_epoch().count() != 0) + res["last_msg_age_seconds"] = std::to_string( + std::chrono::duration_cast( + std::chrono::system_clock::now() - getLastMsgTime()) + .count()); + return res; + } + + /// Download a ledger in full + /// @param ledgerSequence sequence of the ledger to download + /// @param writeQueue queue to push downloaded ledger objects + /// @return true if the download was successful + bool + loadInitialLedger( + std::uint32_t ledgerSequence, + std::uint32_t numMarkers, + bool cacheOnly = false) override; + + /// Attempt to reconnect to the ETL source + void + reconnect(boost::beast::error_code ec); + + /// Pause the source effectively stopping it from trying to reconnect + void + pause() override + { + paused_ = true; + derived().close(false); + } + + /// Resume the source allowing it to reconnect again + void + resume() override + { + paused_ = false; + derived().close(true); + } + + /// Callback + void + onResolve( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type results); + + /// Callback + virtual void + onConnect( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type::endpoint_type + endpoint) = 0; + + /// Callback + void + onHandshake(boost::beast::error_code ec); + + /// Callback + void + onWrite(boost::beast::error_code ec, size_t size); + + /// Callback + void + onRead(boost::beast::error_code ec, size_t size); + + /// Handle the most recently received message + /// @return true if the message was handled successfully. 
false on error + bool + handleMessage(); + + std::optional + forwardToRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const override; +}; + +class PlainETLSource : public ETLSourceImpl +{ + std::unique_ptr> + ws_; + +public: + PlainETLSource( + clio::Config const& config, + boost::asio::io_context& ioc, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr nwvl, + ETLLoadBalancer& balancer, + ETLSourceHooks hooks) + : ETLSourceImpl( + config, + ioc, + backend, + subscriptions, + nwvl, + balancer, + std::move(hooks)) + , ws_(std::make_unique< + boost::beast::websocket::stream>( + boost::asio::make_strand(ioc))) + { + } + + void + onConnect( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) + override; + + /// Close the websocket + /// @param startAgain whether to reconnect + void + close(bool startAgain); + + boost::beast::websocket::stream& + ws() + { + return *ws_; + } +}; + +class SslETLSource : public ETLSourceImpl +{ + std::optional> sslCtx_; + + std::unique_ptr>> + ws_; + +public: + SslETLSource( + clio::Config const& config, + boost::asio::io_context& ioc, + std::optional> sslCtx, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr nwvl, + ETLLoadBalancer& balancer, + ETLSourceHooks hooks) + : ETLSourceImpl( + config, + ioc, + backend, + subscriptions, + nwvl, + balancer, + std::move(hooks)) + , sslCtx_(sslCtx) + , ws_(std::make_unique>>( + boost::asio::make_strand(ioc_), + *sslCtx_)) + { + } + + void + onConnect( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) + override; + + void + onSslHandshake( + boost::beast::error_code ec, + boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint); + + /// Close the websocket + /// @param startAgain whether to reconnect + void + close(bool startAgain); + + 
boost::beast::websocket::stream< + boost::beast::ssl_stream>& + ws() + { + return *ws_; + } +}; + +/// This class is used to manage connections to transaction processing processes +/// This class spawns a listener for each etl source, which listens to messages +/// on the ledgers stream (to keep track of which ledgers have been validated by +/// the network, and the range of ledgers each etl source has). This class also +/// allows requests for ledger data to be load balanced across all possible etl +/// sources. +class ETLLoadBalancer +{ +private: + clio::Logger log_{"ETL"}; + std::vector> sources_; + std::uint32_t downloadRanges_ = 16; + +public: + ETLLoadBalancer( + clio::Config const& config, + boost::asio::io_context& ioContext, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr nwvl); + + static std::shared_ptr + make_ETLLoadBalancer( + clio::Config const& config, + boost::asio::io_context& ioc, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr validatedLedgers) + { + return std::make_shared( + config, ioc, backend, subscriptions, validatedLedgers); + } + + ~ETLLoadBalancer() + { + sources_.clear(); + } + + /// Load the initial ledger, writing data to the queue + /// @param sequence sequence of ledger to download + void + loadInitialLedger(uint32_t sequence, bool cacheOnly = false); + + /// Fetch data for a specific ledger. This function will continuously try + /// to fetch data for the specified ledger until the fetch succeeds, the + /// ledger is found in the database, or the server is shutting down. + /// @param ledgerSequence sequence of ledger to fetch data for + /// @param getObjects if true, fetch diff between specified ledger and + /// previous + /// @return the extracted data, if extraction was successful. 
If the ledger + /// was found in the database or the server is shutting down, the optional + /// will be empty + std::optional + fetchLedger( + uint32_t ledgerSequence, + bool getObjects, + bool getObjectNeighbors); + + /// Determine whether messages received on the transactions_proposed stream + /// should be forwarded to subscribing clients. The server subscribes to + /// transactions_proposed on multiple ETLSources, yet only forwards messages + /// from one source at any given time (to avoid sending duplicate messages + /// to clients). + /// @param in ETLSource in question + /// @return true if messages should be forwarded + bool + shouldPropagateTxnStream(ETLSource* in) const + { + for (auto& src : sources_) + { + assert(src); + // We pick the first ETLSource encountered that is connected + if (src->isConnected()) + { + if (src.get() == in) + return true; + else + return false; + } + } + + // If no sources connected, then this stream has not been forwarded + return true; + } + + boost::json::value + toJson() const + { + boost::json::array ret; + for (auto& src : sources_) + { + ret.push_back(src->toJson()); + } + return ret; + } + + /// Forward a JSON RPC request to a randomly selected rippled node + /// @param request JSON-RPC request + /// @return response received from rippled node + std::optional + forwardToRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const; + +private: + /// f is a function that takes an ETLSource as an argument and returns a + /// bool. Attempt to execute f for one randomly chosen ETLSource that has + /// the specified ledger. If f returns false, another randomly chosen + /// ETLSource is used. The process repeats until f returns true. + /// @param f function to execute. This function takes the ETL source as an + /// argument, and returns a bool. 
+ /// @param ledgerSequence f is executed for each ETLSource that has this + /// ledger + /// @return true if f was eventually executed successfully. false if the + /// ledger was found in the database or the server is shutting down + template + bool + execute(Func f, uint32_t ledgerSequence); +}; diff --git a/src/etl/NFTHelpers.cpp b/src/etl/NFTHelpers.cpp new file mode 100644 index 00000000..68e812b7 --- /dev/null +++ b/src/etl/NFTHelpers.cpp @@ -0,0 +1,416 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +#include +#include +#include + +std::pair, std::optional> +getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) +{ + // To find the minted token ID, we put all tokenIDs referenced in the + // metadata from prior to the tx application into one vector, then all + // tokenIDs referenced in the metadata from after the tx application into + // another, then find the one tokenID that was added by this tx + // application. 
+ std::vector prevIDs; + std::vector finalIDs; + + // The owner is not necessarily the issuer, if using authorized minter + // flow. Determine owner from the ledger object ID of the NFTokenPages + // that were changed. + std::optional owner; + + for (ripple::STObject const& node : txMeta.getNodes()) + { + if (node.getFieldU16(ripple::sfLedgerEntryType) != + ripple::ltNFTOKEN_PAGE) + continue; + + if (!owner) + owner = ripple::AccountID::fromVoid( + node.getFieldH256(ripple::sfLedgerIndex).data()); + + if (node.getFName() == ripple::sfCreatedNode) + { + ripple::STArray const& toAddNFTs = + node.peekAtField(ripple::sfNewFields) + .downcast() + .getFieldArray(ripple::sfNFTokens); + std::transform( + toAddNFTs.begin(), + toAddNFTs.end(), + std::back_inserter(finalIDs), + [](ripple::STObject const& nft) { + return nft.getFieldH256(ripple::sfNFTokenID); + }); + } + // Else it's modified, as there should never be a deleted NFToken page + // as a result of a mint. + else + { + // When a mint results in splitting an existing page, + // it results in a created page and a modified node. Sometimes, + // the created node needs to be linked to a third page, resulting + // in modifying that third page's PreviousPageMin or NextPageMin + // field changing, but no NFTs within that page changing. In this + // case, there will be no previous NFTs and we need to skip. + // However, there will always be NFTs listed in the final fields, + // as rippled outputs all fields in final fields even if they were + // not changed. 
+ ripple::STObject const& previousFields = + node.peekAtField(ripple::sfPreviousFields) + .downcast(); + if (!previousFields.isFieldPresent(ripple::sfNFTokens)) + continue; + + ripple::STArray const& toAddNFTs = + previousFields.getFieldArray(ripple::sfNFTokens); + std::transform( + toAddNFTs.begin(), + toAddNFTs.end(), + std::back_inserter(prevIDs), + [](ripple::STObject const& nft) { + return nft.getFieldH256(ripple::sfNFTokenID); + }); + + ripple::STArray const& toAddFinalNFTs = + node.peekAtField(ripple::sfFinalFields) + .downcast() + .getFieldArray(ripple::sfNFTokens); + std::transform( + toAddFinalNFTs.begin(), + toAddFinalNFTs.end(), + std::back_inserter(finalIDs), + [](ripple::STObject const& nft) { + return nft.getFieldH256(ripple::sfNFTokenID); + }); + } + } + + std::sort(finalIDs.begin(), finalIDs.end()); + std::sort(prevIDs.begin(), prevIDs.end()); + std::vector tokenIDResult; + std::set_difference( + finalIDs.begin(), + finalIDs.end(), + prevIDs.begin(), + prevIDs.end(), + std::inserter(tokenIDResult, tokenIDResult.begin())); + if (tokenIDResult.size() == 1 && owner) + return { + {NFTTransactionsData( + tokenIDResult.front(), txMeta, sttx.getTransactionID())}, + NFTsData( + tokenIDResult.front(), + *owner, + sttx.getFieldVL(ripple::sfURI), + txMeta)}; + + std::stringstream msg; + msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID(); + throw std::runtime_error(msg.str()); +} + +std::pair, std::optional> +getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) +{ + ripple::uint256 const tokenID = sttx.getFieldH256(ripple::sfNFTokenID); + std::vector const txs = { + NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}; + + // Determine who owned the token when it was burned by finding an + // NFTokenPage that was deleted or modified that contains this + // tokenID. 
+ for (ripple::STObject const& node : txMeta.getNodes()) + { + if (node.getFieldU16(ripple::sfLedgerEntryType) != + ripple::ltNFTOKEN_PAGE || + node.getFName() == ripple::sfCreatedNode) + continue; + + // NFT burn can result in an NFTokenPage being modified to no longer + // include the target, or an NFTokenPage being deleted. If this is + // modified, we want to look for the target in the fields prior to + // modification. If deleted, it's possible that the page was + // modified to remove the target NFT prior to the entire page being + // deleted. In this case, we need to look in the PreviousFields. + // Otherwise, the page was not modified prior to deleting and we + // need to look in the FinalFields. + std::optional prevNFTs; + + if (node.isFieldPresent(ripple::sfPreviousFields)) + { + ripple::STObject const& previousFields = + node.peekAtField(ripple::sfPreviousFields) + .downcast(); + if (previousFields.isFieldPresent(ripple::sfNFTokens)) + prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens); + } + else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode) + prevNFTs = node.peekAtField(ripple::sfFinalFields) + .downcast() + .getFieldArray(ripple::sfNFTokens); + + if (!prevNFTs) + continue; + + auto const nft = std::find_if( + prevNFTs->begin(), + prevNFTs->end(), + [&tokenID](ripple::STObject const& candidate) { + return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID; + }); + if (nft != prevNFTs->end()) + return std::make_pair( + txs, + NFTsData( + tokenID, + ripple::AccountID::fromVoid( + node.getFieldH256(ripple::sfLedgerIndex).data()), + txMeta, + true)); + } + + std::stringstream msg; + msg << " - could not determine owner at burntime for tx " + << sttx.getTransactionID(); + throw std::runtime_error(msg.str()); +} + +std::pair, std::optional> +getNFTokenAcceptOfferData( + ripple::TxMeta const& txMeta, + ripple::STTx const& sttx) +{ + // If we have the buy offer from this tx, we can determine the owner + // more easily by just looking 
at the owner of the accepted NFTokenOffer + // object. + if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer)) + { + auto const affectedBuyOffer = std::find_if( + txMeta.getNodes().begin(), + txMeta.getNodes().end(), + [&sttx](ripple::STObject const& node) { + return node.getFieldH256(ripple::sfLedgerIndex) == + sttx.getFieldH256(ripple::sfNFTokenBuyOffer); + }); + if (affectedBuyOffer == txMeta.getNodes().end()) + { + std::stringstream msg; + msg << " - unexpected NFTokenAcceptOffer data in tx " + << sttx.getTransactionID(); + throw std::runtime_error(msg.str()); + } + + ripple::uint256 const tokenID = + affectedBuyOffer->peekAtField(ripple::sfFinalFields) + .downcast() + .getFieldH256(ripple::sfNFTokenID); + + ripple::AccountID const owner = + affectedBuyOffer->peekAtField(ripple::sfFinalFields) + .downcast() + .getAccountID(ripple::sfOwner); + return { + {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}, + NFTsData(tokenID, owner, txMeta, false)}; + } + + // Otherwise we have to infer the new owner from the affected nodes. 
+ auto const affectedSellOffer = std::find_if( + txMeta.getNodes().begin(), + txMeta.getNodes().end(), + [&sttx](ripple::STObject const& node) { + return node.getFieldH256(ripple::sfLedgerIndex) == + sttx.getFieldH256(ripple::sfNFTokenSellOffer); + }); + if (affectedSellOffer == txMeta.getNodes().end()) + { + std::stringstream msg; + msg << " - unexpected NFTokenAcceptOffer data in tx " + << sttx.getTransactionID(); + throw std::runtime_error(msg.str()); + } + + ripple::uint256 const tokenID = + affectedSellOffer->peekAtField(ripple::sfFinalFields) + .downcast() + .getFieldH256(ripple::sfNFTokenID); + + ripple::AccountID const seller = + affectedSellOffer->peekAtField(ripple::sfFinalFields) + .downcast() + .getAccountID(ripple::sfOwner); + + for (ripple::STObject const& node : txMeta.getNodes()) + { + if (node.getFieldU16(ripple::sfLedgerEntryType) != + ripple::ltNFTOKEN_PAGE || + node.getFName() == ripple::sfDeletedNode) + continue; + + ripple::AccountID const nodeOwner = ripple::AccountID::fromVoid( + node.getFieldH256(ripple::sfLedgerIndex).data()); + if (nodeOwner == seller) + continue; + + ripple::STArray const& nfts = [&node] { + if (node.getFName() == ripple::sfCreatedNode) + return node.peekAtField(ripple::sfNewFields) + .downcast() + .getFieldArray(ripple::sfNFTokens); + return node.peekAtField(ripple::sfFinalFields) + .downcast() + .getFieldArray(ripple::sfNFTokens); + }(); + + auto const nft = std::find_if( + nfts.begin(), + nfts.end(), + [&tokenID](ripple::STObject const& candidate) { + return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID; + }); + if (nft != nfts.end()) + return { + {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}, + NFTsData(tokenID, nodeOwner, txMeta, false)}; + } + + std::stringstream msg; + msg << " - unexpected NFTokenAcceptOffer data in tx " + << sttx.getTransactionID(); + throw std::runtime_error(msg.str()); +} + +// This is the only transaction where there can be more than 1 element in +// the returned 
vector, because you can cancel multiple offers in one
// transaction using this feature. This transaction also never returns an
// NFTsData because it does not change the state of an NFT itself.
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenCancelOfferData(
    ripple::TxMeta const& txMeta,
    ripple::STTx const& sttx)
{
    std::vector<NFTTransactionsData> txs;
    for (ripple::STObject const& node : txMeta.getNodes())
    {
        // Only NFTokenOffer ledger entries tell us which NFTs had offers
        // cancelled; all other affected nodes are irrelevant here.
        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
            ripple::ltNFTOKEN_OFFER)
            continue;

        ripple::uint256 const tokenID =
            node.peekAtField(ripple::sfFinalFields)
                .downcast<ripple::STObject>()
                .getFieldH256(ripple::sfNFTokenID);
        txs.emplace_back(tokenID, txMeta, sttx.getTransactionID());
    }

    // Deduplicate any transactions based on tokenID/txIdx combo. Can't just
    // use txIdx because in this case one tx can cancel offers for several
    // NFTs.
    //
    // NOTE: the comparator must be a strict weak ordering for std::sort
    // (and for std::unique to see equal elements adjacently). The naive
    // `a.tokenID < b.tokenID && a.transactionIndex < b.transactionIndex`
    // is NOT one (it is not a valid ordering relation), so we compare
    // lexicographically instead.
    std::sort(
        txs.begin(),
        txs.end(),
        [](NFTTransactionsData const& a, NFTTransactionsData const& b) {
            return a.tokenID < b.tokenID ||
                (a.tokenID == b.tokenID &&
                 a.transactionIndex < b.transactionIndex);
        });
    auto last = std::unique(
        txs.begin(),
        txs.end(),
        [](NFTTransactionsData const& a, NFTTransactionsData const& b) {
            return a.tokenID == b.tokenID &&
                a.transactionIndex == b.transactionIndex;
        });
    txs.erase(last, txs.end());

    // NFTokenCancelOffer never changes the state of an NFT itself, so no
    // NFTsData is produced.
    return {txs, {}};
}

// This transaction never returns an NFTokensData because it does not
// change the state of an NFT itself.
+std::pair, std::optional> +getNFTokenCreateOfferData( + ripple::TxMeta const& txMeta, + ripple::STTx const& sttx) +{ + return { + {NFTTransactionsData( + sttx.getFieldH256(ripple::sfNFTokenID), + txMeta, + sttx.getTransactionID())}, + {}}; +} + +std::pair, std::optional> +getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) +{ + if (txMeta.getResultTER() != ripple::tesSUCCESS) + return {{}, {}}; + + switch (sttx.getTxnType()) + { + case ripple::TxType::ttNFTOKEN_MINT: + return getNFTokenMintData(txMeta, sttx); + + case ripple::TxType::ttNFTOKEN_BURN: + return getNFTokenBurnData(txMeta, sttx); + + case ripple::TxType::ttNFTOKEN_ACCEPT_OFFER: + return getNFTokenAcceptOfferData(txMeta, sttx); + + case ripple::TxType::ttNFTOKEN_CANCEL_OFFER: + return getNFTokenCancelOfferData(txMeta, sttx); + + case ripple::TxType::ttNFTOKEN_CREATE_OFFER: + return getNFTokenCreateOfferData(txMeta, sttx); + + default: + return {{}, {}}; + } +} + +std::vector +getNFTDataFromObj( + std::uint32_t const seq, + std::string const& key, + std::string const& blob) +{ + std::vector nfts; + ripple::STLedgerEntry const sle = ripple::STLedgerEntry( + ripple::SerialIter{blob.data(), blob.size()}, + ripple::uint256::fromVoid(key.data())); + + if (sle.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE) + return nfts; + + auto const owner = ripple::AccountID::fromVoid(key.data()); + for (ripple::STObject const& node : sle.getFieldArray(ripple::sfNFTokens)) + nfts.emplace_back( + node.getFieldH256(ripple::sfNFTokenID), + seq, + owner, + node.getFieldVL(ripple::sfURI)); + + return nfts; +} diff --git a/src/etl/NFTHelpers.h b/src/etl/NFTHelpers.h new file mode 100644 index 00000000..e0e64cfc --- /dev/null +++ b/src/etl/NFTHelpers.h @@ -0,0 +1,36 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include + +#include +#include + +// Pulling from tx via ReportingETL +std::pair, std::optional> +getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx); + +// Pulling from ledger object via loadInitialLedger +std::vector +getNFTDataFromObj( + std::uint32_t const seq, + std::string const& key, + std::string const& blob); diff --git a/src/etl/ProbingETLSource.cpp b/src/etl/ProbingETLSource.cpp new file mode 100644 index 00000000..31567793 --- /dev/null +++ b/src/etl/ProbingETLSource.cpp @@ -0,0 +1,219 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +using namespace clio; + +ProbingETLSource::ProbingETLSource( + clio::Config const& config, + boost::asio::io_context& ioc, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr nwvl, + ETLLoadBalancer& balancer, + boost::asio::ssl::context sslCtx) + : sslCtx_{std::move(sslCtx)} + , sslSrc_{make_shared( + config, + ioc, + std::ref(sslCtx_), + backend, + subscriptions, + nwvl, + balancer, + make_SSLHooks())} + , plainSrc_{make_shared( + config, + ioc, + backend, + subscriptions, + nwvl, + balancer, + make_PlainHooks())} +{ +} + +void +ProbingETLSource::run() +{ + sslSrc_->run(); + plainSrc_->run(); +} + +void +ProbingETLSource::pause() +{ + sslSrc_->pause(); + plainSrc_->pause(); +} + +void +ProbingETLSource::resume() +{ + sslSrc_->resume(); + plainSrc_->resume(); +} + +bool +ProbingETLSource::isConnected() const +{ + return currentSrc_ && currentSrc_->isConnected(); +} + +bool +ProbingETLSource::hasLedger(uint32_t sequence) const +{ + if (!currentSrc_) + return false; + return currentSrc_->hasLedger(sequence); +} + +boost::json::object +ProbingETLSource::toJson() const +{ + if (!currentSrc_) + { + boost::json::object sourcesJson = { + {"ws", plainSrc_->toJson()}, + {"wss", sslSrc_->toJson()}, + }; + + return { + {"probing", sourcesJson}, + }; + } + return currentSrc_->toJson(); +} + +std::string +ProbingETLSource::toString() const +{ + if (!currentSrc_) + return "{probing... 
ws: " + plainSrc_->toString() + + ", wss: " + sslSrc_->toString() + "}"; + return currentSrc_->toString(); +} + +bool +ProbingETLSource::loadInitialLedger( + std::uint32_t ledgerSequence, + std::uint32_t numMarkers, + bool cacheOnly) +{ + if (!currentSrc_) + return false; + return currentSrc_->loadInitialLedger( + ledgerSequence, numMarkers, cacheOnly); +} + +std::pair +ProbingETLSource::fetchLedger( + uint32_t ledgerSequence, + bool getObjects, + bool getObjectNeighbors) +{ + if (!currentSrc_) + return {}; + return currentSrc_->fetchLedger( + ledgerSequence, getObjects, getObjectNeighbors); +} + +std::optional +ProbingETLSource::forwardToRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const +{ + if (!currentSrc_) + return {}; + return currentSrc_->forwardToRippled(request, clientIp, yield); +} + +std::optional +ProbingETLSource::requestFromRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const +{ + if (!currentSrc_) + return {}; + return currentSrc_->requestFromRippled(request, clientIp, yield); +} + +ETLSourceHooks +ProbingETLSource::make_SSLHooks() noexcept +{ + return {// onConnected + [this](auto ec) { + std::lock_guard lck(mtx_); + if (currentSrc_) + return ETLSourceHooks::Action::STOP; + + if (!ec) + { + plainSrc_->pause(); + currentSrc_ = sslSrc_; + log_.info() << "Selected WSS as the main source: " + << currentSrc_->toString(); + } + return ETLSourceHooks::Action::PROCEED; + }, + // onDisconnected + [this](auto ec) { + std::lock_guard lck(mtx_); + if (currentSrc_) + { + currentSrc_ = nullptr; + plainSrc_->resume(); + } + return ETLSourceHooks::Action::STOP; + }}; +} + +ETLSourceHooks +ProbingETLSource::make_PlainHooks() noexcept +{ + return {// onConnected + [this](auto ec) { + std::lock_guard lck(mtx_); + if (currentSrc_) + return ETLSourceHooks::Action::STOP; + + if (!ec) + { + sslSrc_->pause(); + currentSrc_ = plainSrc_; + 
log_.info() << "Selected Plain WS as the main source: " + << currentSrc_->toString(); + } + return ETLSourceHooks::Action::PROCEED; + }, + // onDisconnected + [this](auto ec) { + std::lock_guard lck(mtx_); + if (currentSrc_) + { + currentSrc_ = nullptr; + sslSrc_->resume(); + } + return ETLSourceHooks::Action::STOP; + }}; +} diff --git a/src/etl/ProbingETLSource.h b/src/etl/ProbingETLSource.h new file mode 100644 index 00000000..5bd02921 --- /dev/null +++ b/src/etl/ProbingETLSource.h @@ -0,0 +1,112 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +/// This ETLSource implementation attempts to connect over both secure websocket +/// and plain websocket. First to connect pauses the other and the probing is +/// considered done at this point. If however the connected source loses +/// connection the probing is kickstarted again. 
+class ProbingETLSource : public ETLSource +{ + clio::Logger log_{"ETL"}; + + std::mutex mtx_; + boost::asio::ssl::context sslCtx_; + std::shared_ptr sslSrc_; + std::shared_ptr plainSrc_; + std::shared_ptr currentSrc_; + +public: + ProbingETLSource( + clio::Config const& config, + boost::asio::io_context& ioc, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr nwvl, + ETLLoadBalancer& balancer, + boost::asio::ssl::context sslCtx = boost::asio::ssl::context{ + boost::asio::ssl::context::tlsv12}); + + ~ProbingETLSource() = default; + + void + run() override; + + void + pause() override; + + void + resume() override; + + bool + isConnected() const override; + + bool + hasLedger(uint32_t sequence) const override; + + boost::json::object + toJson() const override; + + std::string + toString() const override; + + bool + loadInitialLedger( + std::uint32_t ledgerSequence, + std::uint32_t numMarkers, + bool cacheOnly = false) override; + + std::pair + fetchLedger( + uint32_t ledgerSequence, + bool getObjects = true, + bool getObjectNeighbors = false) override; + + std::optional + forwardToRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const override; + +private: + std::optional + requestFromRippled( + boost::json::object const& request, + std::string const& clientIp, + boost::asio::yield_context& yield) const override; + + ETLSourceHooks + make_SSLHooks() noexcept; + + ETLSourceHooks + make_PlainHooks() noexcept; +}; diff --git a/src/etl/README.md b/src/etl/README.md new file mode 100644 index 00000000..b52c28d5 --- /dev/null +++ b/src/etl/README.md @@ -0,0 +1,29 @@ +A single clio node has one or more ETL sources, specified in the config +file. clio will subscribe to the `ledgers` stream of each of the ETL +sources. This stream sends a message whenever a new ledger is validated. 
Upon +receiving a message on the stream, clio will then fetch the data associated +with the newly validated ledger from one of the ETL sources. The fetch is +performed via a gRPC request (`GetLedger`). This request returns the ledger +header, transactions+metadata blobs, and every ledger object +added/modified/deleted as part of this ledger. ETL then writes all of this data +to the databases, and moves on to the next ledger. ETL does not apply +transactions, but rather extracts the already computed results of those +transactions (all of the added/modified/deleted SHAMap leaf nodes of the state +tree). + +If the database is entirely empty, ETL must download an entire ledger in full +(as opposed to just the diff, as described above). This download is done via the +`GetLedgerData` gRPC request. `GetLedgerData` allows clients to page through an +entire ledger over several RPC calls. ETL will page through an entire ledger, +and write each object to the database. + +If the database is not empty, clio will first come up in a "soft" +read-only mode. In read-only mode, the server does not perform ETL and simply +publishes new ledgers as they are written to the database. +If the database is not updated within a certain time period +(currently hard coded at 20 seconds), clio will begin the ETL +process and start writing to the database. The database will report an error when +trying to write a record with a key that already exists. ETL uses this error to +determine that another process is writing to the database, and subsequently +falls back to a soft read-only mode. clio can also operate in strict +read-only mode, in which case they will never write to the database. 
diff --git a/src/etl/ReportingETL.cpp b/src/etl/ReportingETL.cpp new file mode 100644 index 00000000..2ecd4e2b --- /dev/null +++ b/src/etl/ReportingETL.cpp @@ -0,0 +1,1326 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +using namespace clio; + +namespace clio::detail { +/// Convenience function for printing out basic ledger info +std::string +toString(ripple::LedgerInfo const& info) +{ + std::stringstream ss; + ss << "LedgerInfo { Sequence : " << info.seq + << " Hash : " << strHex(info.hash) << " TxHash : " << strHex(info.txHash) + << " AccountHash : " << strHex(info.accountHash) + << " ParentHash : " << strHex(info.parentHash) << " }"; + return ss.str(); +} +} // namespace clio::detail + +FormattedTransactionsData +ReportingETL::insertTransactions( + ripple::LedgerInfo const& ledger, + org::xrpl::rpc::v1::GetLedgerResponse& data) +{ + FormattedTransactionsData result; + + for (auto& txn : + *(data.mutable_transactions_list()->mutable_transactions())) + { + std::string* raw = txn.mutable_transaction_blob(); + + ripple::SerialIter it{raw->data(), raw->size()}; + ripple::STTx sttx{it}; + + log_.trace() << "Inserting transaction = " << sttx.getTransactionID(); + + ripple::TxMeta txMeta{ + sttx.getTransactionID(), ledger.seq, txn.metadata_blob()}; + + auto const [nftTxs, maybeNFT] = getNFTDataFromTx(txMeta, sttx); + result.nfTokenTxData.insert( + result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end()); + if (maybeNFT) + result.nfTokensData.push_back(*maybeNFT); + + auto journal = ripple::debugLog(); + result.accountTxData.emplace_back( + txMeta, sttx.getTransactionID(), journal); + std::string keyStr{(const char*)sttx.getTransactionID().data(), 32}; + backend_->writeTransaction( + std::move(keyStr), + ledger.seq, + ledger.closeTime.time_since_epoch().count(), + std::move(*raw), + std::move(*txn.mutable_metadata_blob())); + } + + // Remove all but the last NFTsData for each id. 
unique removes all + // but the first of a group, so we want to reverse sort by transaction + // index + std::sort( + result.nfTokensData.begin(), + result.nfTokensData.end(), + [](NFTsData const& a, NFTsData const& b) { + return a.tokenID > b.tokenID && + a.transactionIndex > b.transactionIndex; + }); + // Now we can unique the NFTs by tokenID. + auto last = std::unique( + result.nfTokensData.begin(), + result.nfTokensData.end(), + [](NFTsData const& a, NFTsData const& b) { + return a.tokenID == b.tokenID; + }); + result.nfTokensData.erase(last, result.nfTokensData.end()); + + return result; +} + +std::optional +ReportingETL::loadInitialLedger(uint32_t startingSequence) +{ + // check that database is actually empty + auto rng = backend_->hardFetchLedgerRangeNoThrow(); + if (rng) + { + log_.fatal() << "Database is not empty"; + assert(false); + return {}; + } + + // fetch the ledger from the network. This function will not return until + // either the fetch is successful, or the server is being shutdown. This + // only fetches the ledger header and the transactions+metadata + std::optional ledgerData{ + fetchLedgerData(startingSequence)}; + if (!ledgerData) + return {}; + + ripple::LedgerInfo lgrInfo = + deserializeHeader(ripple::makeSlice(ledgerData->ledger_header())); + + log_.debug() << "Deserialized ledger header. " << detail::toString(lgrInfo); + + auto timeDiff = util::timed>([&]() { + backend_->startWrites(); + + log_.debug() << "Started writes"; + + backend_->writeLedger( + lgrInfo, std::move(*ledgerData->mutable_ledger_header())); + + log_.debug() << "Wrote ledger"; + FormattedTransactionsData insertTxResult = + insertTransactions(lgrInfo, *ledgerData); + log_.debug() << "Inserted txns"; + + // download the full account state map. This function downloads full + // ledger data and pushes the downloaded data into the writeQueue. + // asyncWriter consumes from the queue and inserts the data into the + // Ledger object. 
Once the below call returns, all data has been pushed + // into the queue + loadBalancer_->loadInitialLedger(startingSequence); + + log_.debug() << "Loaded initial ledger"; + + if (!stopping_) + { + backend_->writeAccountTransactions( + std::move(insertTxResult.accountTxData)); + backend_->writeNFTs(std::move(insertTxResult.nfTokensData)); + backend_->writeNFTTransactions( + std::move(insertTxResult.nfTokenTxData)); + } + backend_->finishWrites(startingSequence); + }); + log_.debug() << "Time to download and store ledger = " << timeDiff; + return lgrInfo; +} + +void +ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo) +{ + log_.info() << "Publishing ledger " << std::to_string(lgrInfo.seq); + + if (!writing_) + { + log_.info() << "Updating cache"; + + std::vector diff = + Backend::synchronousAndRetryOnTimeout([&](auto yield) { + return backend_->fetchLedgerDiff(lgrInfo.seq, yield); + }); + + backend_->cache().update(diff, lgrInfo.seq); + backend_->updateRange(lgrInfo.seq); + } + + setLastClose(lgrInfo.closeTime); + auto age = lastCloseAgeSeconds(); + // if the ledger closed over 10 minutes ago, assume we are still + // catching up and don't publish + if (age < 600) + { + std::optional fees = + Backend::synchronousAndRetryOnTimeout([&](auto yield) { + return backend_->fetchFees(lgrInfo.seq, yield); + }); + + std::vector transactions = + Backend::synchronousAndRetryOnTimeout([&](auto yield) { + return backend_->fetchAllTransactionsInLedger( + lgrInfo.seq, yield); + }); + + auto ledgerRange = backend_->fetchLedgerRange(); + assert(ledgerRange); + assert(fees); + + std::string range = std::to_string(ledgerRange->minSequence) + "-" + + std::to_string(ledgerRange->maxSequence); + + subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size()); + + for (auto& txAndMeta : transactions) + subscriptions_->pubTransaction(txAndMeta, lgrInfo); + + subscriptions_->pubBookChanges(lgrInfo, transactions); + + log_.info() << "Published ledger " << 
std::to_string(lgrInfo.seq); + } + else + log_.info() << "Skipping publishing ledger " + << std::to_string(lgrInfo.seq); + setLastPublish(); +} + +bool +ReportingETL::publishLedger( + uint32_t ledgerSequence, + std::optional maxAttempts) +{ + log_.info() << "Attempting to publish ledger = " << ledgerSequence; + size_t numAttempts = 0; + while (!stopping_) + { + auto range = backend_->hardFetchLedgerRangeNoThrow(); + + if (!range || range->maxSequence < ledgerSequence) + { + log_.debug() << "Trying to publish. Could not find " + "ledger with sequence = " + << ledgerSequence; + // We try maxAttempts times to publish the ledger, waiting one + // second in between each attempt. + if (maxAttempts && numAttempts >= maxAttempts) + { + log_.debug() << "Failed to publish ledger after " << numAttempts + << " attempts."; + return false; + } + std::this_thread::sleep_for(std::chrono::seconds(1)); + ++numAttempts; + continue; + } + else + { + auto lgr = Backend::synchronousAndRetryOnTimeout([&](auto yield) { + return backend_->fetchLedgerBySequence(ledgerSequence, yield); + }); + + assert(lgr); + publishLedger(*lgr); + + return true; + } + } + return false; +} + +std::optional +ReportingETL::fetchLedgerData(uint32_t seq) +{ + log_.debug() << "Attempting to fetch ledger with sequence = " << seq; + + std::optional response = + loadBalancer_->fetchLedger(seq, false, false); + if (response) + log_.trace() << "GetLedger reply = " << response->DebugString(); + return response; +} + +std::optional +ReportingETL::fetchLedgerDataAndDiff(uint32_t seq) +{ + log_.debug() << "Attempting to fetch ledger with sequence = " << seq; + + std::optional response = + loadBalancer_->fetchLedger( + seq, + true, + !backend_->cache().isFull() || + backend_->cache().latestLedgerSequence() >= seq); + if (response) + log_.trace() << "GetLedger reply = " << response->DebugString(); + return response; +} + +std::pair +ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) +{ + 
log_.debug() << "Beginning ledger update"; + ripple::LedgerInfo lgrInfo = + deserializeHeader(ripple::makeSlice(rawData.ledger_header())); + + log_.debug() << "Deserialized ledger header. " << detail::toString(lgrInfo); + backend_->startWrites(); + log_.debug() << "started writes"; + + backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header())); + log_.debug() << "wrote ledger header"; + + // Write successor info, if included from rippled + if (rawData.object_neighbors_included()) + { + log_.debug() << "object neighbors included"; + for (auto& obj : *(rawData.mutable_book_successors())) + { + auto firstBook = std::move(*obj.mutable_first_book()); + if (!firstBook.size()) + firstBook = uint256ToString(Backend::lastKey); + log_.debug() << "writing book successor " + << ripple::strHex(obj.book_base()) << " - " + << ripple::strHex(firstBook); + + backend_->writeSuccessor( + std::move(*obj.mutable_book_base()), + lgrInfo.seq, + std::move(firstBook)); + } + for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects())) + { + if (obj.mod_type() != org::xrpl::rpc::v1::RawLedgerObject::MODIFIED) + { + std::string* predPtr = obj.mutable_predecessor(); + if (!predPtr->size()) + *predPtr = uint256ToString(Backend::firstKey); + std::string* succPtr = obj.mutable_successor(); + if (!succPtr->size()) + *succPtr = uint256ToString(Backend::lastKey); + + if (obj.mod_type() == + org::xrpl::rpc::v1::RawLedgerObject::DELETED) + { + log_.debug() << "Modifying successors for deleted object " + << ripple::strHex(obj.key()) << " - " + << ripple::strHex(*predPtr) << " - " + << ripple::strHex(*succPtr); + + backend_->writeSuccessor( + std::move(*predPtr), lgrInfo.seq, std::move(*succPtr)); + } + else + { + log_.debug() << "adding successor for new object " + << ripple::strHex(obj.key()) << " - " + << ripple::strHex(*predPtr) << " - " + << ripple::strHex(*succPtr); + + backend_->writeSuccessor( + std::move(*predPtr), + lgrInfo.seq, + std::string{obj.key()}); + 
backend_->writeSuccessor( + std::string{obj.key()}, + lgrInfo.seq, + std::move(*succPtr)); + } + } + else + log_.debug() << "object modified " << ripple::strHex(obj.key()); + } + } + std::vector cacheUpdates; + cacheUpdates.reserve(rawData.ledger_objects().objects_size()); + // TODO change these to unordered_set + std::set bookSuccessorsToCalculate; + std::set modified; + for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects())) + { + auto key = ripple::uint256::fromVoidChecked(obj.key()); + assert(key); + cacheUpdates.push_back( + {*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}}); + log_.debug() << "key = " << ripple::strHex(*key) + << " - mod type = " << obj.mod_type(); + + if (obj.mod_type() != org::xrpl::rpc::v1::RawLedgerObject::MODIFIED && + !rawData.object_neighbors_included()) + { + log_.debug() << "object neighbors not included. using cache"; + if (!backend_->cache().isFull() || + backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1) + throw std::runtime_error( + "Cache is not full, but object neighbors were not " + "included"); + auto blob = obj.mutable_data(); + bool checkBookBase = false; + bool isDeleted = (blob->size() == 0); + if (isDeleted) + { + auto old = backend_->cache().get(*key, lgrInfo.seq - 1); + assert(old); + checkBookBase = isBookDir(*key, *old); + } + else + checkBookBase = isBookDir(*key, *blob); + if (checkBookBase) + { + log_.debug() << "Is book dir. key = " << ripple::strHex(*key); + auto bookBase = getBookBase(*key); + auto oldFirstDir = + backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1); + assert(oldFirstDir); + // We deleted the first directory, or we added a directory prior + // to the old first directory + if ((isDeleted && key == oldFirstDir->key) || + (!isDeleted && key < oldFirstDir->key)) + { + log_.debug() + << "Need to recalculate book base successor. 
base = " + << ripple::strHex(bookBase) + << " - key = " << ripple::strHex(*key) + << " - isDeleted = " << isDeleted + << " - seq = " << lgrInfo.seq; + bookSuccessorsToCalculate.insert(bookBase); + } + } + } + if (obj.mod_type() == org::xrpl::rpc::v1::RawLedgerObject::MODIFIED) + modified.insert(*key); + + backend_->writeLedgerObject( + std::move(*obj.mutable_key()), + lgrInfo.seq, + std::move(*obj.mutable_data())); + } + backend_->cache().update(cacheUpdates, lgrInfo.seq); + // rippled didn't send successor information, so use our cache + if (!rawData.object_neighbors_included()) + { + log_.debug() << "object neighbors not included. using cache"; + if (!backend_->cache().isFull() || + backend_->cache().latestLedgerSequence() != lgrInfo.seq) + throw std::runtime_error( + "Cache is not full, but object neighbors were not " + "included"); + for (auto const& obj : cacheUpdates) + { + if (modified.count(obj.key)) + continue; + auto lb = backend_->cache().getPredecessor(obj.key, lgrInfo.seq); + if (!lb) + lb = {Backend::firstKey, {}}; + auto ub = backend_->cache().getSuccessor(obj.key, lgrInfo.seq); + if (!ub) + ub = {Backend::lastKey, {}}; + if (obj.blob.size() == 0) + { + log_.debug() << "writing successor for deleted object " + << ripple::strHex(obj.key) << " - " + << ripple::strHex(lb->key) << " - " + << ripple::strHex(ub->key); + + backend_->writeSuccessor( + uint256ToString(lb->key), + lgrInfo.seq, + uint256ToString(ub->key)); + } + else + { + backend_->writeSuccessor( + uint256ToString(lb->key), + lgrInfo.seq, + uint256ToString(obj.key)); + backend_->writeSuccessor( + uint256ToString(obj.key), + lgrInfo.seq, + uint256ToString(ub->key)); + + log_.debug() << "writing successor for new object " + << ripple::strHex(lb->key) << " - " + << ripple::strHex(obj.key) << " - " + << ripple::strHex(ub->key); + } + } + for (auto const& base : bookSuccessorsToCalculate) + { + auto succ = backend_->cache().getSuccessor(base, lgrInfo.seq); + if (succ) + { + 
backend_->writeSuccessor( + uint256ToString(base), + lgrInfo.seq, + uint256ToString(succ->key)); + + log_.debug() + << "Updating book successor " << ripple::strHex(base) + << " - " << ripple::strHex(succ->key); + } + else + { + backend_->writeSuccessor( + uint256ToString(base), + lgrInfo.seq, + uint256ToString(Backend::lastKey)); + + log_.debug() + << "Updating book successor " << ripple::strHex(base) + << " - " << ripple::strHex(Backend::lastKey); + } + } + } + + log_.debug() + << "Inserted/modified/deleted all objects. Number of objects = " + << rawData.ledger_objects().objects_size(); + FormattedTransactionsData insertTxResult = + insertTransactions(lgrInfo, rawData); + log_.debug() << "Inserted all transactions. Number of transactions = " + << rawData.transactions_list().transactions_size(); + backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData)); + backend_->writeNFTs(std::move(insertTxResult.nfTokensData)); + backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData)); + log_.debug() << "wrote account_tx"; + + auto [success, duration] = util::timed>( + [&]() { return backend_->finishWrites(lgrInfo.seq); }); + + log_.debug() << "Finished writes. took " << std::to_string(duration); + log_.debug() << "Finished ledger update. " << detail::toString(lgrInfo); + + return {lgrInfo, success}; +} + +// Database must be populated when this starts +std::optional +ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) +{ + if (finishSequence_ && startSequence > *finishSequence_) + return {}; + + /* + * Behold, mortals! This function spawns three separate threads, which talk + * to each other via 2 different thread safe queues and 1 atomic variable. + * All threads and queues are function local. This function returns when all + * + * of the threads exit. There are two termination conditions: the first is + * if the load thread encounters a write conflict. 
In this case, the load + * thread sets writeConflict, an atomic bool, to true, which signals the + * other threads to stop. The second termination condition is when the + * entire server is shutting down, which is detected in one of three ways: + * 1. isStopping() returns true if the server is shutting down + * 2. networkValidatedLedgers_.waitUntilValidatedByNetwork returns + * false, signaling the wait was aborted. + * 3. fetchLedgerDataAndDiff returns an empty optional, signaling the fetch + * was aborted. + * In all cases, the extract thread detects this condition, + * and pushes an empty optional onto the transform queue. The transform + * thread, upon popping an empty optional, pushes an empty optional onto the + * load queue, and then returns. The load thread, upon popping an empty + * optional, returns. + */ + + log_.debug() << "Starting etl pipeline"; + writing_ = true; + + auto rng = backend_->hardFetchLedgerRangeNoThrow(); + if (!rng || rng->maxSequence < startSequence - 1) + { + assert(false); + throw std::runtime_error("runETLPipeline: parent ledger is null"); + } + std::atomic minSequence = rng->minSequence; + + std::atomic_bool writeConflict = false; + std::optional lastPublishedSequence; + uint32_t maxQueueSize = 1000 / numExtractors; + auto begin = std::chrono::system_clock::now(); + using QueueType = + ThreadSafeQueue>; + std::vector> queues; + + auto getNext = [&queues, &startSequence, &numExtractors]( + uint32_t sequence) -> std::shared_ptr { + return queues[(sequence - startSequence) % numExtractors]; + }; + std::vector extractors; + for (size_t i = 0; i < numExtractors; ++i) + { + auto transformQueue = std::make_shared(maxQueueSize); + queues.push_back(transformQueue); + + extractors.emplace_back([this, + &startSequence, + &writeConflict, + transformQueue, + i, + numExtractors]() { + beast::setCurrentThreadName("rippled: ReportingETL extract"); + uint32_t currentSequence = startSequence + i; + + double totalTime = 0; + + // there are two 
stopping conditions here. + // First, if there is a write conflict in the load thread, the + // ETL mechanism should stop. The other stopping condition is if + // the entire server is shutting down. This can be detected in a + // variety of ways. See the comment at the top of the function + while ((!finishSequence_ || currentSequence <= *finishSequence_) && + networkValidatedLedgers_->waitUntilValidatedByNetwork( + currentSequence) && + !writeConflict && !isStopping()) + { + auto [fetchResponse, time] = + util::timed>([&]() { + return fetchLedgerDataAndDiff(currentSequence); + }); + totalTime += time; + + // if the fetch is unsuccessful, stop. fetchLedger only + // returns false if the server is shutting down, or if the + // ledger was found in the database (which means another + // process already wrote the ledger that this process was + // trying to extract; this is a form of a write conflict). + // Otherwise, fetchLedgerDataAndDiff will keep trying to + // fetch the specified ledger until successful + if (!fetchResponse) + { + break; + } + auto tps = + fetchResponse->transactions_list().transactions_size() / + time; + + log_.info() << "Extract phase time = " << time + << " . Extract phase tps = " << tps + << " . Avg extract time = " + << totalTime / (currentSequence - startSequence + 1) + << " . thread num = " << i + << " . 
seq = " << currentSequence; + + transformQueue->push(std::move(fetchResponse)); + currentSequence += numExtractors; + if (finishSequence_ && currentSequence > *finishSequence_) + break; + } + // empty optional tells the transformer to shut down + transformQueue->push({}); + }); + } + + std::thread transformer{[this, + &minSequence, + &writeConflict, + &startSequence, + &getNext, + &lastPublishedSequence]() { + beast::setCurrentThreadName("rippled: ReportingETL transform"); + uint32_t currentSequence = startSequence; + + while (!writeConflict) + { + std::optional fetchResponse{ + getNext(currentSequence)->pop()}; + ++currentSequence; + // if fetchResponse is an empty optional, the extracter thread + // has stopped and the transformer should stop as well + if (!fetchResponse) + { + break; + } + if (isStopping()) + continue; + + auto numTxns = + fetchResponse->transactions_list().transactions_size(); + auto numObjects = fetchResponse->ledger_objects().objects_size(); + auto start = std::chrono::system_clock::now(); + auto [lgrInfo, success] = buildNextLedger(*fetchResponse); + auto end = std::chrono::system_clock::now(); + + auto duration = ((end - start).count()) / 1000000000.0; + if (success) + log_.info() + << "Load phase of etl : " + << "Successfully wrote ledger! Ledger info: " + << detail::toString(lgrInfo) << ". txn count = " << numTxns + << ". object count = " << numObjects + << ". load time = " << duration + << ". load txns per second = " << numTxns / duration + << ". load objs per second = " << numObjects / duration; + else + log_.error() + << "Error writing ledger. 
" << detail::toString(lgrInfo); + // success is false if the ledger was already written + if (success) + { + boost::asio::post(publishStrand_, [this, lgrInfo = lgrInfo]() { + publishLedger(lgrInfo); + }); + + lastPublishedSequence = lgrInfo.seq; + } + writeConflict = !success; + // TODO move online delete logic to an admin RPC call + if (onlineDeleteInterval_ && !deleting_ && + lgrInfo.seq - minSequence > *onlineDeleteInterval_) + { + deleting_ = true; + ioContext_.post([this, &minSequence]() { + log_.info() << "Running online delete"; + + Backend::synchronous( + [&](boost::asio::yield_context& yield) { + backend_->doOnlineDelete( + *onlineDeleteInterval_, yield); + }); + + log_.info() << "Finished online delete"; + auto rng = backend_->fetchLedgerRange(); + minSequence = rng->minSequence; + deleting_ = false; + }); + } + } + }}; + + transformer.join(); + for (size_t i = 0; i < numExtractors; ++i) + { + // pop from each queue that might be blocked on a push + getNext(i)->tryPop(); + } + // wait for all of the extractors to stop + for (auto& t : extractors) + t.join(); + auto end = std::chrono::system_clock::now(); + log_.debug() << "Extracted and wrote " + << *lastPublishedSequence - startSequence << " in " + << ((end - begin).count()) / 1000000000.0; + writing_ = false; + + log_.debug() << "Stopping etl pipeline"; + + return lastPublishedSequence; +} + +// main loop. The software begins monitoring the ledgers that are validated +// by the nework. The member networkValidatedLedgers_ keeps track of the +// sequences of ledgers validated by the network. Whenever a ledger is validated +// by the network, the software looks for that ledger in the database. Once the +// ledger is found in the database, the software publishes that ledger to the +// ledgers stream. 
If a network validated ledger is not found in the database +// after a certain amount of time, then the software attempts to take over +// responsibility of the ETL process, where it writes new ledgers to the +// database. The software will relinquish control of the ETL process if it +// detects that another process has taken over ETL. +void +ReportingETL::monitor() +{ + auto rng = backend_->hardFetchLedgerRangeNoThrow(); + if (!rng) + { + log_.info() << "Database is empty. Will download a ledger " + "from the network."; + std::optional ledger; + if (startSequence_) + { + log_.info() << "ledger sequence specified in config. " + << "Will begin ETL process starting with ledger " + << *startSequence_; + ledger = loadInitialLedger(*startSequence_); + } + else + { + log_.info() + << "Waiting for next ledger to be validated by network..."; + std::optional mostRecentValidated = + networkValidatedLedgers_->getMostRecent(); + if (mostRecentValidated) + { + log_.info() << "Ledger " << *mostRecentValidated + << " has been validated. " + << "Downloading..."; + ledger = loadInitialLedger(*mostRecentValidated); + } + else + { + log_.info() << "The wait for the next validated " + << "ledger has been aborted. " + << "Exiting monitor loop"; + return; + } + } + if (ledger) + rng = backend_->hardFetchLedgerRangeNoThrow(); + else + { + log_.error() + << "Failed to load initial ledger. Exiting monitor loop"; + return; + } + } + else + { + if (startSequence_) + { + log_.warn() + << "start sequence specified but db is already populated"; + } + log_.info() + << "Database already populated. Picking up from the tip of history"; + loadCache(rng->maxSequence); + } + assert(rng); + uint32_t nextSequence = rng->maxSequence + 1; + + log_.debug() << "Database is populated. " + << "Starting monitor loop. 
sequence = " << nextSequence; + while (true) + { + if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); + rng && rng->maxSequence >= nextSequence) + { + publishLedger(nextSequence, {}); + ++nextSequence; + } + else if (networkValidatedLedgers_->waitUntilValidatedByNetwork( + nextSequence, 1000)) + { + log_.info() << "Ledger with sequence = " << nextSequence + << " has been validated by the network. " + << "Attempting to find in database and publish"; + // Attempt to take over responsibility of ETL writer after 10 failed + // attempts to publish the ledger. publishLedger() fails if the + // ledger that has been validated by the network is not found in the + // database after the specified number of attempts. publishLedger() + // waits one second between each attempt to read the ledger from the + // database + constexpr size_t timeoutSeconds = 10; + bool success = publishLedger(nextSequence, timeoutSeconds); + if (!success) + { + log_.warn() << "Failed to publish ledger with sequence = " + << nextSequence << " . Beginning ETL"; + // doContinousETLPipelined returns the most recent sequence + // published empty optional if no sequence was published + std::optional lastPublished = + runETLPipeline(nextSequence, extractorThreads_); + log_.info() << "Aborting ETL. Falling back to publishing"; + // if no ledger was published, don't increment nextSequence + if (lastPublished) + nextSequence = *lastPublished + 1; + } + else + ++nextSequence; + } + } +} +bool +ReportingETL::loadCacheFromClioPeer( + uint32_t ledgerIndex, + std::string const& ip, + std::string const& port, + boost::asio::yield_context& yield) +{ + log_.info() << "Loading cache from peer. ip = " << ip + << " . 
port = " << port; + namespace beast = boost::beast; // from + namespace http = beast::http; // from + namespace websocket = beast::websocket; // from + namespace net = boost::asio; // from + using tcp = boost::asio::ip::tcp; // from + try + { + boost::beast::error_code ec; + // These objects perform our I/O + tcp::resolver resolver{ioContext_}; + + log_.trace() << "Creating websocket"; + auto ws = + std::make_unique>(ioContext_); + + // Look up the domain name + auto const results = resolver.async_resolve(ip, port, yield[ec]); + if (ec) + return {}; + + log_.trace() << "Connecting websocket"; + // Make the connection on the IP address we get from a lookup + ws->next_layer().async_connect(results, yield[ec]); + if (ec) + return false; + + log_.trace() << "Performing websocket handshake"; + // Perform the websocket handshake + ws->async_handshake(ip, "/", yield[ec]); + if (ec) + return false; + + std::optional marker; + + log_.trace() << "Sending request"; + auto getRequest = [&](auto marker) { + boost::json::object request = { + {"command", "ledger_data"}, + {"ledger_index", ledgerIndex}, + {"binary", true}, + {"out_of_order", true}, + {"limit", 2048}}; + + if (marker) + request["marker"] = *marker; + return request; + }; + + bool started = false; + size_t numAttempts = 0; + do + { + // Send the message + ws->async_write( + net::buffer(boost::json::serialize(getRequest(marker))), + yield[ec]); + if (ec) + { + log_.error() << "error writing = " << ec.message(); + return false; + } + + beast::flat_buffer buffer; + ws->async_read(buffer, yield[ec]); + if (ec) + { + log_.error() << "error reading = " << ec.message(); + return false; + } + + auto raw = beast::buffers_to_string(buffer.data()); + auto parsed = boost::json::parse(raw); + + if (!parsed.is_object()) + { + log_.error() << "Error parsing response: " << raw; + return false; + } + log_.trace() << "Successfully parsed response " << parsed; + + if (auto const& response = parsed.as_object(); + 
response.contains("error")) + { + log_.error() << "Response contains error: " << response; + auto const& err = response.at("error"); + if (err.is_string() && err.as_string() == "lgrNotFound") + { + ++numAttempts; + if (numAttempts >= 5) + { + log_.error() + << " ledger not found at peer after 5 attempts. " + "peer = " + << ip << " ledger = " << ledgerIndex + << ". Check your config and the health of the peer"; + return false; + } + log_.warn() << "Ledger not found. ledger = " << ledgerIndex + << ". Sleeping and trying again"; + std::this_thread::sleep_for(std::chrono::seconds(1)); + continue; + } + return false; + } + started = true; + auto const& response = parsed.as_object()["result"].as_object(); + + if (!response.contains("cache_full") || + !response.at("cache_full").as_bool()) + { + log_.error() << "cache not full for clio node. ip = " << ip; + return false; + } + if (response.contains("marker")) + marker = response.at("marker"); + else + marker = {}; + + auto const& state = response.at("state").as_array(); + + std::vector objects; + objects.reserve(state.size()); + for (auto const& ledgerObject : state) + { + auto const& obj = ledgerObject.as_object(); + + Backend::LedgerObject stateObject = {}; + + if (!stateObject.key.parseHex( + obj.at("index").as_string().c_str())) + { + log_.error() << "failed to parse object id"; + return false; + } + boost::algorithm::unhex( + obj.at("data").as_string().c_str(), + std::back_inserter(stateObject.blob)); + objects.push_back(std::move(stateObject)); + } + backend_->cache().update(objects, ledgerIndex, true); + + if (marker) + log_.debug() << "At marker " << *marker; + } while (marker || !started); + + log_.info() << "Finished downloading ledger from clio node. 
ip = " + << ip; + + backend_->cache().setFull(); + return true; + } + catch (std::exception const& e) + { + log_.error() << "Encountered exception : " << e.what() + << " - ip = " << ip; + return false; + } +} + +void +ReportingETL::loadCache(uint32_t seq) +{ + if (cacheLoadStyle_ == CacheLoadStyle::NOT_AT_ALL) + { + backend_->cache().setDisabled(); + log_.warn() << "Cache is disabled. Not loading"; + return; + } + // sanity check to make sure we are not calling this multiple times + static std::atomic_bool loading = false; + if (loading) + { + assert(false); + return; + } + loading = true; + if (backend_->cache().isFull()) + { + assert(false); + return; + } + + if (clioPeers.size() > 0) + { + boost::asio::spawn( + ioContext_, [this, seq](boost::asio::yield_context yield) { + for (auto const& peer : clioPeers) + { + // returns true on success + if (loadCacheFromClioPeer( + seq, peer.ip, std::to_string(peer.port), yield)) + return; + } + // if we couldn't successfully load from any peers, load from db + loadCacheFromDb(seq); + }); + return; + } + else + { + loadCacheFromDb(seq); + } + // If loading synchronously, poll cache until full + while (cacheLoadStyle_ == CacheLoadStyle::SYNC && + !backend_->cache().isFull()) + { + log_.debug() << "Cache not full. Cache size = " + << backend_->cache().size() << ". Sleeping ..."; + std::this_thread::sleep_for(std::chrono::seconds(10)); + log_.info() << "Cache is full. 
Cache size = " + << backend_->cache().size(); + } +} + +void +ReportingETL::loadCacheFromDb(uint32_t seq) +{ + // sanity check to make sure we are not calling this multiple times + static std::atomic_bool loading = false; + if (loading) + { + assert(false); + return; + } + loading = true; + std::vector diff; + auto append = [](auto&& a, auto&& b) { + a.insert(std::end(a), std::begin(b), std::end(b)); + }; + + for (size_t i = 0; i < numCacheDiffs_; ++i) + { + append(diff, Backend::synchronousAndRetryOnTimeout([&](auto yield) { + return backend_->fetchLedgerDiff(seq - i, yield); + })); + } + + std::sort(diff.begin(), diff.end(), [](auto a, auto b) { + return a.key < b.key || + (a.key == b.key && a.blob.size() < b.blob.size()); + }); + diff.erase( + std::unique( + diff.begin(), + diff.end(), + [](auto a, auto b) { return a.key == b.key; }), + diff.end()); + std::vector> cursors; + cursors.push_back({}); + for (auto& obj : diff) + { + if (obj.blob.size()) + cursors.push_back({obj.key}); + } + cursors.push_back({}); + std::stringstream cursorStr; + for (auto& c : cursors) + { + if (c) + cursorStr << ripple::strHex(*c) << ", "; + } + log_.info() << "Loading cache. num cursors = " << cursors.size() - 1; + log_.trace() << "cursors = " << cursorStr.str(); + + cacheDownloader_ = std::thread{[this, seq, cursors]() { + auto startTime = std::chrono::system_clock::now(); + auto markers = std::make_shared(0); + auto numRemaining = + std::make_shared(cursors.size() - 1); + for (size_t i = 0; i < cursors.size() - 1; ++i) + { + std::optional start = cursors[i]; + std::optional end = cursors[i + 1]; + markers->wait(numCacheMarkers_); + ++(*markers); + boost::asio::spawn( + ioContext_, + [this, seq, start, end, numRemaining, startTime, markers]( + boost::asio::yield_context yield) { + std::optional cursor = start; + std::string cursorStr = cursor.has_value() + ? 
ripple::strHex(cursor.value()) + : ripple::strHex(Backend::firstKey); + log_.debug() << "Starting a cursor: " << cursorStr + << " markers = " << *markers; + + while (!stopping_) + { + auto res = Backend::retryOnTimeout([this, + seq, + &cursor, + &yield]() { + return backend_->fetchLedgerPage( + cursor, seq, cachePageFetchSize_, false, yield); + }); + backend_->cache().update(res.objects, seq, true); + if (!res.cursor || (end && *(res.cursor) > *end)) + break; + log_.trace() + << "Loading cache. cache size = " + << backend_->cache().size() << " - cursor = " + << ripple::strHex(res.cursor.value()) + << " start = " << cursorStr + << " markers = " << *markers; + + cursor = std::move(res.cursor); + } + --(*markers); + markers->notify_one(); + if (--(*numRemaining) == 0) + { + auto endTime = std::chrono::system_clock::now(); + auto duration = + std::chrono::duration_cast( + endTime - startTime); + log_.info() << "Finished loading cache. cache size = " + << backend_->cache().size() << ". Took " + << duration.count() << " seconds"; + backend_->cache().setFull(); + } + else + { + log_.info() << "Finished a cursor. num remaining = " + << *numRemaining << " start = " << cursorStr + << " markers = " << *markers; + } + }); + } + }}; +} + +void +ReportingETL::monitorReadOnly() +{ + log_.debug() << "Starting reporting in strict read only mode"; + auto rng = backend_->hardFetchLedgerRangeNoThrow(); + uint32_t latestSequence; + if (!rng) + if (auto net = networkValidatedLedgers_->getMostRecent()) + latestSequence = *net; + else + return; + else + latestSequence = rng->maxSequence; + loadCache(latestSequence); + latestSequence++; + while (true) + { + if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); + rng && rng->maxSequence >= latestSequence) + { + publishLedger(latestSequence, {}); + latestSequence = latestSequence + 1; + } + else // if we can't, wait until it's validated by the network, or 1 + // second passes, whichever occurs first. 
Even if we don't hear + // from rippled, if ledgers are being written to the db, we + // publish them + networkValidatedLedgers_->waitUntilValidatedByNetwork( + latestSequence, 1000); + } +} + +void +ReportingETL::doWork() +{ + worker_ = std::thread([this]() { + beast::setCurrentThreadName("rippled: ReportingETL worker"); + if (readOnly_) + monitorReadOnly(); + else + monitor(); + }); +} + +ReportingETL::ReportingETL( + clio::Config const& config, + boost::asio::io_context& ioc, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr ledgers) + : backend_(backend) + , subscriptions_(subscriptions) + , loadBalancer_(balancer) + , ioContext_(ioc) + , publishStrand_(ioc) + , networkValidatedLedgers_(ledgers) +{ + startSequence_ = config.maybeValue("start_sequence"); + finishSequence_ = config.maybeValue("finish_sequence"); + readOnly_ = config.valueOr("read_only", readOnly_); + + if (auto interval = config.maybeValue("online_delete"); interval) + { + auto const max = std::numeric_limits::max(); + if (*interval > max) + { + std::stringstream msg; + msg << "online_delete cannot be greater than " + << std::to_string(max); + throw std::runtime_error(msg.str()); + } + if (*interval > 0) + onlineDeleteInterval_ = *interval; + } + + extractorThreads_ = + config.valueOr("extractor_threads", extractorThreads_); + txnThreshold_ = config.valueOr("txn_threshold", txnThreshold_); + if (config.contains("cache")) + { + auto const cache = config.section("cache"); + if (auto entry = cache.maybeValue("load"); entry) + { + if (boost::iequals(*entry, "sync")) + cacheLoadStyle_ = CacheLoadStyle::SYNC; + if (boost::iequals(*entry, "async")) + cacheLoadStyle_ = CacheLoadStyle::ASYNC; + if (boost::iequals(*entry, "none") or boost::iequals(*entry, "no")) + cacheLoadStyle_ = CacheLoadStyle::NOT_AT_ALL; + } + + numCacheDiffs_ = cache.valueOr("num_diffs", numCacheDiffs_); + numCacheMarkers_ = + cache.valueOr("num_markers", numCacheMarkers_); + 
cachePageFetchSize_ = + cache.valueOr("page_fetch_size", cachePageFetchSize_); + + if (auto peers = cache.maybeArray("peers"); peers) + { + for (auto const& peer : *peers) + { + auto ip = peer.value("ip"); + auto port = peer.value("port"); + + // todo: use emplace_back when clang is ready + clioPeers.push_back({ip, port}); + } + unsigned seed = + std::chrono::system_clock::now().time_since_epoch().count(); + + std::shuffle( + clioPeers.begin(), + clioPeers.end(), + std::default_random_engine(seed)); + } + } +} diff --git a/src/etl/ReportingETL.h b/src/etl/ReportingETL.h new file mode 100644 index 00000000..c23ba378 --- /dev/null +++ b/src/etl/ReportingETL.h @@ -0,0 +1,408 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h" +#include + +#include +#include +#include + +#include + +struct AccountTransactionsData; +struct NFTTransactionsData; +struct NFTsData; +struct FormattedTransactionsData +{ + std::vector accountTxData; + std::vector nfTokenTxData; + std::vector nfTokensData; +}; +class SubscriptionManager; + +/** + * This class is responsible for continuously extracting data from a + * p2p node, and writing that data to the databases. Usually, multiple different + * processes share access to the same network accessible databases, in which + * case only one such process is performing ETL and writing to the database. The + * other processes simply monitor the database for new ledgers, and publish + * those ledgers to the various subscription streams. If a monitoring process + * determines that the ETL writer has failed (no new ledgers written for some + * time), the process will attempt to become the ETL writer. If there are + * multiple monitoring processes that try to become the ETL writer at the same + * time, one will win out, and the others will fall back to + * monitoring/publishing. In this sense, this class dynamically transitions from + * monitoring to writing and from writing to monitoring, based on the activity + * of other processes running on different machines. 
+ */ +class ReportingETL +{ +private: + clio::Logger log_{"ETL"}; + + std::shared_ptr backend_; + std::shared_ptr subscriptions_; + std::shared_ptr loadBalancer_; + std::optional onlineDeleteInterval_; + std::uint32_t extractorThreads_ = 1; + + enum class CacheLoadStyle { ASYNC, SYNC, NOT_AT_ALL }; + + CacheLoadStyle cacheLoadStyle_ = CacheLoadStyle::ASYNC; + + // number of diffs to use to generate cursors to traverse the ledger in + // parallel during initial cache download + size_t numCacheDiffs_ = 32; + // number of markers to use at one time to traverse the ledger in parallel + // during initial cache download + size_t numCacheMarkers_ = 48; + // number of ledger objects to fetch concurrently per marker during cache + // download + size_t cachePageFetchSize_ = 512; + // thread responsible for syncing the cache on startup + std::thread cacheDownloader_; + + struct ClioPeer + { + std::string ip; + int port; + }; + + std::vector clioPeers; + + std::thread worker_; + boost::asio::io_context& ioContext_; + + /// Strand to ensure that ledgers are published in order. + /// If ETL is started far behind the network, ledgers will be written and + /// published very rapidly. Monitoring processes will publish ledgers as + /// they are written. However, to publish a ledger, the monitoring process + /// needs to read all of the transactions for that ledger from the database. + /// Reading the transactions from the database requires network calls, which + /// can be slow. It is imperative however that the monitoring processes keep + /// up with the writer, else the monitoring processes will not be able to + /// detect if the writer failed. Therefore, publishing each ledger (which + /// includes reading all of the transactions from the database) is done from + /// the application wide asio io_service, and a strand is used to ensure + /// ledgers are published in order + boost::asio::io_context::strand publishStrand_; + + /// Mechanism for communicating with ETL sources. 
ETLLoadBalancer wraps an + /// arbitrary number of ETL sources and load balances ETL requests across + /// those sources. + + /// Mechanism for detecting when the network has validated a new ledger. + /// This class provides a way to wait for a specific ledger to be validated + std::shared_ptr networkValidatedLedgers_; + + /// Whether the software is stopping + std::atomic_bool stopping_ = false; + /// Whether the software is performing online delete + // TODO this needs to live in the database, so diff servers can coordinate + // deletion + std::atomic_bool deleting_ = false; + + /// This variable controls the number of GetLedgerData calls that will be + /// executed in parallel during the initial ledger download. GetLedgerData + /// allows clients to page through a ledger over many RPC calls. + /// GetLedgerData returns a marker that is used as an offset in a subsequent + /// call. If numMarkers_ is greater than 1, there will be multiple chains of + /// GetLedgerData calls iterating over different parts of the same ledger in + /// parallel. This can dramatically speed up the time to download the + /// initial ledger. However, a higher value for this member variable puts + /// more load on the ETL source. + size_t numMarkers_ = 2; + + /// Whether the process is in strict read-only mode. In strict read-only + /// mode, the process will never attempt to become the ETL writer, and will + /// only publish ledgers as they are written to the database. + bool readOnly_ = false; + + /// Whether the process is writing to the database. Used by server_info + std::atomic_bool writing_ = false; + + /// Ledger sequence to start ETL from. If this is empty, ETL will start from + /// the next ledger validated by the network. If this is set, and the + /// database is already populated, an error is thrown. + std::optional startSequence_; + std::optional finishSequence_; + + size_t txnThreshold_ = 0; + + /// The time that the most recently published ledger was published. 
Used by + /// server_info + std::chrono::time_point lastPublish_; + + mutable std::shared_mutex publishTimeMtx_; + + void + setLastPublish() + { + std::scoped_lock lck(publishTimeMtx_); + lastPublish_ = std::chrono::system_clock::now(); + } + + /// The time that the most recently published ledger was closed. + std::chrono::time_point lastCloseTime_; + + mutable std::shared_mutex closeTimeMtx_; + + void + setLastClose(std::chrono::time_point lastCloseTime) + { + std::scoped_lock lck(closeTimeMtx_); + lastCloseTime_ = lastCloseTime; + } + + /// Download a ledger with specified sequence in full, via GetLedgerData, + /// and write the data to the databases. This takes several minutes or + /// longer. + /// @param sequence the sequence of the ledger to download + /// @return The ledger downloaded, with a full transaction and account state + /// map + std::optional + loadInitialLedger(uint32_t sequence); + + /// Populates the cache by walking through the given ledger. Should only be + /// called once. The default behavior is to return immediately and populate + /// the cache in the background. This can be overridden via config + /// parameter, to populate synchronously, or not at all + void + loadCache(uint32_t seq); + + void + loadCacheFromDb(uint32_t seq); + + bool + loadCacheFromClioPeer( + uint32_t ledgerSequence, + std::string const& ip, + std::string const& port, + boost::asio::yield_context& yield); + + /// Run ETL. Extracts ledgers and writes them to the database, until a + /// write conflict occurs (or the server shuts down). + /// @note database must already be populated when this function is + /// called + /// @param startSequence the first ledger to extract + /// @return the last ledger written to the database, if any + std::optional + runETLPipeline(uint32_t startSequence, int offset); + + /// Monitor the network for newly validated ledgers. Also monitor the + /// database to see if any process is writing those ledgers. 
This function + /// is called when the application starts, and will only return when the + /// application is shutting down. If the software detects the database is + /// empty, this function will call loadInitialLedger(). If the software + /// detects ledgers are not being written, this function calls + /// runETLPipeline(). Otherwise, this function publishes ledgers as they are + /// written to the database. + void + monitor(); + + /// Monitor the database for newly written ledgers. + /// Similar to the monitor(), except this function will never call + /// runETLPipeline() or loadInitialLedger(). This function only publishes + /// ledgers as they are written to the database. + void + monitorReadOnly(); + + /// Extract data for a particular ledger from an ETL source. This function + /// continously tries to extract the specified ledger (using all available + /// ETL sources) until the extraction succeeds, or the server shuts down. + /// @param sequence sequence of the ledger to extract + /// @return ledger header and transaction+metadata blobs. Empty optional + /// if the server is shutting down + std::optional + fetchLedgerData(uint32_t sequence); + + /// Extract data for a particular ledger from an ETL source. This function + /// continously tries to extract the specified ledger (using all available + /// ETL sources) until the extraction succeeds, or the server shuts down. + /// @param sequence sequence of the ledger to extract + /// @return ledger header, transaction+metadata blobs, and all ledger + /// objects created, modified or deleted between this ledger and the parent. + /// Empty optional if the server is shutting down + std::optional + fetchLedgerDataAndDiff(uint32_t sequence); + + /// Insert all of the extracted transactions into the ledger, returning + /// transactions related to accounts, transactions related to NFTs, and + /// NFTs themselves for later processsing. 
+ /// @param ledger ledger to insert transactions into + /// @param data data extracted from an ETL source + /// @return struct that contains the neccessary info to write to the + /// account_transactions/account_tx and nft_token_transactions tables + /// (mostly transaction hashes, corresponding nodestore hashes and affected + /// accounts) + FormattedTransactionsData + insertTransactions( + ripple::LedgerInfo const& ledger, + org::xrpl::rpc::v1::GetLedgerResponse& data); + + // TODO update this documentation + /// Build the next ledger using the previous ledger and the extracted data. + /// This function calls insertTransactions() + /// @note rawData should be data that corresponds to the ledger immediately + /// following parent + /// @param parent the previous ledger + /// @param rawData data extracted from an ETL source + /// @return the newly built ledger and data to write to the database + std::pair + buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData); + + /// Attempt to read the specified ledger from the database, and then publish + /// that ledger to the ledgers stream. + /// @param ledgerSequence the sequence of the ledger to publish + /// @param maxAttempts the number of times to attempt to read the ledger + /// from the database. 1 attempt per second + /// @return whether the ledger was found in the database and published + bool + publishLedger(uint32_t ledgerSequence, std::optional maxAttempts); + + /// Publish the passed in ledger + /// @param ledger the ledger to publish + void + publishLedger(ripple::LedgerInfo const& lgrInfo); + + bool + isStopping() + { + return stopping_; + } + + /// Get the number of markers to use during the initial ledger download. 
+ /// This is equivelent to the degree of parallelism during the initial + /// ledger download + /// @return the number of markers + std::uint32_t + getNumMarkers() + { + return numMarkers_; + } + + /// start all of the necessary components and begin ETL + void + run() + { + log_.info() << "Starting reporting etl"; + stopping_ = false; + + doWork(); + } + + void + doWork(); + +public: + ReportingETL( + clio::Config const& config, + boost::asio::io_context& ioc, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr ledgers); + + static std::shared_ptr + make_ReportingETL( + clio::Config const& config, + boost::asio::io_context& ioc, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr ledgers) + { + auto etl = std::make_shared( + config, ioc, backend, subscriptions, balancer, ledgers); + + etl->run(); + + return etl; + } + + ~ReportingETL() + { + log_.info() << "onStop called"; + log_.debug() << "Stopping Reporting ETL"; + stopping_ = true; + + if (worker_.joinable()) + worker_.join(); + if (cacheDownloader_.joinable()) + cacheDownloader_.join(); + + log_.debug() << "Joined ReportingETL worker thread"; + } + + boost::json::object + getInfo() const + { + boost::json::object result; + + result["etl_sources"] = loadBalancer_->toJson(); + result["is_writer"] = writing_.load(); + result["read_only"] = readOnly_; + auto last = getLastPublish(); + if (last.time_since_epoch().count() != 0) + result["last_publish_age_seconds"] = + std::to_string(lastPublishAgeSeconds()); + return result; + } + + std::chrono::time_point + getLastPublish() const + { + std::shared_lock lck(publishTimeMtx_); + return lastPublish_; + } + + std::uint32_t + lastPublishAgeSeconds() const + { + return std::chrono::duration_cast( + std::chrono::system_clock::now() - getLastPublish()) + .count(); + } + + std::uint32_t + lastCloseAgeSeconds() const + { + std::shared_lock lck(closeTimeMtx_); + auto 
now = std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count(); + auto closeTime = lastCloseTime_.time_since_epoch().count(); + if (now < (rippleEpochStart + closeTime)) + return 0; + return now - (rippleEpochStart + closeTime); + } +}; diff --git a/src/log/Logger.cpp b/src/log/Logger.cpp new file mode 100644 index 00000000..4b25147a --- /dev/null +++ b/src/log/Logger.cpp @@ -0,0 +1,209 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include +#include +#include + +namespace clio { + +Logger LogService::general_log_ = Logger{"General"}; +Logger LogService::alert_log_ = Logger{"Alert"}; + +std::ostream& +operator<<(std::ostream& stream, Severity sev) +{ + static constexpr std::array labels = { + "TRC", + "DBG", + "NFO", + "WRN", + "ERR", + "FTL", + }; + + return stream << labels.at(static_cast(sev)); +} + +Severity +tag_invoke(boost::json::value_to_tag, boost::json::value const& value) +{ + if (not value.is_string()) + throw std::runtime_error("`log_level` must be a string"); + auto const& logLevel = value.as_string(); + + if (boost::iequals(logLevel, "trace")) + return Severity::TRC; + else if (boost::iequals(logLevel, "debug")) + return Severity::DBG; + else if (boost::iequals(logLevel, "info")) + return Severity::NFO; + else if ( + boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn")) + return Severity::WRN; + else if (boost::iequals(logLevel, "error")) + return Severity::ERR; + else if (boost::iequals(logLevel, "fatal")) + return Severity::FTL; + else + throw std::runtime_error( + "Could not parse `log_level`: expected `trace`, `debug`, `info`, " + "`warning`, `error` or `fatal`"); +} + +void +LogService::init(Config const& config) +{ + namespace src = boost::log::sources; + namespace keywords = boost::log::keywords; + namespace sinks = boost::log::sinks; + + boost::log::add_common_attributes(); + boost::log::register_simple_formatter_factory("Severity"); + auto const defaultFormat = + "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% " + "%Message%"; + std::string format = + config.valueOr("log_format", defaultFormat); + + if (config.valueOr("log_to_console", false)) + { + boost::log::add_console_log(std::cout, keywords::format = format); + } + + auto logDir = config.maybeValue("log_directory"); + if (logDir) + { + boost::filesystem::path 
dirPath{logDir.value()}; + if (!boost::filesystem::exists(dirPath)) + boost::filesystem::create_directories(dirPath); + auto const rotationSize = + config.valueOr("log_rotation_size", 2048u) * 1024u * + 1024u; + auto const rotationPeriod = + config.valueOr("log_rotation_hour_interval", 12u); + auto const dirSize = + config.valueOr("log_directory_max_size", 50u * 1024u) * + 1024u * 1024u; + auto fileSink = boost::log::add_file_log( + keywords::file_name = dirPath / "clio.log", + keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log", + keywords::auto_flush = true, + keywords::format = format, + keywords::open_mode = std::ios_base::app, + keywords::rotation_size = rotationSize, + keywords::time_based_rotation = + sinks::file::rotation_at_time_interval( + boost::posix_time::hours(rotationPeriod))); + fileSink->locked_backend()->set_file_collector( + sinks::file::make_collector( + keywords::target = dirPath, keywords::max_size = dirSize)); + fileSink->locked_backend()->scan_for_files(); + } + + // get default severity, can be overridden per channel using + // the `log_channels` array + auto defaultSeverity = config.valueOr("log_level", Severity::NFO); + static constexpr std::array channels = { + "General", + "WebServer", + "Backend", + "RPC", + "ETL", + "Subscriptions", + "Performance", + }; + + auto core = boost::log::core::get(); + auto min_severity = boost::log::expressions::channel_severity_filter( + log_channel, log_severity); + + for (auto const& channel : channels) + min_severity[channel] = defaultSeverity; + min_severity["Alert"] = + Severity::WRN; // Channel for alerts, always warning severity + + for (auto const overrides = config.arrayOr("log_channels", {}); + auto const& cfg : overrides) + { + auto name = cfg.valueOrThrow( + "channel", "Channel name is required"); + if (not std::count(std::begin(channels), std::end(channels), name)) + throw std::runtime_error( + "Can't override settings for log channel " + name + + ": invalid channel"); + + 
min_severity[name] = + cfg.valueOr("log_level", defaultSeverity); + } + + core->set_filter(min_severity); + LogService::info() << "Default log level = " << defaultSeverity; +} + +Logger::Pump +Logger::trace(source_location_t const& loc) const +{ + return {logger_, Severity::TRC, loc}; +}; +Logger::Pump +Logger::debug(source_location_t const& loc) const +{ + return {logger_, Severity::DBG, loc}; +}; +Logger::Pump +Logger::info(source_location_t const& loc) const +{ + return {logger_, Severity::NFO, loc}; +}; +Logger::Pump +Logger::warn(source_location_t const& loc) const +{ + return {logger_, Severity::WRN, loc}; +}; +Logger::Pump +Logger::error(source_location_t const& loc) const +{ + return {logger_, Severity::ERR, loc}; +}; +Logger::Pump +Logger::fatal(source_location_t const& loc) const +{ + return {logger_, Severity::FTL, loc}; +}; + +std::string +Logger::Pump::pretty_path(source_location_t const& loc, size_t max_depth) const +{ + auto const file_path = std::string{loc.file_name()}; + auto idx = file_path.size(); + while (max_depth-- > 0) + { + idx = file_path.rfind('/', idx - 1); + if (idx == std::string::npos || idx == 0) + break; + } + return file_path.substr(idx == std::string::npos ? 0 : idx + 1) + ':' + + std::to_string(loc.line()); +} + +} // namespace clio diff --git a/src/log/Logger.h b/src/log/Logger.h new file mode 100644 index 00000000..7793a4b3 --- /dev/null +++ b/src/log/Logger.h @@ -0,0 +1,314 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(HAS_SOURCE_LOCATION) && __has_builtin(__builtin_source_location) +// this is used by fully compatible compilers like gcc +#include + +#elif defined(HAS_EXPERIMENTAL_SOURCE_LOCATION) +// this is used by clang on linux where source_location is still not out of +// experimental headers +#include +#endif + +#include +#include + +namespace clio { + +class Config; +#if defined(HAS_SOURCE_LOCATION) && __has_builtin(__builtin_source_location) +using source_location_t = std::source_location; +#define CURRENT_SRC_LOCATION source_location_t::current() + +#elif defined(HAS_EXPERIMENTAL_SOURCE_LOCATION) +using source_location_t = std::experimental::source_location; +#define CURRENT_SRC_LOCATION source_location_t::current() + +#else +// A workaround for AppleClang that is lacking source_location atm. 
+// TODO: remove this workaround when all compilers catch up to c++20 +class SourceLocation +{ + std::string_view file_; + std::size_t line_; + +public: + SourceLocation(std::string_view file, std::size_t line) + : file_{file}, line_{line} + { + } + std::string_view + file_name() const + { + return file_; + } + std::size_t + line() const + { + return line_; + } +}; +using source_location_t = SourceLocation; +#define CURRENT_SRC_LOCATION \ + source_location_t(__builtin_FILE(), __builtin_LINE()) +#endif + +/** + * @brief Custom severity levels for @ref Logger. + */ +enum class Severity { + TRC, + DBG, + NFO, + WRN, + ERR, + FTL, +}; + +BOOST_LOG_ATTRIBUTE_KEYWORD(log_severity, "Severity", Severity); +BOOST_LOG_ATTRIBUTE_KEYWORD(log_channel, "Channel", std::string); + +/** + * @brief Custom labels for @ref Severity in log output. + * + * @param stream std::ostream The output stream + * @param sev Severity The severity to output to the ostream + * @return std::ostream& The same ostream we were given + */ +std::ostream& +operator<<(std::ostream& stream, Severity sev); + +/** + * @brief Custom JSON parser for @ref Severity. + * + * @param value The JSON string to parse + * @return Severity The parsed severity + * @throws std::runtime_error Thrown if severity is not in the right format + */ +Severity +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& value); + +/** + * @brief A simple thread-safe logger for the channel specified + * in the constructor. + * + * This is cheap to copy and move. Designed to be used as a member variable or + * otherwise. See @ref LogService::init() for setup of the logging core and + * severity levels for each channel. + */ +class Logger final +{ + using logger_t = + boost::log::sources::severity_channel_logger_mt; + mutable logger_t logger_; + + friend class LogService; // to expose the Pump interface + + /** + * @brief Helper that pumps data into a log record via `operator<<`. 
+ */ + class Pump final + { + using pump_opt_t = + std::optional>; + + boost::log::record rec_; + pump_opt_t pump_ = std::nullopt; + + public: + ~Pump() = default; + Pump(logger_t& logger, Severity sev, source_location_t const& loc) + : rec_{logger.open_record(boost::log::keywords::severity = sev)} + { + if (rec_) + { + pump_.emplace(boost::log::aux::make_record_pump(logger, rec_)); + pump_->stream() << boost::log::add_value( + "SourceLocation", pretty_path(loc)); + } + } + + Pump(Pump&&) = delete; + Pump(Pump const&) = delete; + Pump& + operator=(Pump const&) = delete; + Pump& + operator=(Pump&&) = delete; + + /** + * @brief Perfectly forwards any incoming data into the underlying + * boost::log pump if the pump is available. nop otherwise. + * + * @tparam T Type of data to pump + * @param data The data to pump + * @return Pump& Reference to itself for chaining + */ + template + [[maybe_unused]] Pump& + operator<<(T&& data) + { + if (pump_) + pump_->stream() << std::forward(data); + return *this; + } + + private: + [[nodiscard]] std::string + pretty_path(source_location_t const& loc, size_t max_depth = 3) const; + }; + +public: + ~Logger() = default; + /** + * @brief Construct a new Logger object that produces loglines for the + * specified channel. + * + * See @ref LogService::init() for general setup and configuration of + * severity levels per channel. + * + * @param channel The channel this logger will report into. + */ + Logger(std::string channel) + : logger_{boost::log::keywords::channel = channel} + { + } + Logger(Logger const&) = default; + Logger(Logger&&) = default; + Logger& + operator=(Logger const&) = default; + Logger& + operator=(Logger&&) = default; + + /*! Interface for logging at @ref Severity::TRC severity */ + [[nodiscard]] Pump + trace(source_location_t const& loc = CURRENT_SRC_LOCATION) const; + + /*! 
Interface for logging at @ref Severity::DBG severity */ + [[nodiscard]] Pump + debug(source_location_t const& loc = CURRENT_SRC_LOCATION) const; + + /*! Interface for logging at @ref Severity::INFO severity */ + [[nodiscard]] Pump + info(source_location_t const& loc = CURRENT_SRC_LOCATION) const; + + /*! Interface for logging at @ref Severity::WRN severity */ + [[nodiscard]] Pump + warn(source_location_t const& loc = CURRENT_SRC_LOCATION) const; + + /*! Interface for logging at @ref Severity::ERR severity */ + [[nodiscard]] Pump + error(source_location_t const& loc = CURRENT_SRC_LOCATION) const; + + /*! Interface for logging at @ref Severity::FTL severity */ + [[nodiscard]] Pump + fatal(source_location_t const& loc = CURRENT_SRC_LOCATION) const; +}; + +/** + * @brief A global logging service. + * + * Used to initialize and setup the logging core as well as a globally available + * entrypoint for logging into the `General` channel as well as raising alerts. + */ +class LogService +{ + static Logger general_log_; /*! Global logger for General channel */ + static Logger alert_log_; /*! Global logger for Alerts channel */ + +public: + LogService() = delete; + + /** + * @brief Global log core initialization from a @ref Config + */ + static void + init(Config const& config); + + /*! Globally accesible General logger at @ref Severity::TRC severity */ + [[nodiscard]] static Logger::Pump + trace(source_location_t const& loc = CURRENT_SRC_LOCATION) + { + return general_log_.trace(loc); + } + + /*! Globally accesible General logger at @ref Severity::DBG severity */ + [[nodiscard]] static Logger::Pump + debug(source_location_t const& loc = CURRENT_SRC_LOCATION) + { + return general_log_.debug(loc); + } + + /*! Globally accesible General logger at @ref Severity::NFO severity */ + [[nodiscard]] static Logger::Pump + info(source_location_t const& loc = CURRENT_SRC_LOCATION) + { + return general_log_.info(loc); + } + + /*! 
Globally accesible General logger at @ref Severity::WRN severity */ + [[nodiscard]] static Logger::Pump + warn(source_location_t const& loc = CURRENT_SRC_LOCATION) + { + return general_log_.warn(loc); + } + + /*! Globally accesible General logger at @ref Severity::ERR severity */ + [[nodiscard]] static Logger::Pump + error(source_location_t const& loc = CURRENT_SRC_LOCATION) + { + return general_log_.error(loc); + } + + /*! Globally accesible General logger at @ref Severity::FTL severity */ + [[nodiscard]] static Logger::Pump + fatal(source_location_t const& loc = CURRENT_SRC_LOCATION) + { + return general_log_.fatal(loc); + } + + /*! Globally accesible Alert logger */ + [[nodiscard]] static Logger::Pump + alert(source_location_t const& loc = CURRENT_SRC_LOCATION) + { + return alert_log_.warn(loc); + } +}; + +}; // namespace clio diff --git a/src/main/Build.h b/src/main/Build.h new file mode 100644 index 00000000..2d396a7c --- /dev/null +++ b/src/main/Build.h @@ -0,0 +1,32 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include + +namespace Build { + +std::string const& +getClioVersionString(); + +std::string const& +getClioFullVersionString(); + +} // namespace Build diff --git a/src/main/main.cpp b/src/main/main.cpp new file mode 100644 index 00000000..051418c5 --- /dev/null +++ b/src/main/main.cpp @@ -0,0 +1,245 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#ifdef GRPC_TSAN_ENABLED +#undef GRPC_TSAN_ENABLED +#endif +#ifdef GRPC_ASAN_ENABLED +#undef GRPC_ASAN_ENABLED +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include
+#include +#include +#include +#include +#include + +using namespace clio; +namespace po = boost::program_options; + +/** + * @brief Parse command line and return path to configuration file + * + * @param argc + * @param argv + * @return std::string Path to configuration file + */ +std::string +parseCli(int argc, char* argv[]) +{ + static constexpr char defaultConfigPath[] = "/etc/opt/clio/config.json"; + + // clang-format off + po::options_description description("Options"); + description.add_options() + ("help,h", "print help message and exit") + ("version,v", "print version and exit") + ("conf,c", po::value()->default_value(defaultConfigPath), "configuration file") + ; + // clang-format on + po::positional_options_description positional; + positional.add("conf", 1); + + po::variables_map parsed; + po::store( + po::command_line_parser(argc, argv) + .options(description) + .positional(positional) + .run(), + parsed); + po::notify(parsed); + + if (parsed.count("version")) + { + std::cout << Build::getClioFullVersionString() << '\n'; + std::exit(EXIT_SUCCESS); + } + + if (parsed.count("help")) + { + std::cout << "Clio server " << Build::getClioFullVersionString() + << "\n\n" + << description; + std::exit(EXIT_SUCCESS); + } + + return parsed["conf"].as(); +} + +/** + * @brief Parse certificates from configuration file + * + * @param config The configuration + * @return std::optional SSL context if certificates were parsed + */ +std::optional +parseCerts(Config const& config) +{ + if (!config.contains("ssl_cert_file") || !config.contains("ssl_key_file")) + return {}; + + auto certFilename = config.value("ssl_cert_file"); + auto keyFilename = config.value("ssl_key_file"); + + std::ifstream readCert(certFilename, std::ios::in | std::ios::binary); + if (!readCert) + return {}; + + std::stringstream contents; + contents << readCert.rdbuf(); + std::string cert = contents.str(); + + std::ifstream readKey(keyFilename, std::ios::in | std::ios::binary); + if (!readKey) + 
return {}; + + contents.str(""); + contents << readKey.rdbuf(); + readKey.close(); + std::string key = contents.str(); + + ssl::context ctx{ssl::context::tlsv12}; + + ctx.set_options( + boost::asio::ssl::context::default_workarounds | + boost::asio::ssl::context::no_sslv2); + + ctx.use_certificate_chain(boost::asio::buffer(cert.data(), cert.size())); + + ctx.use_private_key( + boost::asio::buffer(key.data(), key.size()), + boost::asio::ssl::context::file_format::pem); + + return ctx; +} + +/** + * @brief Start context threads + * + * @param ioc Context + * @param numThreads Number of worker threads to start + */ +void +start(boost::asio::io_context& ioc, std::uint32_t numThreads) +{ + std::vector v; + v.reserve(numThreads - 1); + for (auto i = numThreads - 1; i > 0; --i) + v.emplace_back([&ioc] { ioc.run(); }); + + ioc.run(); +} + +int +main(int argc, char* argv[]) +try +{ + auto const configPath = parseCli(argc, argv); + auto const config = ConfigReader::open(configPath); + if (!config) + { + std::cerr << "Couldnt parse config '" << configPath << "'." + << std::endl; + return EXIT_FAILURE; + } + + LogService::init(config); + LogService::info() << "Clio version: " << Build::getClioFullVersionString(); + + auto ctx = parseCerts(config); + auto ctxRef = ctx + ? 
std::optional>{ctx.value()} + : std::nullopt; + + auto const threads = config.valueOr("io_threads", 2); + if (threads <= 0) + { + LogService::fatal() << "io_threads is less than 0"; + return EXIT_FAILURE; + } + LogService::info() << "Number of io threads = " << threads; + + // IO context to handle all incoming requests, as well as other things + // This is not the only io context in the application + boost::asio::io_context ioc{threads}; + + // Rate limiter, to prevent abuse + auto sweepHandler = IntervalSweepHandler{config, ioc}; + auto dosGuard = DOSGuard{config, sweepHandler}; + + // Interface to the database + auto backend = Backend::make_Backend(ioc, config); + + // Manages clients subscribed to streams + auto subscriptions = + SubscriptionManager::make_SubscriptionManager(config, backend); + + // Tracks which ledgers have been validated by the + // network + auto ledgers = NetworkValidatedLedgers::make_ValidatedLedgers(); + + // Handles the connection to one or more rippled nodes. + // ETL uses the balancer to extract data. + // The server uses the balancer to forward RPCs to a rippled node. + // The balancer itself publishes to streams (transactions_proposed and + // accounts_proposed) + auto balancer = ETLLoadBalancer::make_ETLLoadBalancer( + config, ioc, backend, subscriptions, ledgers); + + // ETL is responsible for writing and publishing to streams. In read-only + // mode, ETL only publishes + auto etl = ReportingETL::make_ReportingETL( + config, ioc, backend, subscriptions, balancer, ledgers); + + // The server handles incoming RPCs + auto httpServer = Server::make_HttpServer( + config, ioc, ctxRef, backend, subscriptions, balancer, etl, dosGuard); + + // Blocks until stopped. 
+ // When stopped, shared_ptrs fall out of scope + // Calls destructors on all resources, and destructs in order + start(ioc, threads); + + return EXIT_SUCCESS; +} +catch (std::exception const& e) +{ + LogService::fatal() << "Exit on exception: " << e.what(); +} diff --git a/src/rpc/Counters.cpp b/src/rpc/Counters.cpp new file mode 100644 index 00000000..10cc8d0d --- /dev/null +++ b/src/rpc/Counters.cpp @@ -0,0 +1,108 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +namespace RPC { + +void +Counters::initializeCounter(std::string const& method) +{ + std::shared_lock lk(mutex_); + if (methodInfo_.count(method) == 0) + { + lk.unlock(); + std::scoped_lock ulk(mutex_); + + // This calls the default constructor for methodInfo of the method. 
+ methodInfo_[method]; + } +} + +void +Counters::rpcErrored(std::string const& method) +{ + if (!validHandler(method)) + return; + + initializeCounter(method); + + std::shared_lock lk(mutex_); + MethodInfo& counters = methodInfo_[method]; + counters.started++; + counters.errored++; +} + +void +Counters::rpcComplete( + std::string const& method, + std::chrono::microseconds const& rpcDuration) +{ + if (!validHandler(method)) + return; + + initializeCounter(method); + + std::shared_lock lk(mutex_); + MethodInfo& counters = methodInfo_[method]; + counters.started++; + counters.finished++; + counters.duration += rpcDuration.count(); +} + +void +Counters::rpcForwarded(std::string const& method) +{ + if (!validHandler(method)) + return; + + initializeCounter(method); + + std::shared_lock lk(mutex_); + MethodInfo& counters = methodInfo_[method]; + counters.forwarded++; +} + +boost::json::object +Counters::report() +{ + std::shared_lock lk(mutex_); + boost::json::object obj = {}; + obj[JS(rpc)] = boost::json::object{}; + auto& rpc = obj[JS(rpc)].as_object(); + + for (auto const& [method, info] : methodInfo_) + { + boost::json::object counters = {}; + counters[JS(started)] = std::to_string(info.started); + counters[JS(finished)] = std::to_string(info.finished); + counters[JS(errored)] = std::to_string(info.errored); + counters["forwarded"] = std::to_string(info.forwarded); + counters[JS(duration_us)] = std::to_string(info.duration); + + rpc[method] = std::move(counters); + } + obj["work_queue"] = workQueue_.get().report(); + + return obj; +} + +} // namespace RPC diff --git a/src/rpc/Counters.h b/src/rpc/Counters.h new file mode 100644 index 00000000..4091caba --- /dev/null +++ b/src/rpc/Counters.h @@ -0,0 +1,73 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace RPC { + +class Counters +{ +private: + struct MethodInfo + { + MethodInfo() = default; + + std::atomic_uint64_t started{0}; + std::atomic_uint64_t finished{0}; + std::atomic_uint64_t errored{0}; + std::atomic_uint64_t forwarded{0}; + std::atomic_uint64_t duration{0}; + }; + + void + initializeCounter(std::string const& method); + + std::shared_mutex mutex_; + std::unordered_map methodInfo_; + + std::reference_wrapper workQueue_; + +public: + Counters(WorkQueue const& wq) : workQueue_(std::cref(wq)){}; + + void + rpcErrored(std::string const& method); + + void + rpcComplete( + std::string const& method, + std::chrono::microseconds const& rpcDuration); + + void + rpcForwarded(std::string const& method); + + boost::json::object + report(); +}; + +} // namespace RPC diff --git a/src/rpc/Errors.cpp b/src/rpc/Errors.cpp new file mode 100644 index 00000000..da659a84 --- /dev/null +++ b/src/rpc/Errors.cpp @@ -0,0 +1,169 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright 
(c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +using namespace std; + +namespace { +template +struct overloadSet : Ts... +{ + using Ts::operator()...; +}; + +// explicit deduction guide (not needed as of C++20, but clang be clang) +template +overloadSet(Ts...) -> overloadSet; +} // namespace + +namespace RPC { + +WarningInfo const& +getWarningInfo(WarningCode code) +{ + constexpr static WarningInfo infos[]{ + {warnUNKNOWN, "Unknown warning"}, + {warnRPC_CLIO, + "This is a clio server. clio only serves validated data. 
If you " + "want to talk to rippled, include 'ledger_index':'current' in your " + "request"}, + {warnRPC_OUTDATED, "This server may be out of date"}, + {warnRPC_RATE_LIMIT, "You are about to be rate limited"}}; + + auto matchByCode = [code](auto const& info) { return info.code == code; }; + if (auto it = find_if(begin(infos), end(infos), matchByCode); + it != end(infos)) + return *it; + + throw(out_of_range("Invalid WarningCode")); +} + +boost::json::object +makeWarning(WarningCode code) +{ + boost::json::object json; + auto const& info = getWarningInfo(code); + json["id"] = code; + json["message"] = static_cast(info.message); + return json; +} + +ClioErrorInfo const& +getErrorInfo(ClioError code) +{ + constexpr static ClioErrorInfo infos[]{ + {ClioError::rpcMALFORMED_CURRENCY, + "malformedCurrency", + "Malformed currency."}, + {ClioError::rpcMALFORMED_REQUEST, + "malformedRequest", + "Malformed request."}, + {ClioError::rpcMALFORMED_OWNER, "malformedOwner", "Malformed owner."}, + {ClioError::rpcMALFORMED_ADDRESS, + "malformedAddress", + "Malformed address."}, + }; + + auto matchByCode = [code](auto const& info) { return info.code == code; }; + if (auto it = find_if(begin(infos), end(infos), matchByCode); + it != end(infos)) + return *it; + + throw(out_of_range("Invalid error code")); +} + +boost::json::object +makeError( + RippledError err, + optional customError, + optional customMessage) +{ + boost::json::object json; + auto const& info = ripple::RPC::get_error_info(err); + + json["error"] = customError.value_or(info.token.c_str()).data(); + json["error_code"] = static_cast(err); + json["error_message"] = customMessage.value_or(info.message.c_str()).data(); + json["status"] = "error"; + json["type"] = "response"; + return json; +} + +boost::json::object +makeError( + ClioError err, + optional customError, + optional customMessage) +{ + boost::json::object json; + auto const& info = getErrorInfo(err); + + json["error"] = customError.value_or(info.error).data(); + 
json["error_code"] = static_cast(info.code); + json["error_message"] = customMessage.value_or(info.message).data(); + json["status"] = "error"; + json["type"] = "response"; + return json; +} + +boost::json::object +makeError(Status const& status) +{ + auto wrapOptional = [](string_view const& str) { + return str.empty() ? nullopt : make_optional(str); + }; + + auto res = visit( + overloadSet{ + [&status, &wrapOptional](RippledError err) { + if (err == ripple::rpcUNKNOWN) + { + return boost::json::object{ + {"error", status.message}, + {"type", "response"}, + {"status", "error"}}; + } + + return makeError( + err, + wrapOptional(status.error), + wrapOptional(status.message)); + }, + [&status, &wrapOptional](ClioError err) { + return makeError( + err, + wrapOptional(status.error), + wrapOptional(status.message)); + }, + }, + status.code); + if (status.extraInfo) + { + for (auto& [key, value] : status.extraInfo.value()) + { + res[key] = value; + } + } + return res; +} + +} // namespace RPC diff --git a/src/rpc/Errors.h b/src/rpc/Errors.h new file mode 100644 index 00000000..5e7caf17 --- /dev/null +++ b/src/rpc/Errors.h @@ -0,0 +1,256 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include + +#include + +#include +#include +#include +#include + +namespace RPC { + +/** + * @brief Custom clio RPC Errors. + */ +enum class ClioError { + rpcMALFORMED_CURRENCY = 5000, + rpcMALFORMED_REQUEST = 5001, + rpcMALFORMED_OWNER = 5002, + rpcMALFORMED_ADDRESS = 5003, +}; + +/** + * @brief Holds info about a particular @ref ClioError. + */ +struct ClioErrorInfo +{ + ClioError const code; + std::string_view const error; + std::string_view const message; +}; + +/** + * @brief Clio uses compatible Rippled error codes for most RPC errors. + */ +using RippledError = ripple::error_code_i; + +/** + * @brief Clio operates on a combination of Rippled and Custom Clio error codes. + * + * @see RippledError For rippled error codes + * @see ClioError For custom clio error codes + */ +using CombinedError = std::variant; + +/** + * @brief A status returned from any RPC handler. + */ +struct Status +{ + CombinedError code = RippledError::rpcSUCCESS; + std::string error = ""; + std::string message = ""; + std::optional extraInfo; + + Status() = default; + /* implicit */ Status(CombinedError code) : code(code){}; + Status(CombinedError code, boost::json::object&& extraInfo) + : code(code), extraInfo(std::move(extraInfo)){}; + + // HACK. Some rippled handlers explicitly specify errors. + // This means that we have to be able to duplicate this + // functionality. 
+ explicit Status(std::string const& message) + : code(ripple::rpcUNKNOWN), message(message) + { + } + + Status(CombinedError code, std::string message) + : code(code), message(message) + { + } + + Status(CombinedError code, std::string error, std::string message) + : code(code), error(error), message(message) + { + } + + /** + * @brief Returns true if the Status is *not* OK. + */ + operator bool() const + { + if (auto err = std::get_if(&code)) + return *err != RippledError::rpcSUCCESS; + return true; + } + + /** + * @brief Returns true if the Status contains the desired @ref RippledError + * + * @param other The RippledError to match + * @return bool true if status matches given error; false otherwise + */ + bool + operator==(RippledError other) const + { + if (auto err = std::get_if(&code)) + return *err == other; + return false; + } + + /** + * @brief Returns true if the Status contains the desired @ref ClioError + * + * @param other The RippledError to match + * @return bool true if status matches given error; false otherwise + */ + bool + operator==(ClioError other) const + { + if (auto err = std::get_if(&code)) + return *err == other; + return false; + } +}; + +/** + * @brief Warning codes that can be returned by clio. + */ +enum WarningCode { + warnUNKNOWN = -1, + warnRPC_CLIO = 2001, + warnRPC_OUTDATED = 2002, + warnRPC_RATE_LIMIT = 2003 +}; + +/** + * @brief Holds information about a clio warning. + */ +struct WarningInfo +{ + constexpr WarningInfo() = default; + constexpr WarningInfo(WarningCode code, char const* message) + : code(code), message(message) + { + } + + WarningCode code = warnUNKNOWN; + std::string_view const message = "unknown warning"; +}; + +/** + * @brief Invalid parameters error. 
+ */ +class InvalidParamsError : public std::exception +{ + std::string msg; + +public: + explicit InvalidParamsError(std::string const& msg) : msg(msg) + { + } + + const char* + what() const throw() override + { + return msg.c_str(); + } +}; + +/** + * @brief Account not found error. + */ +class AccountNotFoundError : public std::exception +{ + std::string account; + +public: + explicit AccountNotFoundError(std::string const& acct) : account(acct) + { + } + const char* + what() const throw() override + { + return account.c_str(); + } +}; + +/** + * @brief A globally available @ref Status that represents a successful state + */ +static Status OK; + +/** + * @brief Get the warning info object from a warning code. + * + * @param code The warning code + * @return WarningInfo const& A reference to the static warning info + */ +WarningInfo const& +getWarningInfo(WarningCode code); + +/** + * @brief Generate JSON from a warning code. + * + * @param code The @ref WarningCode + * @return boost::json::object The JSON output + */ +boost::json::object +makeWarning(WarningCode code); + +/** + * @brief Generate JSON from a @ref Status. + * + * @param status The @ref Status + * @return boost::json::object The JSON output + */ +boost::json::object +makeError(Status const& status); + +/** + * @brief Generate JSON from a @ref RippledError. + * + * @param status The rippled @ref RippledError + * @return boost::json::object The JSON output + */ +boost::json::object +makeError( + RippledError err, + std::optional customError = std::nullopt, + std::optional customMessage = std::nullopt); + +/** + * @brief Generate JSON from a @ref ClioError. 
+ * + * @param status The clio's custom @ref ClioError + * @return boost::json::object The JSON output + */ +boost::json::object +makeError( + ClioError err, + std::optional customError = std::nullopt, + std::optional customMessage = std::nullopt); + +} // namespace RPC diff --git a/src/rpc/Handlers.h b/src/rpc/Handlers.h new file mode 100644 index 00000000..f2b36d86 --- /dev/null +++ b/src/rpc/Handlers.h @@ -0,0 +1,122 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include + +namespace RPC { +/* + * This file just contains declarations for all of the handlers + */ + +// account state methods +Result +doAccountInfo(Context const& context); + +Result +doAccountChannels(Context const& context); + +Result +doAccountCurrencies(Context const& context); + +Result +doAccountLines(Context const& context); + +Result +doAccountNFTs(Context const& context); + +Result +doAccountObjects(Context const& context); + +Result +doAccountOffers(Context const& context); + +Result +doGatewayBalances(Context const& context); + +Result +doNoRippleCheck(Context const& context); + +// channels methods + +Result +doChannelAuthorize(Context const& context); + +Result +doChannelVerify(Context const& context); + +// book methods +[[nodiscard]] Result +doBookChanges(Context const& context); + +Result +doBookOffers(Context const& context); + +// NFT methods +Result +doNFTBuyOffers(Context const& context); + +Result +doNFTSellOffers(Context const& context); + +Result +doNFTInfo(Context const& context); + +Result +doNFTHistory(Context const& context); + +// ledger methods +Result +doLedger(Context const& context); + +Result +doLedgerEntry(Context const& context); + +Result +doLedgerData(Context const& context); + +Result +doLedgerRange(Context const& context); + +// transaction methods +Result +doTx(Context const& context); + +Result +doTransactionEntry(Context const& context); + +Result +doAccountTx(Context const& context); + +// subscriptions +Result +doSubscribe(Context const& context); + +Result +doUnsubscribe(Context const& context); + +// server methods +Result +doServerInfo(Context const& context); + +// Utility methods +Result +doRandom(Context const& context); +} // namespace RPC diff --git a/src/rpc/README.md b/src/rpc/README.md new file mode 100644 index 00000000..b4797025 --- /dev/null +++ b/src/rpc/README.md @@ -0,0 +1,29 @@ +# Clio RPC 
subsystem +## Background +The RPC subsystem is where the common framework for handling incoming JSON requests is implemented. +Currently the NextGen RPC framework is a work in progress and the handlers are not yet implemented using the new common framework classes. + +## Integration plan +- Implement base framework - **done** +- Migrate handlers one by one, making them injectable, adding unit-tests - **in progress** +- Integrate all new handlers into clio in one go +- Cover the rest with unit-tests +- Release first time with new subsystem active + +## Components +See `common` subfolder. + +- **AnyHandler**: The type-erased wrapper that allows for storing different handlers in one map/vector. +- **RpcSpec/FieldSpec**: The RPC specification classes, used to specify how incoming JSON is to be validated before it's parsed and passed on to individual handler implementations. +- **Validators**: A bunch of supported validators that can be specified as requirements for each **`FieldSpec`** to make up the final **`RpcSpec`** of any given RPC handler. + +## Implementing a (NextGen) handler +See `unittests/rpc` for examples. + +Handlers need to fulfil the requirements specified by the **`Handler`** concept (see `rpc/common/Concepts.h`): +- Expose types: + * `Input` - The POD struct which acts as input for the handler + * `Output` - The POD struct which acts as output of a valid handler invocation +- Have a `spec()` member function returning a const reference to an **`RpcSpec`** describing the JSON input. +- Have a `process(Input)` member function that operates on `Input` POD and returns `HandlerReturnType` +- Implement `value_from` and `value_to` support using `tag_invoke` as per `boost::json` documentation for these functions. 
diff --git a/src/rpc/RPC.cpp b/src/rpc/RPC.cpp new file mode 100644 index 00000000..35ae1e73 --- /dev/null +++ b/src/rpc/RPC.cpp @@ -0,0 +1,396 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include + +#include + +#include + +using namespace std; +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gPerfLog{"Performance"}; +clio::Logger gLog{"RPC"}; +} // namespace + +namespace RPC { +Context::Context( + boost::asio::yield_context& yield_, + string const& command_, + uint32_t version_, + boost::json::object const& params_, + shared_ptr const& backend_, + shared_ptr const& subscriptions_, + shared_ptr const& balancer_, + shared_ptr const& etl_, + shared_ptr const& session_, + util::TagDecoratorFactory const& tagFactory_, + Backend::LedgerRange const& range_, + Counters& counters_, + string const& clientIp_) + : Taggable(tagFactory_) + , yield(yield_) + , method(command_) + , version(version_) + , params(params_) + , backend(backend_) + , subscriptions(subscriptions_) + , balancer(balancer_) + , etl(etl_) + , session(session_) + , range(range_) + , counters(counters_) + , clientIp(clientIp_) +{ + gPerfLog.debug() << tag() << "new Context created"; +} + +optional +make_WsContext( + boost::asio::yield_context& yc, + boost::json::object const& request, + shared_ptr const& backend, + shared_ptr const& subscriptions, + shared_ptr const& balancer, + shared_ptr const& etl, + shared_ptr const& session, + util::TagDecoratorFactory const& tagFactory, + Backend::LedgerRange const& range, + Counters& counters, + string const& clientIp) +{ + boost::json::value commandValue = nullptr; + if (!request.contains("command") && request.contains("method")) + commandValue = request.at("method"); + else if (request.contains("command") && !request.contains("method")) + commandValue = request.at("command"); + + if (!commandValue.is_string()) + return {}; + + string command = commandValue.as_string().c_str(); + + return make_optional( + yc, + command, + 1, + request, + backend, + subscriptions, + balancer, 
+ etl, + session, + tagFactory, + range, + counters, + clientIp); +} + +optional +make_HttpContext( + boost::asio::yield_context& yc, + boost::json::object const& request, + shared_ptr const& backend, + shared_ptr const& subscriptions, + shared_ptr const& balancer, + shared_ptr const& etl, + util::TagDecoratorFactory const& tagFactory, + Backend::LedgerRange const& range, + RPC::Counters& counters, + string const& clientIp) +{ + if (!request.contains("method") || !request.at("method").is_string()) + return {}; + + string const& command = request.at("method").as_string().c_str(); + + if (command == "subscribe" || command == "unsubscribe") + return {}; + + if (!request.at("params").is_array()) + return {}; + + boost::json::array const& array = request.at("params").as_array(); + + if (array.size() != 1) + return {}; + + if (!array.at(0).is_object()) + return {}; + + return make_optional( + yc, + command, + 1, + array.at(0).as_object(), + backend, + subscriptions, + balancer, + etl, + nullptr, + tagFactory, + range, + counters, + clientIp); +} + +using LimitRange = tuple; +using HandlerFunction = function; + +struct Handler +{ + string method; + function handler; + optional limit; + bool isClioOnly = false; +}; + +class HandlerTable +{ + unordered_map handlerMap_; + +public: + HandlerTable(initializer_list handlers) + { + for (auto const& handler : handlers) + { + handlerMap_[handler.method] = move(handler); + } + } + + bool + contains(string const& method) + { + return handlerMap_.contains(method); + } + + optional + getLimitRange(string const& command) + { + if (!handlerMap_.contains(command)) + return {}; + + return handlerMap_[command].limit; + } + + optional + getHandler(string const& command) + { + if (!handlerMap_.contains(command)) + return {}; + + return handlerMap_[command].handler; + } + + bool + isClioOnly(string const& command) + { + return handlerMap_.contains(command) && handlerMap_[command].isClioOnly; + } +}; + +static HandlerTable handlerTable{ + 
{"account_channels", &doAccountChannels, LimitRange{10, 50, 256}}, + {"account_currencies", &doAccountCurrencies, {}}, + {"account_info", &doAccountInfo, {}}, + {"account_lines", &doAccountLines, LimitRange{10, 50, 256}}, + {"account_nfts", &doAccountNFTs, LimitRange{1, 5, 10}}, + {"account_objects", &doAccountObjects, LimitRange{10, 50, 256}}, + {"account_offers", &doAccountOffers, LimitRange{10, 50, 256}}, + {"account_tx", &doAccountTx, LimitRange{1, 50, 100}}, + {"gateway_balances", &doGatewayBalances, {}}, + {"noripple_check", &doNoRippleCheck, LimitRange{1, 300, 500}}, + {"book_changes", &doBookChanges, {}}, + {"book_offers", &doBookOffers, LimitRange{1, 50, 100}}, + {"ledger", &doLedger, {}}, + {"ledger_data", &doLedgerData, LimitRange{1, 100, 2048}}, + {"nft_buy_offers", &doNFTBuyOffers, LimitRange{1, 50, 100}}, + {"nft_history", &doNFTHistory, LimitRange{1, 50, 100}, true}, + {"nft_info", &doNFTInfo, {}, true}, + {"nft_sell_offers", &doNFTSellOffers, LimitRange{1, 50, 100}}, + {"ledger_entry", &doLedgerEntry, {}}, + {"ledger_range", &doLedgerRange, {}}, + {"subscribe", &doSubscribe, {}}, + {"server_info", &doServerInfo, {}}, + {"unsubscribe", &doUnsubscribe, {}}, + {"tx", &doTx, {}}, + {"transaction_entry", &doTransactionEntry, {}}, + {"random", &doRandom, {}}}; + +static unordered_set forwardCommands{ + "submit", + "submit_multisigned", + "fee", + "ledger_closed", + "ledger_current", + "ripple_path_find", + "manifest", + "channel_authorize", + "channel_verify"}; + +bool +validHandler(string const& method) +{ + return handlerTable.contains(method) || forwardCommands.contains(method); +} + +bool +isClioOnly(string const& method) +{ + return handlerTable.isClioOnly(method); +} + +bool +shouldSuppressValidatedFlag(RPC::Context const& context) +{ + return boost::iequals(context.method, "subscribe") || + boost::iequals(context.method, "unsubscribe"); +} + +Status +getLimit(RPC::Context const& context, uint32_t& limit) +{ + if 
(!handlerTable.getHandler(context.method)) + return Status{RippledError::rpcUNKNOWN_COMMAND}; + + if (!handlerTable.getLimitRange(context.method)) + return Status{ + RippledError::rpcINVALID_PARAMS, "rpcDoesNotRequireLimit"}; + + auto [lo, def, hi] = *handlerTable.getLimitRange(context.method); + + if (context.params.contains(JS(limit))) + { + string errMsg = "Invalid field 'limit', not unsigned integer."; + if (!context.params.at(JS(limit)).is_int64()) + return Status{RippledError::rpcINVALID_PARAMS, errMsg}; + + int input = context.params.at(JS(limit)).as_int64(); + if (input <= 0) + return Status{RippledError::rpcINVALID_PARAMS, errMsg}; + + limit = clamp(static_cast(input), lo, hi); + } + else + { + limit = def; + } + + return {}; +} + +bool +shouldForwardToRippled(Context const& ctx) +{ + auto request = ctx.params; + + if (isClioOnly(ctx.method)) + return false; + + if (forwardCommands.find(ctx.method) != forwardCommands.end()) + return true; + + if (specifiesCurrentOrClosedLedger(request)) + return true; + + if (ctx.method == "account_info" && request.contains("queue") && + request.at("queue").as_bool()) + return true; + + return false; +} + +Result +buildResponse(Context const& ctx) +{ + if (shouldForwardToRippled(ctx)) + { + boost::json::object toForward = ctx.params; + toForward["command"] = ctx.method; + + auto res = + ctx.balancer->forwardToRippled(toForward, ctx.clientIp, ctx.yield); + + ctx.counters.rpcForwarded(ctx.method); + + if (!res) + return Status{RippledError::rpcFAILED_TO_FORWARD}; + + return *res; + } + + if (ctx.method == "ping") + return boost::json::object{}; + + if (ctx.backend->isTooBusy()) + { + gLog.error() << "Database is too busy. 
Rejecting request"; + return Status{RippledError::rpcTOO_BUSY}; + } + + auto method = handlerTable.getHandler(ctx.method); + + if (!method) + return Status{RippledError::rpcUNKNOWN_COMMAND}; + + try + { + gPerfLog.debug() << ctx.tag() << " start executing rpc `" << ctx.method + << '`'; + auto v = (*method)(ctx); + gPerfLog.debug() << ctx.tag() << " finish executing rpc `" << ctx.method + << '`'; + + if (auto object = get_if(&v); + object && not shouldSuppressValidatedFlag(ctx)) + { + (*object)[JS(validated)] = true; + } + + return v; + } + catch (InvalidParamsError const& err) + { + return Status{RippledError::rpcINVALID_PARAMS, err.what()}; + } + catch (AccountNotFoundError const& err) + { + return Status{RippledError::rpcACT_NOT_FOUND, err.what()}; + } + catch (Backend::DatabaseTimeout const& t) + { + gLog.error() << "Database timeout"; + return Status{RippledError::rpcTOO_BUSY}; + } + catch (exception const& err) + { + gLog.error() << ctx.tag() << " caught exception: " << err.what(); + return Status{RippledError::rpcINTERNAL}; + } +} + +} // namespace RPC diff --git a/src/rpc/RPC.h b/src/rpc/RPC.h new file mode 100644 index 00000000..fd2b3129 --- /dev/null +++ b/src/rpc/RPC.h @@ -0,0 +1,166 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +/* + * This file contains various classes necessary for executing RPC handlers. + * Context gives the handlers access to various other parts of the application + * Status is used to report errors. + * And lastly, there are various functions for making Contexts, Statuses and + * serializing Status to JSON. + * This file is meant to contain any class or function that code outside of the + * rpc folder needs to use. For helper functions or classes used within the rpc + * folder, use RPCHelpers.h. + */ + +class WsBase; +class SubscriptionManager; +class ETLLoadBalancer; +class ReportingETL; + +namespace RPC { + +struct Context : public util::Taggable +{ + clio::Logger perfLog_{"Performance"}; + boost::asio::yield_context& yield; + std::string method; + std::uint32_t version; + boost::json::object const& params; + std::shared_ptr const& backend; + // this needs to be an actual shared_ptr, not a reference. The above + // references refer to shared_ptr members of WsBase, but WsBase contains + // SubscriptionManager as a weak_ptr, to prevent a shared_ptr cycle. 
+ std::shared_ptr subscriptions; + std::shared_ptr const& balancer; + std::shared_ptr const& etl; + std::shared_ptr session; + Backend::LedgerRange const& range; + Counters& counters; + std::string clientIp; + + Context( + boost::asio::yield_context& yield_, + std::string const& command_, + std::uint32_t version_, + boost::json::object const& params_, + std::shared_ptr const& backend_, + std::shared_ptr const& subscriptions_, + std::shared_ptr const& balancer_, + std::shared_ptr const& etl_, + std::shared_ptr const& session_, + util::TagDecoratorFactory const& tagFactory_, + Backend::LedgerRange const& range_, + Counters& counters_, + std::string const& clientIp_); +}; + +struct AccountCursor +{ + ripple::uint256 index; + std::uint32_t hint; + + std::string + toString() const + { + return ripple::strHex(index) + "," + std::to_string(hint); + } + + bool + isNonZero() const + { + return index.isNonZero() || hint != 0; + } +}; + +using Result = std::variant; + +std::optional +make_WsContext( + boost::asio::yield_context& yc, + boost::json::object const& request, + std::shared_ptr const& backend, + std::shared_ptr const& subscriptions, + std::shared_ptr const& balancer, + std::shared_ptr const& etl, + std::shared_ptr const& session, + util::TagDecoratorFactory const& tagFactory, + Backend::LedgerRange const& range, + Counters& counters, + std::string const& clientIp); + +std::optional +make_HttpContext( + boost::asio::yield_context& yc, + boost::json::object const& request, + std::shared_ptr const& backend, + std::shared_ptr const& subscriptions, + std::shared_ptr const& balancer, + std::shared_ptr const& etl, + util::TagDecoratorFactory const& tagFactory, + Backend::LedgerRange const& range, + Counters& counters, + std::string const& clientIp); + +Result +buildResponse(Context const& ctx); + +bool +validHandler(std::string const& method); + +bool +isClioOnly(std::string const& method); + +Status +getLimit(RPC::Context const& context, std::uint32_t& limit); + +template 
+void +logDuration(Context const& ctx, T const& dur) +{ + static clio::Logger log{"RPC"}; + std::stringstream ss; + ss << ctx.tag() << "Request processing duration = " + << std::chrono::duration_cast(dur).count() + << " milliseconds. request = " << ctx.params; + auto seconds = + std::chrono::duration_cast(dur).count(); + if (seconds > 10) + log.error() << ss.str(); + else if (seconds > 1) + log.warn() << ss.str(); + else + log.info() << ss.str(); +} + +} // namespace RPC diff --git a/src/rpc/RPCHelpers.cpp b/src/rpc/RPCHelpers.cpp new file mode 100644 index 00000000..a908eeab --- /dev/null +++ b/src/rpc/RPCHelpers.cpp @@ -0,0 +1,1766 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include + +#include +#include + +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace + +namespace RPC { + +std::optional +getBool(boost::json::object const& request, std::string const& field) +{ + if (!request.contains(field)) + return {}; + else if (request.at(field).is_bool()) + return request.at(field).as_bool(); + else + throw InvalidParamsError("Invalid field " + field + ", not bool."); +} + +bool +getBool( + boost::json::object const& request, + std::string const& field, + bool dfault) +{ + if (auto res = getBool(request, field)) + return *res; + else + return dfault; +} + +bool +getRequiredBool(boost::json::object const& request, std::string const& field) +{ + if (auto res = getBool(request, field)) + return *res; + else + throw InvalidParamsError("Missing field " + field); +} + +std::optional +getUInt(boost::json::object const& request, std::string const& field) +{ + if (!request.contains(field)) + return {}; + else if (request.at(field).is_uint64()) + return request.at(field).as_uint64(); + else if (request.at(field).is_int64()) + return request.at(field).as_int64(); + else + throw InvalidParamsError("Invalid field " + field + ", not uint."); +} + +std::uint32_t +getUInt( + boost::json::object const& request, + std::string const& field, + std::uint32_t const dfault) +{ + if (auto res = getUInt(request, field)) + return *res; + else + return dfault; +} + +std::uint32_t +getRequiredUInt(boost::json::object const& request, std::string const& field) +{ + if (auto res = getUInt(request, field)) + return *res; + else + throw InvalidParamsError("Missing field " + field); +} + +std::optional +parseAccountCursor(std::optional jsonCursor) +{ + ripple::uint256 cursorIndex = beast::zero; + std::uint64_t startHint = 0; + + if (!jsonCursor) + return AccountCursor({cursorIndex, 
startHint}); + + // Cursor is composed of a comma separated index and start hint. The + // former will be read as hex, and the latter using boost lexical cast. + std::stringstream cursor(*jsonCursor); + std::string value; + if (!std::getline(cursor, value, ',')) + return {}; + + if (!cursorIndex.parseHex(value)) + return {}; + + if (!std::getline(cursor, value, ',')) + return {}; + + try + { + startHint = boost::lexical_cast(value); + } + catch (boost::bad_lexical_cast&) + { + return {}; + } + + return AccountCursor({cursorIndex, startHint}); +} + +std::optional +getString(boost::json::object const& request, std::string const& field) +{ + if (!request.contains(field)) + return {}; + else if (request.at(field).is_string()) + return request.at(field).as_string().c_str(); + else + throw InvalidParamsError("Invalid field " + field + ", not string."); +} + +std::string +getRequiredString(boost::json::object const& request, std::string const& field) +{ + if (auto res = getString(request, field)) + return *res; + else + throw InvalidParamsError("Missing field " + field); +} + +std::string +getString( + boost::json::object const& request, + std::string const& field, + std::string dfault) +{ + if (auto res = getString(request, field)) + return *res; + else + return dfault; +} + +Status +getHexMarker(boost::json::object const& request, ripple::uint256& marker) +{ + if (request.contains(JS(marker))) + { + if (!request.at(JS(marker)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"}; + + if (!marker.parseHex(request.at(JS(marker)).as_string().c_str())) + return Status{RippledError::rpcINVALID_PARAMS, "malformedMarker"}; + } + + return {}; +} + +Status +getAccount( + boost::json::object const& request, + ripple::AccountID& account, + boost::string_view const& field, + bool required) +{ + if (!request.contains(field)) + { + if (required) + return Status{ + RippledError::rpcINVALID_PARAMS, field.to_string() + "Missing"}; + + return {}; + } + + if 
(!request.at(field).is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, field.to_string() + "NotString"}; + + if (auto a = accountFromStringStrict(request.at(field).as_string().c_str()); + a) + { + account = a.value(); + return {}; + } + + return Status{ + RippledError::rpcACT_MALFORMED, field.to_string() + "Malformed"}; +} + +Status +getOptionalAccount( + boost::json::object const& request, + std::optional& account, + boost::string_view const& field) +{ + if (!request.contains(field)) + { + account = {}; + return {}; + } + + if (!request.at(field).is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, field.to_string() + "NotString"}; + + if (auto a = accountFromStringStrict(request.at(field).as_string().c_str()); + a) + { + account = a.value(); + return {}; + } + + return Status{ + RippledError::rpcINVALID_PARAMS, field.to_string() + "Malformed"}; +} + +Status +getAccount(boost::json::object const& request, ripple::AccountID& accountId) +{ + return getAccount(request, accountId, JS(account), true); +} + +Status +getAccount( + boost::json::object const& request, + ripple::AccountID& destAccount, + boost::string_view const& field) +{ + return getAccount(request, destAccount, field, false); +} + +Status +getTaker(boost::json::object const& request, ripple::AccountID& takerID) +{ + if (request.contains(JS(taker))) + { + auto parsed = parseTaker(request.at(JS(taker))); + if (auto status = std::get_if(&parsed); status) + return *status; + else + takerID = std::get(parsed); + } + + return {}; +} + +Status +getChannelId(boost::json::object const& request, ripple::uint256& channelId) +{ + if (!request.contains(JS(channel_id))) + return Status{RippledError::rpcINVALID_PARAMS, "missingChannelID"}; + + if (!request.at(JS(channel_id)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "channelIDNotString"}; + + if (!channelId.parseHex(request.at(JS(channel_id)).as_string().c_str())) + return Status{RippledError::rpcCHANNEL_MALFORMED, 
"malformedChannelID"}; + + return {}; +} + +std::optional +getDeliveredAmount( + std::shared_ptr const& txn, + std::shared_ptr const& meta, + std::uint32_t const ledgerSequence, + uint32_t date) +{ + if (meta->hasDeliveredAmount()) + return meta->getDeliveredAmount(); + if (txn->isFieldPresent(ripple::sfAmount)) + { + using namespace std::chrono_literals; + + // Ledger 4594095 is the first ledger in which the DeliveredAmount field + // was present when a partial payment was made and its absence indicates + // that the amount delivered is listed in the Amount field. + // + // If the ledger closed long after the DeliveredAmount code was deployed + // then its absence indicates that the amount delivered is listed in the + // Amount field. DeliveredAmount went live January 24, 2014. + // 446000000 is in Feb 2014, well after DeliveredAmount went live + if (ledgerSequence >= 4594095 || date > 446000000) + { + return txn->getFieldAmount(ripple::sfAmount); + } + } + return {}; +} + +bool +canHaveDeliveredAmount( + std::shared_ptr const& txn, + std::shared_ptr const& meta) +{ + ripple::TxType const tt{txn->getTxnType()}; + if (tt != ripple::ttPAYMENT && tt != ripple::ttCHECK_CASH && + tt != ripple::ttACCOUNT_DELETE) + return false; + + /* + if (tt == ttCHECK_CASH && !getFix1623Enabled()) + return false; + */ + + if (meta->getResultTER() != ripple::tesSUCCESS) + return false; + + return true; +} + +std::optional +accountFromStringStrict(std::string const& account) +{ + auto blob = ripple::strUnHex(account); + + std::optional publicKey = {}; + if (blob && ripple::publicKeyType(ripple::makeSlice(*blob))) + { + publicKey = + ripple::PublicKey(ripple::Slice{blob->data(), blob->size()}); + } + else + { + publicKey = ripple::parseBase58( + ripple::TokenType::AccountPublic, account); + } + + std::optional result; + if (publicKey) + result = ripple::calcAccountID(*publicKey); + else + result = ripple::parseBase58(account); + + if (result) + return result.value(); + else + return {}; 
+} +std::pair< + std::shared_ptr, + std::shared_ptr> +deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs) +{ + try + { + std::pair< + std::shared_ptr, + std::shared_ptr> + result; + { + ripple::SerialIter s{ + blobs.transaction.data(), blobs.transaction.size()}; + result.first = std::make_shared(s); + } + { + ripple::SerialIter s{blobs.metadata.data(), blobs.metadata.size()}; + result.second = + std::make_shared(s, ripple::sfMetadata); + } + return result; + } + catch (std::exception const& e) + { + std::stringstream txn; + std::stringstream meta; + std::copy( + blobs.transaction.begin(), + blobs.transaction.end(), + std::ostream_iterator(txn)); + std::copy( + blobs.metadata.begin(), + blobs.metadata.end(), + std::ostream_iterator(meta)); + gLog.error() << "Failed to deserialize transaction. txn = " << txn.str() + << " - meta = " << meta.str() << " txn length = " + << std::to_string(blobs.transaction.size()) + << " meta length = " + << std::to_string(blobs.metadata.size()); + throw e; + } +} + +std::pair< + std::shared_ptr, + std::shared_ptr> +deserializeTxPlusMeta( + Backend::TransactionAndMetadata const& blobs, + std::uint32_t seq) +{ + auto [tx, meta] = deserializeTxPlusMeta(blobs); + + std::shared_ptr m = + std::make_shared(tx->getTransactionID(), seq, *meta); + + return {tx, m}; +} + +boost::json::object +toJson(ripple::STBase const& obj) +{ + boost::json::value value = boost::json::parse( + obj.getJson(ripple::JsonOptions::none).toStyledString()); + + return value.as_object(); +} + +std::pair +toExpandedJson(Backend::TransactionAndMetadata const& blobs) +{ + auto [txn, meta] = deserializeTxPlusMeta(blobs, blobs.ledgerSequence); + auto txnJson = toJson(*txn); + auto metaJson = toJson(*meta); + insertDeliveredAmount(metaJson, txn, meta, blobs.date); + return {txnJson, metaJson}; +} + +bool +insertDeliveredAmount( + boost::json::object& metaJson, + std::shared_ptr const& txn, + std::shared_ptr const& meta, + uint32_t date) +{ + if 
(canHaveDeliveredAmount(txn, meta)) + { + if (auto amt = getDeliveredAmount(txn, meta, meta->getLgrSeq(), date)) + metaJson["delivered_amount"] = + toBoostJson(amt->getJson(ripple::JsonOptions::include_date)); + else + metaJson["delivered_amount"] = "unavailable"; + return true; + } + return false; +} + +boost::json::object +toJson(ripple::TxMeta const& meta) +{ + boost::json::value value = boost::json::parse( + meta.getJson(ripple::JsonOptions::none).toStyledString()); + + return value.as_object(); +} + +boost::json::value +toBoostJson(Json::Value const& value) +{ + boost::json::value boostValue = boost::json::parse(value.toStyledString()); + + return boostValue; +} + +boost::json::object +toJson(ripple::SLE const& sle) +{ + boost::json::value value = boost::json::parse( + sle.getJson(ripple::JsonOptions::none).toStyledString()); + if (sle.getType() == ripple::ltACCOUNT_ROOT) + { + if (sle.isFieldPresent(ripple::sfEmailHash)) + { + auto const& hash = sle.getFieldH128(ripple::sfEmailHash); + std::string md5 = strHex(hash); + boost::algorithm::to_lower(md5); + value.as_object()["urlgravatar"] = + str(boost::format("http://www.gravatar.com/avatar/%s") % md5); + } + } + return value.as_object(); +} + +boost::json::object +toJson(ripple::LedgerInfo const& lgrInfo) +{ + boost::json::object header; + header["ledger_sequence"] = lgrInfo.seq; + header["ledger_hash"] = ripple::strHex(lgrInfo.hash); + header["txns_hash"] = ripple::strHex(lgrInfo.txHash); + header["state_hash"] = ripple::strHex(lgrInfo.accountHash); + header["parent_hash"] = ripple::strHex(lgrInfo.parentHash); + header["total_coins"] = ripple::to_string(lgrInfo.drops); + header["close_flags"] = lgrInfo.closeFlags; + + // Always show fields that contribute to the ledger hash + header["parent_close_time"] = + lgrInfo.parentCloseTime.time_since_epoch().count(); + header["close_time"] = lgrInfo.closeTime.time_since_epoch().count(); + header["close_time_resolution"] = lgrInfo.closeTimeResolution.count(); + return 
header; +} + +std::optional +parseStringAsUInt(std::string const& value) +{ + std::optional index = {}; + try + { + index = boost::lexical_cast(value); + } + catch (boost::bad_lexical_cast const&) + { + } + + return index; +} + +std::variant +ledgerInfoFromRequest(Context const& ctx) +{ + auto hashValue = ctx.params.contains("ledger_hash") + ? ctx.params.at("ledger_hash") + : nullptr; + + if (!hashValue.is_null()) + { + if (!hashValue.is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, "ledgerHashNotString"}; + + ripple::uint256 ledgerHash; + if (!ledgerHash.parseHex(hashValue.as_string().c_str())) + return Status{ + RippledError::rpcINVALID_PARAMS, "ledgerHashMalformed"}; + + auto lgrInfo = ctx.backend->fetchLedgerByHash(ledgerHash, ctx.yield); + + if (!lgrInfo || lgrInfo->seq > ctx.range.maxSequence) + return Status{RippledError::rpcLGR_NOT_FOUND, "ledgerNotFound"}; + + return *lgrInfo; + } + + auto indexValue = ctx.params.contains("ledger_index") + ? ctx.params.at("ledger_index") + : nullptr; + + std::optional ledgerSequence = {}; + if (!indexValue.is_null()) + { + if (indexValue.is_string()) + { + boost::json::string const& stringIndex = indexValue.as_string(); + if (stringIndex == "validated") + ledgerSequence = ctx.range.maxSequence; + else + ledgerSequence = parseStringAsUInt(stringIndex.c_str()); + } + else if (indexValue.is_int64()) + ledgerSequence = indexValue.as_int64(); + } + else + { + ledgerSequence = ctx.range.maxSequence; + } + + if (!ledgerSequence) + return Status{RippledError::rpcINVALID_PARAMS, "ledgerIndexMalformed"}; + + auto lgrInfo = + ctx.backend->fetchLedgerBySequence(*ledgerSequence, ctx.yield); + + if (!lgrInfo || lgrInfo->seq > ctx.range.maxSequence) + return Status{RippledError::rpcLGR_NOT_FOUND, "ledgerNotFound"}; + + return *lgrInfo; +} + +// extract ledgerInfoFromRequest's parameter from context +std::variant +getLedgerInfoFromHashOrSeq( + BackendInterface const& backend, + boost::asio::yield_context& yield, + 
std::optional ledgerHash, + std::optional ledgerIndex, + uint32_t maxSeq) +{ + std::optional lgrInfo; + auto const err = + RPC::Status{RPC::RippledError::rpcLGR_NOT_FOUND, "ledgerNotFound"}; + if (ledgerHash) + { + // invoke uint256's constructor to parse the hex string , instead of + // copying buffer + ripple::uint256 ledgerHash256{std::string_view(*ledgerHash)}; + lgrInfo = backend.fetchLedgerByHash(ledgerHash256, yield); + if (!lgrInfo || lgrInfo->seq > maxSeq) + return err; + + return *lgrInfo; + } + auto const ledgerSequence = ledgerIndex.value_or(maxSeq); + // return without check db + if (ledgerSequence > maxSeq) + return err; + + lgrInfo = backend.fetchLedgerBySequence(ledgerSequence, yield); + if (!lgrInfo) + return err; + + return *lgrInfo; +} + +std::vector +ledgerInfoToBlob(ripple::LedgerInfo const& info, bool includeHash) +{ + ripple::Serializer s; + s.add32(info.seq); + s.add64(info.drops.drops()); + s.addBitString(info.parentHash); + s.addBitString(info.txHash); + s.addBitString(info.accountHash); + s.add32(info.parentCloseTime.time_since_epoch().count()); + s.add32(info.closeTime.time_since_epoch().count()); + s.add8(info.closeTimeResolution.count()); + s.add8(info.closeFlags); + if (includeHash) + s.addBitString(info.hash); + return s.peekData(); +} + +std::uint64_t +getStartHint(ripple::SLE const& sle, ripple::AccountID const& accountID) +{ + if (sle.getType() == ripple::ltRIPPLE_STATE) + { + if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == accountID) + return sle.getFieldU64(ripple::sfLowNode); + else if ( + sle.getFieldAmount(ripple::sfHighLimit).getIssuer() == accountID) + return sle.getFieldU64(ripple::sfHighNode); + } + + if (!sle.isFieldPresent(ripple::sfOwnerNode)) + return 0; + + return sle.getFieldU64(ripple::sfOwnerNode); +} + +std::variant +traverseOwnedNodes( + BackendInterface const& backend, + ripple::AccountID const& accountID, + std::uint32_t sequence, + std::uint32_t limit, + std::optional jsonCursor, + 
boost::asio::yield_context& yield, + std::function atOwnedNode) +{ + if (!backend.fetchLedgerObject( + ripple::keylet::account(accountID).key, sequence, yield)) + return Status{RippledError::rpcACT_NOT_FOUND}; + + auto const maybeCursor = parseAccountCursor(jsonCursor); + if (!maybeCursor) + return Status(ripple::rpcINVALID_PARAMS, "Malformed cursor"); + + auto [hexCursor, startHint] = *maybeCursor; + + return traverseOwnedNodes( + backend, + ripple::keylet::ownerDir(accountID), + hexCursor, + startHint, + sequence, + limit, + jsonCursor, + yield, + atOwnedNode); +} + +std::variant +ngTraverseOwnedNodes( + BackendInterface const& backend, + ripple::AccountID const& accountID, + std::uint32_t sequence, + std::uint32_t limit, + std::optional jsonCursor, + boost::asio::yield_context& yield, + std::function atOwnedNode) +{ + auto const maybeCursor = parseAccountCursor(jsonCursor); + // the format is checked in RPC framework level + auto const [hexCursor, startHint] = *maybeCursor; + + return traverseOwnedNodes( + backend, + ripple::keylet::ownerDir(accountID), + hexCursor, + startHint, + sequence, + limit, + jsonCursor, + yield, + atOwnedNode); +} + +std::variant +traverseOwnedNodes( + BackendInterface const& backend, + ripple::Keylet const& owner, + ripple::uint256 const& hexMarker, + std::uint32_t const startHint, + std::uint32_t sequence, + std::uint32_t limit, + std::optional jsonCursor, + boost::asio::yield_context& yield, + std::function atOwnedNode) +{ + auto cursor = AccountCursor({beast::zero, 0}); + + auto const rootIndex = owner; + auto currentIndex = rootIndex; + // track the current page we are accessing, will return it as the next hint + auto currentPage = startHint; + + std::vector keys; + // Only reserve 2048 nodes when fetching all owned ledger objects. If there + // are more, then keys will allocate more memory, which is suboptimal, but + // should only occur occasionally. 
+ keys.reserve(std::min(std::uint32_t{2048}, limit)); + + auto start = std::chrono::system_clock::now(); + + // If startAfter is not zero try jumping to that page using the hint + if (hexMarker.isNonZero()) + { + auto const hintIndex = ripple::keylet::page(rootIndex, startHint); + auto hintDir = + backend.fetchLedgerObject(hintIndex.key, sequence, yield); + + if (!hintDir) + return Status(ripple::rpcINVALID_PARAMS, "Invalid marker"); + + ripple::SerialIter it{hintDir->data(), hintDir->size()}; + ripple::SLE sle{it, hintIndex.key}; + + if (auto const& indexes = sle.getFieldV256(ripple::sfIndexes); + std::find(std::begin(indexes), std::end(indexes), hexMarker) == + std::end(indexes)) + { + // result in empty dataset + return AccountCursor({beast::zero, 0}); + } + + currentIndex = hintIndex; + bool found = false; + for (;;) + { + auto const ownerDir = + backend.fetchLedgerObject(currentIndex.key, sequence, yield); + + if (!ownerDir) + return Status( + ripple::rpcINVALID_PARAMS, "Owner directory not found"); + + ripple::SerialIter it{ownerDir->data(), ownerDir->size()}; + ripple::SLE sle{it, currentIndex.key}; + + for (auto const& key : sle.getFieldV256(ripple::sfIndexes)) + { + if (!found) + { + if (key == hexMarker) + found = true; + } + else + { + keys.push_back(key); + + if (--limit == 0) + { + break; + } + } + } + + if (limit == 0) + { + cursor = AccountCursor({keys.back(), currentPage}); + break; + } + // the next page + auto const uNodeNext = sle.getFieldU64(ripple::sfIndexNext); + if (uNodeNext == 0) + break; + + currentIndex = ripple::keylet::page(rootIndex, uNodeNext); + currentPage = uNodeNext; + } + } + else + { + for (;;) + { + auto const ownerDir = + backend.fetchLedgerObject(currentIndex.key, sequence, yield); + + if (!ownerDir) + break; + + ripple::SerialIter it{ownerDir->data(), ownerDir->size()}; + ripple::SLE sle{it, currentIndex.key}; + + for (auto const& key : sle.getFieldV256(ripple::sfIndexes)) + { + keys.push_back(key); + + if (--limit == 0) + 
break; + } + + if (limit == 0) + { + cursor = AccountCursor({keys.back(), currentPage}); + break; + } + + auto const uNodeNext = sle.getFieldU64(ripple::sfIndexNext); + if (uNodeNext == 0) + break; + + currentIndex = ripple::keylet::page(rootIndex, uNodeNext); + currentPage = uNodeNext; + } + } + auto end = std::chrono::system_clock::now(); + + gLog.debug() << "Time loading owned directories: " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds"; + + auto [objects, timeDiff] = util::timed( + [&]() { return backend.fetchLedgerObjects(keys, sequence, yield); }); + + gLog.debug() << "Time loading owned entries: " << timeDiff + << " milliseconds"; + + for (auto i = 0; i < objects.size(); ++i) + { + ripple::SerialIter it{objects[i].data(), objects[i].size()}; + atOwnedNode(ripple::SLE{it, keys[i]}); + } + + if (limit == 0) + return cursor; + + return AccountCursor({beast::zero, 0}); +} + +std::shared_ptr +read( + ripple::Keylet const& keylet, + ripple::LedgerInfo const& lgrInfo, + Context const& context) +{ + if (auto const blob = context.backend->fetchLedgerObject( + keylet.key, lgrInfo.seq, context.yield); + blob) + { + return std::make_shared( + ripple::SerialIter{blob->data(), blob->size()}, keylet.key); + } + + return nullptr; +} + +std::optional +parseRippleLibSeed(boost::json::value const& value) +{ + // ripple-lib encodes seed used to generate an Ed25519 wallet in a + // non-standard way. While rippled never encode seeds that way, we + // try to detect such keys to avoid user confusion. 
+ if (!value.is_string()) + return {}; + + auto const result = ripple::decodeBase58Token( + value.as_string().c_str(), ripple::TokenType::None); + + if (result.size() == 18 && + static_cast(result[0]) == std::uint8_t(0xE1) && + static_cast(result[1]) == std::uint8_t(0x4B)) + return ripple::Seed(ripple::makeSlice(result.substr(2))); + + return {}; +} + +std::variant> +keypairFromRequst(boost::json::object const& request) +{ + bool const has_key_type = request.contains("key_type"); + + // All of the secret types we allow, but only one at a time. + // The array should be constexpr, but that makes Visual Studio unhappy. + static std::string const secretTypes[]{ + "passphrase", "secret", "seed", "seed_hex"}; + + // Identify which secret type is in use. + std::string secretType = ""; + int count = 0; + for (auto t : secretTypes) + { + if (request.contains(t)) + { + ++count; + secretType = t; + } + } + + if (count == 0) + return Status{RippledError::rpcINVALID_PARAMS, "missing field secret"}; + + if (count > 1) + { + return Status{ + RippledError::rpcINVALID_PARAMS, + "Exactly one of the following must be specified: " + " passphrase, secret, seed, or seed_hex"}; + } + + std::optional keyType; + std::optional seed; + + if (has_key_type) + { + if (!request.at("key_type").is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "keyTypeNotString"}; + + std::string key_type = request.at("key_type").as_string().c_str(); + keyType = ripple::keyTypeFromString(key_type); + + if (!keyType) + return Status{ + RippledError::rpcINVALID_PARAMS, "invalidFieldKeyType"}; + + if (secretType == "secret") + return Status{ + RippledError::rpcINVALID_PARAMS, + "The secret field is not allowed if key_type is used."}; + } + + // ripple-lib encodes seed used to generate an Ed25519 wallet in a + // non-standard way. While we never encode seeds that way, we try + // to detect such keys to avoid user confusion. 
+ if (secretType != "seed_hex") + { + seed = parseRippleLibSeed(request.at(secretType)); + + if (seed) + { + // If the user passed in an Ed25519 seed but *explicitly* + // requested another key type, return an error. + if (keyType.value_or(ripple::KeyType::ed25519) != + ripple::KeyType::ed25519) + return Status{ + RippledError::rpcINVALID_PARAMS, + "Specified seed is for an Ed25519 wallet."}; + + keyType = ripple::KeyType::ed25519; + } + } + + if (!keyType) + keyType = ripple::KeyType::secp256k1; + + if (!seed) + { + if (has_key_type) + { + if (!request.at(secretType).is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, + "secret value must be string"}; + + std::string key = request.at(secretType).as_string().c_str(); + + if (secretType == "seed") + seed = ripple::parseBase58(key); + else if (secretType == "passphrase") + seed = ripple::parseGenericSeed(key); + else if (secretType == "seed_hex") + { + ripple::uint128 s; + if (s.parseHex(key)) + seed.emplace(ripple::Slice(s.data(), s.size())); + } + } + else + { + if (!request.at("secret").is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, + "field secret should be a string"}; + + std::string secret = request.at("secret").as_string().c_str(); + seed = ripple::parseGenericSeed(secret); + } + } + + if (!seed) + return Status{ + RippledError::rpcBAD_SEED, + "Bad Seed: invalid field message secretType"}; + + if (keyType != ripple::KeyType::secp256k1 && + keyType != ripple::KeyType::ed25519) + return Status{ + RippledError::rpcINVALID_PARAMS, + "keypairForSignature: invalid key type"}; + + return generateKeyPair(*keyType, *seed); +} + +std::vector +getAccountsFromTransaction(boost::json::object const& transaction) +{ + std::vector accounts = {}; + for (auto const& [key, value] : transaction) + { + if (value.is_object()) + { + auto inObject = getAccountsFromTransaction(value.as_object()); + accounts.insert(accounts.end(), inObject.begin(), inObject.end()); + } + else if (value.is_string()) + { + 
auto account = accountFromStringStrict(value.as_string().c_str()); + if (account) + { + accounts.push_back(*account); + } + } + } + + return accounts; +} + +bool +isGlobalFrozen( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& issuer, + boost::asio::yield_context& yield) +{ + if (ripple::isXRP(issuer)) + return false; + + auto key = ripple::keylet::account(issuer).key; + auto blob = backend.fetchLedgerObject(key, sequence, yield); + + if (!blob) + return false; + + ripple::SerialIter it{blob->data(), blob->size()}; + ripple::SLE sle{it, key}; + + return sle.isFlag(ripple::lsfGlobalFreeze); +} + +bool +isFrozen( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& account, + ripple::Currency const& currency, + ripple::AccountID const& issuer, + boost::asio::yield_context& yield) +{ + if (ripple::isXRP(currency)) + return false; + + auto key = ripple::keylet::account(issuer).key; + auto blob = backend.fetchLedgerObject(key, sequence, yield); + + if (!blob) + return false; + + ripple::SerialIter it{blob->data(), blob->size()}; + ripple::SLE sle{it, key}; + + if (sle.isFlag(ripple::lsfGlobalFreeze)) + return true; + + if (issuer != account) + { + key = ripple::keylet::line(account, issuer, currency).key; + blob = backend.fetchLedgerObject(key, sequence, yield); + + if (!blob) + return false; + + ripple::SerialIter issuerIt{blob->data(), blob->size()}; + ripple::SLE issuerLine{issuerIt, key}; + + auto frozen = + (issuer > account) ? 
ripple::lsfHighFreeze : ripple::lsfLowFreeze; + + if (issuerLine.isFlag(frozen)) + return true; + } + + return false; +} + +ripple::XRPAmount +xrpLiquid( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& id, + boost::asio::yield_context& yield) +{ + auto key = ripple::keylet::account(id).key; + auto blob = backend.fetchLedgerObject(key, sequence, yield); + + if (!blob) + return beast::zero; + + ripple::SerialIter it{blob->data(), blob->size()}; + ripple::SLE sle{it, key}; + + std::uint32_t const ownerCount = sle.getFieldU32(ripple::sfOwnerCount); + + auto const reserve = + backend.fetchFees(sequence, yield)->accountReserve(ownerCount); + + auto const balance = sle.getFieldAmount(ripple::sfBalance); + + ripple::STAmount amount = balance - reserve; + if (balance < reserve) + amount.clear(); + + return amount.xrp(); +} + +ripple::STAmount +accountFunds( + BackendInterface const& backend, + std::uint32_t const sequence, + ripple::STAmount const& amount, + ripple::AccountID const& id, + boost::asio::yield_context& yield) +{ + if (!amount.native() && amount.getIssuer() == id) + { + return amount; + } + else + { + return accountHolds( + backend, + sequence, + id, + amount.getCurrency(), + amount.getIssuer(), + true, + yield); + } +} + +ripple::STAmount +accountHolds( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& account, + ripple::Currency const& currency, + ripple::AccountID const& issuer, + bool const zeroIfFrozen, + boost::asio::yield_context& yield) +{ + ripple::STAmount amount; + if (ripple::isXRP(currency)) + { + return {xrpLiquid(backend, sequence, account, yield)}; + } + auto key = ripple::keylet::line(account, issuer, currency).key; + + auto const blob = backend.fetchLedgerObject(key, sequence, yield); + + if (!blob) + { + amount.clear({currency, issuer}); + return amount; + } + + ripple::SerialIter it{blob->data(), blob->size()}; + ripple::SLE sle{it, key}; + + if (zeroIfFrozen && + 
isFrozen(backend, sequence, account, currency, issuer, yield)) + { + amount.clear(ripple::Issue(currency, issuer)); + } + else + { + amount = sle.getFieldAmount(ripple::sfBalance); + if (account > issuer) + { + // Put balance in account terms. + amount.negate(); + } + amount.setIssuer(issuer); + } + + return amount; +} + +ripple::Rate +transferRate( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& issuer, + boost::asio::yield_context& yield) +{ + auto key = ripple::keylet::account(issuer).key; + auto blob = backend.fetchLedgerObject(key, sequence, yield); + + if (blob) + { + ripple::SerialIter it{blob->data(), blob->size()}; + ripple::SLE sle{it, key}; + + if (sle.isFieldPresent(ripple::sfTransferRate)) + return ripple::Rate{sle.getFieldU32(ripple::sfTransferRate)}; + } + + return ripple::parityRate; +} + +boost::json::array +postProcessOrderBook( + std::vector const& offers, + ripple::Book const& book, + ripple::AccountID const& takerID, + Backend::BackendInterface const& backend, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield) +{ + boost::json::array jsonOffers; + + std::map umBalance; + + bool globalFreeze = + isGlobalFrozen(backend, ledgerSequence, book.out.account, yield) || + isGlobalFrozen(backend, ledgerSequence, book.out.account, yield); + + auto rate = transferRate(backend, ledgerSequence, book.out.account, yield); + + for (auto const& obj : offers) + { + try + { + ripple::SerialIter it{obj.blob.data(), obj.blob.size()}; + ripple::SLE offer{it, obj.key}; + ripple::uint256 bookDir = + offer.getFieldH256(ripple::sfBookDirectory); + + auto const uOfferOwnerID = offer.getAccountID(ripple::sfAccount); + auto const& saTakerGets = offer.getFieldAmount(ripple::sfTakerGets); + auto const& saTakerPays = offer.getFieldAmount(ripple::sfTakerPays); + ripple::STAmount saOwnerFunds; + bool firstOwnerOffer = true; + + if (book.out.account == uOfferOwnerID) + { + // If an offer is selling issuer's own IOUs, 
it is fully + // funded. + saOwnerFunds = saTakerGets; + } + else if (globalFreeze) + { + // If either asset is globally frozen, consider all offers + // that aren't ours to be totally unfunded + saOwnerFunds.clear(book.out); + } + else + { + auto umBalanceEntry = umBalance.find(uOfferOwnerID); + if (umBalanceEntry != umBalance.end()) + { + // Found in running balance table. + + saOwnerFunds = umBalanceEntry->second; + firstOwnerOffer = false; + } + else + { + bool zeroIfFrozen = true; + saOwnerFunds = accountHolds( + backend, + ledgerSequence, + uOfferOwnerID, + book.out.currency, + book.out.account, + zeroIfFrozen, + yield); + + if (saOwnerFunds < beast::zero) + saOwnerFunds.clear(); + } + } + + boost::json::object offerJson = toJson(offer); + + ripple::STAmount saTakerGetsFunded; + ripple::STAmount saOwnerFundsLimit = saOwnerFunds; + ripple::Rate offerRate = ripple::parityRate; + ripple::STAmount dirRate = + ripple::amountFromQuality(getQuality(bookDir)); + + if (rate != ripple::parityRate + // Have a tranfer fee. + && takerID != book.out.account + // Not taking offers of own IOUs. + && book.out.account != uOfferOwnerID) + // Offer owner not issuing ownfunds + { + // Need to charge a transfer fee to offer owner. + offerRate = rate; + saOwnerFundsLimit = ripple::divide(saOwnerFunds, offerRate); + } + + if (saOwnerFundsLimit >= saTakerGets) + { + // Sufficient funds no shenanigans. + saTakerGetsFunded = saTakerGets; + } + else + { + saTakerGetsFunded = saOwnerFundsLimit; + offerJson["taker_gets_funded"] = toBoostJson( + saTakerGetsFunded.getJson(ripple::JsonOptions::none)); + offerJson["taker_pays_funded"] = toBoostJson( + std::min( + saTakerPays, + ripple::multiply( + saTakerGetsFunded, dirRate, saTakerPays.issue())) + .getJson(ripple::JsonOptions::none)); + } + + ripple::STAmount saOwnerPays = (ripple::parityRate == offerRate) + ? 
saTakerGetsFunded + : std::min( + saOwnerFunds, + ripple::multiply(saTakerGetsFunded, offerRate)); + + umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays; + + if (firstOwnerOffer) + offerJson["owner_funds"] = saOwnerFunds.getText(); + + offerJson["quality"] = dirRate.getText(); + + jsonOffers.push_back(offerJson); + } + catch (std::exception const& e) + { + gLog.error() << "caught exception: " << e.what(); + } + } + return jsonOffers; +} + +std::variant +parseBook(boost::json::object const& request) +{ + if (!request.contains("taker_pays")) + return Status{ + RippledError::rpcINVALID_PARAMS, "Missing field 'taker_pays'"}; + + if (!request.contains("taker_gets")) + return Status{ + RippledError::rpcINVALID_PARAMS, "Missing field 'taker_gets'"}; + + if (!request.at("taker_pays").is_object()) + return Status{ + RippledError::rpcINVALID_PARAMS, + "Field 'taker_pays' is not an object"}; + + if (!request.at("taker_gets").is_object()) + return Status{ + RippledError::rpcINVALID_PARAMS, + "Field 'taker_gets' is not an object"}; + + auto taker_pays = request.at("taker_pays").as_object(); + if (!taker_pays.contains("currency")) + return Status{RippledError::rpcSRC_CUR_MALFORMED}; + + if (!taker_pays.at("currency").is_string()) + return Status{RippledError::rpcSRC_CUR_MALFORMED}; + + auto taker_gets = request.at("taker_gets").as_object(); + if (!taker_gets.contains("currency")) + return Status{RippledError::rpcDST_AMT_MALFORMED}; + + if (!taker_gets.at("currency").is_string()) + return Status{ + RippledError::rpcDST_AMT_MALFORMED, + }; + + ripple::Currency pay_currency; + if (!ripple::to_currency( + pay_currency, taker_pays.at("currency").as_string().c_str())) + return Status{RippledError::rpcSRC_CUR_MALFORMED}; + + ripple::Currency get_currency; + if (!ripple::to_currency( + get_currency, taker_gets["currency"].as_string().c_str())) + return Status{RippledError::rpcDST_AMT_MALFORMED}; + + ripple::AccountID pay_issuer; + if (taker_pays.contains("issuer")) + { + if 
(!taker_pays.at("issuer").is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, "takerPaysIssuerNotString"}; + + if (!ripple::to_issuer( + pay_issuer, taker_pays.at("issuer").as_string().c_str())) + return Status{RippledError::rpcSRC_ISR_MALFORMED}; + + if (pay_issuer == ripple::noAccount()) + return Status{RippledError::rpcSRC_ISR_MALFORMED}; + } + else + { + pay_issuer = ripple::xrpAccount(); + } + + if (isXRP(pay_currency) && !isXRP(pay_issuer)) + return Status{ + RippledError::rpcSRC_ISR_MALFORMED, + "Unneeded field 'taker_pays.issuer' for XRP currency " + "specification."}; + + if (!isXRP(pay_currency) && isXRP(pay_issuer)) + return Status{ + RippledError::rpcSRC_ISR_MALFORMED, + "Invalid field 'taker_pays.issuer', expected non-XRP " + "issuer."}; + + if ((!isXRP(pay_currency)) && (!taker_pays.contains("issuer"))) + return Status{ + RippledError::rpcSRC_ISR_MALFORMED, "Missing non-XRP issuer."}; + + ripple::AccountID get_issuer; + + if (taker_gets.contains("issuer")) + { + if (!taker_gets["issuer"].is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, + "taker_gets.issuer should be string"}; + + if (!ripple::to_issuer( + get_issuer, taker_gets.at("issuer").as_string().c_str())) + return Status{ + RippledError::rpcDST_ISR_MALFORMED, + "Invalid field 'taker_gets.issuer', bad issuer."}; + + if (get_issuer == ripple::noAccount()) + return Status{ + RippledError::rpcDST_ISR_MALFORMED, + "Invalid field 'taker_gets.issuer', bad issuer account " + "one."}; + } + else + { + get_issuer = ripple::xrpAccount(); + } + + if (ripple::isXRP(get_currency) && !ripple::isXRP(get_issuer)) + return Status{ + RippledError::rpcDST_ISR_MALFORMED, + "Unneeded field 'taker_gets.issuer' for XRP currency " + "specification."}; + + if (!ripple::isXRP(get_currency) && ripple::isXRP(get_issuer)) + return Status{ + RippledError::rpcDST_ISR_MALFORMED, + "Invalid field 'taker_gets.issuer', expected non-XRP issuer."}; + + if (pay_currency == get_currency && pay_issuer == 
get_issuer) + return Status{RippledError::rpcBAD_MARKET, "badMarket"}; + + return ripple::Book{{pay_currency, pay_issuer}, {get_currency, get_issuer}}; +} + +std::variant +parseTaker(boost::json::value const& taker) +{ + std::optional takerID = {}; + if (!taker.is_string()) + return {Status{RippledError::rpcINVALID_PARAMS, "takerNotString"}}; + + takerID = accountFromStringStrict(taker.as_string().c_str()); + + if (!takerID) + return Status{RippledError::rpcBAD_ISSUER, "invalidTakerAccount"}; + return *takerID; +} +bool +specifiesCurrentOrClosedLedger(boost::json::object const& request) +{ + if (request.contains("ledger_index")) + { + auto indexValue = request.at("ledger_index"); + if (indexValue.is_string()) + { + std::string index = indexValue.as_string().c_str(); + return index == "current" || index == "closed"; + } + } + return false; +} + +std::variant +getNFTID(boost::json::object const& request) +{ + if (!request.contains(JS(nft_id))) + return Status{RippledError::rpcINVALID_PARAMS, "missingTokenID"}; + + if (!request.at(JS(nft_id)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "tokenIDNotString"}; + + ripple::uint256 tokenid; + if (!tokenid.parseHex(request.at(JS(nft_id)).as_string().c_str())) + return Status{RippledError::rpcINVALID_PARAMS, "malformedTokenID"}; + + return tokenid; +} + +// TODO - this function is long and shouldn't be responsible for as much as it +// is. Split it out into some helper functions. 
+std::variant +traverseTransactions( + Context const& context, + std::function const& backend, + std::uint32_t const, + bool const, + std::optional const&, + boost::asio::yield_context& yield)> transactionFetcher) +{ + auto request = context.params; + boost::json::object response = {}; + + bool const binary = getBool(request, JS(binary), false); + bool const forward = getBool(request, JS(forward), false); + + std::optional cursor; + + if (request.contains(JS(marker))) + { + if (!request.at(JS(marker)).is_object()) + return Status{RippledError::rpcINVALID_PARAMS, "invalidMarker"}; + auto const& obj = request.at(JS(marker)).as_object(); + + std::optional transactionIndex = {}; + if (obj.contains(JS(seq))) + { + if (!obj.at(JS(seq)).is_int64()) + return Status{ + RippledError::rpcINVALID_PARAMS, "transactionIndexNotInt"}; + + transactionIndex = + boost::json::value_to(obj.at(JS(seq))); + } + + std::optional ledgerIndex = {}; + if (obj.contains(JS(ledger))) + { + if (!obj.at(JS(ledger)).is_int64()) + return Status{ + RippledError::rpcINVALID_PARAMS, "ledgerIndexNotInt"}; + + ledgerIndex = + boost::json::value_to(obj.at(JS(ledger))); + } + + if (!transactionIndex || !ledgerIndex) + return Status{ + RippledError::rpcINVALID_PARAMS, "missingLedgerOrSeq"}; + + cursor = {*ledgerIndex, *transactionIndex}; + } + + auto minIndex = context.range.minSequence; + auto maxIndex = context.range.maxSequence; + std::optional min; + std::optional max; + + if (request.contains(JS(ledger_index_min))) + { + if (!request.at(JS(ledger_index_min)).is_int64()) + { + return Status{ + RippledError::rpcINVALID_PARAMS, "ledgerSeqMinNotNumber"}; + } + + min = request.at(JS(ledger_index_min)).as_int64(); + + if (*min != -1) + { + if (context.range.maxSequence < *min || + context.range.minSequence > *min) + return Status{ + RippledError::rpcLGR_IDX_MALFORMED, + "ledgerSeqMinOutOfRange"}; + else + minIndex = static_cast(*min); + } + + if (forward && !cursor) + cursor = {minIndex, 0}; + } + + if 
(request.contains(JS(ledger_index_max))) + { + if (!request.at(JS(ledger_index_max)).is_int64()) + { + return Status{ + RippledError::rpcINVALID_PARAMS, "ledgerSeqMaxNotNumber"}; + } + + max = request.at(JS(ledger_index_max)).as_int64(); + + if (*max != -1) + { + if (context.range.maxSequence < *max || + context.range.minSequence > *max) + return Status{RippledError::rpcLGR_IDXS_INVALID}; + else + maxIndex = static_cast(*max); + } + + if (minIndex > maxIndex) + return Status{RippledError::rpcINVALID_PARAMS, "invalidIndex"}; + + if (!forward && !cursor) + cursor = {maxIndex, INT32_MAX}; + } + + if (max && min && *max < *min) + { + return Status{RippledError::rpcLGR_IDXS_INVALID, "lgrIdxsInvalid"}; + } + + if (request.contains(JS(ledger_index)) || request.contains(JS(ledger_hash))) + { + if (request.contains(JS(ledger_index_max)) || + request.contains(JS(ledger_index_min))) + return Status{ + RippledError::rpcINVALID_PARAMS, + "containsLedgerSpecifierAndRange"}; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v); status) + return *status; + + maxIndex = minIndex = std::get(v).seq; + } + + if (!cursor) + { + if (forward) + cursor = {minIndex, 0}; + else + cursor = {maxIndex, INT32_MAX}; + } + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + if (request.contains(JS(limit))) + response[JS(limit)] = limit; + + boost::json::array txns; + auto [blobs, retCursor] = transactionFetcher( + context.backend, limit, forward, cursor, context.yield); + auto timeDiff = util::timed([&, &retCursor = retCursor, &blobs = blobs]() { + if (retCursor) + { + boost::json::object cursorJson; + cursorJson[JS(ledger)] = retCursor->ledgerSequence; + cursorJson[JS(seq)] = retCursor->transactionIndex; + response[JS(marker)] = cursorJson; + } + + for (auto const& txnPlusMeta : blobs) + { + if ((txnPlusMeta.ledgerSequence < minIndex && !forward) || + (txnPlusMeta.ledgerSequence > maxIndex && forward)) + { + 
response.erase(JS(marker)); + break; + } + else if (txnPlusMeta.ledgerSequence > maxIndex && !forward) + { + gLog.debug() + << "Skipping over transactions from incomplete ledger"; + continue; + } + + boost::json::object obj; + + if (!binary) + { + auto [txn, meta] = toExpandedJson(txnPlusMeta); + obj[JS(meta)] = meta; + obj[JS(tx)] = txn; + obj[JS(tx)].as_object()[JS(ledger_index)] = + txnPlusMeta.ledgerSequence; + obj[JS(tx)].as_object()[JS(date)] = txnPlusMeta.date; + } + else + { + obj[JS(meta)] = ripple::strHex(txnPlusMeta.metadata); + obj[JS(tx_blob)] = ripple::strHex(txnPlusMeta.transaction); + obj[JS(ledger_index)] = txnPlusMeta.ledgerSequence; + obj[JS(date)] = txnPlusMeta.date; + } + obj[JS(validated)] = true; + txns.push_back(obj); + } + + response[JS(ledger_index_min)] = minIndex; + response[JS(ledger_index_max)] = maxIndex; + response[JS(transactions)] = txns; + }); + gLog.info() << "serialization took " << timeDiff + + << " milliseconds"; + + return response; +} + +} // namespace RPC diff --git a/src/rpc/RPCHelpers.h b/src/rpc/RPCHelpers.h new file mode 100644 index 00000000..f30ebe7d --- /dev/null +++ b/src/rpc/RPCHelpers.h @@ -0,0 +1,308 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +/* + * This file contains a variety of utility functions used when executing + * the handlers + */ + +#include +#include +#include +#include +#include +#include +#include + +// Useful macro for borrowing from ripple::jss +// static strings. (J)son (S)trings +#define JS(x) ripple::jss::x.c_str() + +// Access (SF)ield name (S)trings +#define SFS(x) ripple::x.jsonName.c_str() + +namespace RPC { +std::optional +accountFromStringStrict(std::string const& account); + +bool +isOwnedByAccount(ripple::SLE const& sle, ripple::AccountID const& accountID); + +std::uint64_t +getStartHint(ripple::SLE const& sle, ripple::AccountID const& accountID); + +std::optional +parseAccountCursor(std::optional jsonCursor); + +// TODO this function should probably be in a different file and namespace +std::pair< + std::shared_ptr, + std::shared_ptr> +deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs); + +// TODO this function should probably be in a different file and namespace +std::pair< + std::shared_ptr, + std::shared_ptr> +deserializeTxPlusMeta( + Backend::TransactionAndMetadata const& blobs, + std::uint32_t seq); + +std::pair +toExpandedJson(Backend::TransactionAndMetadata const& blobs); + +bool +insertDeliveredAmount( + boost::json::object& metaJson, + std::shared_ptr const& txn, + std::shared_ptr const& meta, + uint32_t date); + +boost::json::object +toJson(ripple::STBase const& obj); + +boost::json::object +toJson(ripple::SLE const& sle); + +boost::json::object +toJson(ripple::LedgerInfo const& info); + +boost::json::object 
+toJson(ripple::TxMeta const& meta); + +using RippledJson = Json::Value; +boost::json::value +toBoostJson(RippledJson const& value); + +boost::json::object +generatePubLedgerMessage( + ripple::LedgerInfo const& lgrInfo, + ripple::Fees const& fees, + std::string const& ledgerRange, + std::uint32_t txnCount); + +std::variant +ledgerInfoFromRequest(Context const& ctx); + +std::variant +getLedgerInfoFromHashOrSeq( + BackendInterface const& backend, + boost::asio::yield_context& yield, + std::optional ledgerHash, + std::optional ledgerIndex, + uint32_t maxSeq); + +std::variant +traverseOwnedNodes( + BackendInterface const& backend, + ripple::AccountID const& accountID, + std::uint32_t sequence, + std::uint32_t limit, + std::optional jsonCursor, + boost::asio::yield_context& yield, + std::function atOwnedNode); + +std::variant +traverseOwnedNodes( + BackendInterface const& backend, + ripple::Keylet const& owner, + ripple::uint256 const& hexMarker, + std::uint32_t const startHint, + std::uint32_t sequence, + std::uint32_t limit, + std::optional jsonCursor, + boost::asio::yield_context& yield, + std::function atOwnedNode); + +// Remove the account check from traverseOwnedNodes +// Account check has been done by framework,remove it from internal function +std::variant +ngTraverseOwnedNodes( + BackendInterface const& backend, + ripple::AccountID const& accountID, + std::uint32_t sequence, + std::uint32_t limit, + std::optional jsonCursor, + boost::asio::yield_context& yield, + std::function atOwnedNode); + +std::shared_ptr +read( + ripple::Keylet const& keylet, + ripple::LedgerInfo const& lgrInfo, + Context const& context); + +std::variant> +keypairFromRequst(boost::json::object const& request); + +std::vector +getAccountsFromTransaction(boost::json::object const& transaction); + +std::vector +ledgerInfoToBlob(ripple::LedgerInfo const& info, bool includeHash = false); + +bool +isGlobalFrozen( + BackendInterface const& backend, + std::uint32_t seq, + ripple::AccountID const& 
issuer, + boost::asio::yield_context& yield); + +bool +isFrozen( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& account, + ripple::Currency const& currency, + ripple::AccountID const& issuer, + boost::asio::yield_context& yield); + +ripple::STAmount +accountFunds( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::STAmount const& amount, + ripple::AccountID const& id, + boost::asio::yield_context& yield); + +ripple::STAmount +accountHolds( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& account, + ripple::Currency const& currency, + ripple::AccountID const& issuer, + bool zeroIfFrozen, + boost::asio::yield_context& yield); + +ripple::Rate +transferRate( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& issuer, + boost::asio::yield_context& yield); + +ripple::XRPAmount +xrpLiquid( + BackendInterface const& backend, + std::uint32_t sequence, + ripple::AccountID const& id, + boost::asio::yield_context& yield); + +boost::json::array +postProcessOrderBook( + std::vector const& offers, + ripple::Book const& book, + ripple::AccountID const& takerID, + Backend::BackendInterface const& backend, + std::uint32_t ledgerSequence, + boost::asio::yield_context& yield); + +std::variant +parseBook(boost::json::object const& request); + +std::variant +parseTaker(boost::json::value const& request); + +std::optional +getUInt(boost::json::object const& request, std::string const& field); + +std::uint32_t +getUInt( + boost::json::object const& request, + std::string const& field, + std::uint32_t dfault); + +std::uint32_t +getRequiredUInt(boost::json::object const& request, std::string const& field); + +std::optional +getBool(boost::json::object const& request, std::string const& field); + +bool +getBool( + boost::json::object const& request, + std::string const& field, + bool dfault); + +bool +getRequiredBool(boost::json::object const& request, 
std::string const& field); + +std::optional +getString(boost::json::object const& request, std::string const& field); + +std::string +getRequiredString(boost::json::object const& request, std::string const& field); + +std::string +getString( + boost::json::object const& request, + std::string const& field, + std::string dfault); + +Status +getHexMarker(boost::json::object const& request, ripple::uint256& marker); + +Status +getAccount(boost::json::object const& request, ripple::AccountID& accountId); + +Status +getAccount( + boost::json::object const& request, + ripple::AccountID& destAccount, + boost::string_view const& field); + +Status +getOptionalAccount( + boost::json::object const& request, + std::optional& account, + boost::string_view const& field); + +Status +getTaker(boost::json::object const& request, ripple::AccountID& takerID); + +Status +getChannelId(boost::json::object const& request, ripple::uint256& channelId); + +bool +specifiesCurrentOrClosedLedger(boost::json::object const& request); + +std::variant +getNFTID(boost::json::object const& request); + +// This function is the driver for both `account_tx` and `nft_tx` and should +// be used for any future transaction enumeration APIs. +std::variant +traverseTransactions( + Context const& context, + std::function const& backend, + std::uint32_t const, + bool const, + std::optional const&, + boost::asio::yield_context& yield)> transactionFetcher); + +[[nodiscard]] boost::json::object const +computeBookChanges( + ripple::LedgerInfo const& lgrInfo, + std::vector const& transactions); + +} // namespace RPC diff --git a/src/rpc/WorkQueue.cpp b/src/rpc/WorkQueue.cpp new file mode 100644 index 00000000..7565dc45 --- /dev/null +++ b/src/rpc/WorkQueue.cpp @@ -0,0 +1,30 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +WorkQueue::WorkQueue(std::uint32_t numWorkers, uint32_t maxSize) +{ + if (maxSize != 0) + maxSize_ = maxSize; + while (--numWorkers) + { + threads_.emplace_back([this] { ioc_.run(); }); + } +} diff --git a/src/rpc/WorkQueue.h b/src/rpc/WorkQueue.h new file mode 100644 index 00000000..f1caf283 --- /dev/null +++ b/src/rpc/WorkQueue.h @@ -0,0 +1,97 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +class WorkQueue +{ + // these are cumulative for the lifetime of the process + std::atomic_uint64_t queued_ = 0; + std::atomic_uint64_t durationUs_ = 0; + + std::atomic_uint64_t curSize_ = 0; + uint32_t maxSize_ = std::numeric_limits::max(); + clio::Logger log_{"RPC"}; + +public: + WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0); + + template + bool + postCoro(F&& f, bool isWhiteListed) + { + if (curSize_ >= maxSize_ && !isWhiteListed) + { + log_.warn() << "Queue is full. rejecting job. current size = " + << curSize_ << " max size = " << maxSize_; + return false; + } + ++curSize_; + auto start = std::chrono::system_clock::now(); + // Each time we enqueue a job, we want to post a symmetrical job that + // will dequeue and run the job at the front of the job queue. 
+ boost::asio::spawn( + ioc_, + [this, f = std::move(f), start](boost::asio::yield_context yield) { + auto run = std::chrono::system_clock::now(); + auto wait = + std::chrono::duration_cast( + run - start) + .count(); + // increment queued_ here, in the same place we implement + // durationUs_ + ++queued_; + durationUs_ += wait; + log_.info() << "WorkQueue wait time = " << wait + << " queue size = " << curSize_; + f(yield); + --curSize_; + }); + return true; + } + + boost::json::object + report() const + { + boost::json::object obj; + obj["queued"] = queued_; + obj["queued_duration_us"] = durationUs_; + obj["current_queue_size"] = curSize_; + obj["max_queue_size"] = maxSize_; + return obj; + } + +private: + std::vector threads_ = {}; + + boost::asio::io_context ioc_ = {}; + std::optional work_{ioc_}; +}; diff --git a/src/rpc/common/AnyHandler.h b/src/rpc/common/AnyHandler.h new file mode 100644 index 00000000..bc891bef --- /dev/null +++ b/src/rpc/common/AnyHandler.h @@ -0,0 +1,141 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +namespace RPCng { + +/** + * @brief A type-erased Handler that can contain any (NextGen) RPC handler class + * + * This allows to store different handlers in one map/vector etc. + * Support for copying was added in order to allow storing in a + * map/unordered_map using the initializer_list constructor. + */ +class AnyHandler final +{ +public: + /** + * @brief Type-erases any handler class. + * + * @tparam HandlerType The real type of wrapped handler class + * @tparam ProcessingStrategy A strategy that implements how processing of + * JSON is to be done + * @param handler The handler to wrap. Required to fulfil the @ref Handler + * concept. + */ + template < + Handler HandlerType, + typename ProcessingStrategy = detail::DefaultProcessor> + /* implicit */ AnyHandler(HandlerType&& handler) + : pimpl_{std::make_unique>( + std::forward(handler))} + { + } + + ~AnyHandler() = default; + AnyHandler(AnyHandler const& other) : pimpl_{other.pimpl_->clone()} + { + } + AnyHandler& + operator=(AnyHandler const& rhs) + { + AnyHandler copy{rhs}; + pimpl_.swap(copy.pimpl_); + return *this; + } + AnyHandler(AnyHandler&&) = default; + AnyHandler& + operator=(AnyHandler&&) = default; + + /** + * @brief Process incoming JSON by the stored handler + * + * @param value The JSON to process + * @return JSON result or @ref RPC::Status on error + */ + [[nodiscard]] ReturnType + process(boost::json::value const& value) const + { + return pimpl_->process(value); + } + + /** + * @brief Process incoming JSON by the stored handler in a provided + * coroutine + * + * @param value The JSON to process + * @return JSON result or @ref RPC::Status on error + */ + [[nodiscard]] ReturnType + process( + boost::json::value const& value, + boost::asio::yield_context& ptrYield) const + { + return pimpl_->process(value, &ptrYield); + } + +private: + struct Concept + { + 
virtual ~Concept() = default; + + [[nodiscard]] virtual ReturnType + process( + boost::json::value const& value, + boost::asio::yield_context* ptrYield = nullptr) const = 0; + + [[nodiscard]] virtual std::unique_ptr + clone() const = 0; + }; + + template + struct Model : Concept + { + HandlerType handler; + ProcessorType processor; + + Model(HandlerType&& handler) : handler{std::move(handler)} + { + } + + [[nodiscard]] ReturnType + process( + boost::json::value const& value, + boost::asio::yield_context* ptrYield = nullptr) const override + { + return processor(handler, value, ptrYield); + } + + [[nodiscard]] std::unique_ptr + clone() const override + { + return std::make_unique(*this); + } + }; + +private: + std::unique_ptr pimpl_; +}; + +} // namespace RPCng diff --git a/src/rpc/common/Concepts.h b/src/rpc/common/Concepts.h new file mode 100644 index 00000000..a20fe5ea --- /dev/null +++ b/src/rpc/common/Concepts.h @@ -0,0 +1,78 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include + +#include +#include +#include + +#include + +namespace RPCng { + +struct RpcSpec; + +/** + * @brief A concept that specifies what a requirement used with @ref FieldSpec + * must provide + */ +// clang-format off +template +concept Requirement = requires(T a) { + { a.verify(boost::json::value{}, std::string{}) } -> std::same_as; +}; +// clang-format on + +/** + * @brief A concept that specifies what a Handler type must provide + * + * Note that value_from and value_to should be implemented using tag_invoke + * as per boost::json documentation for these functions. + */ +// clang-format off +template +concept CoroutineProcess = requires(T a, typename T::Input in, typename T::Output out, boost::asio::yield_context& y) { + { a.process(in, y) } -> std::same_as>; }; + +template +concept NonCoroutineProcess = requires(T a, typename T::Input in, typename T::Output out) { + { a.process(in) } -> std::same_as>; }; + +template +concept HandlerWithInput = requires(T a, typename T::Input in, typename T::Output out) { + { a.spec() } -> std::same_as; } + and (CoroutineProcess or NonCoroutineProcess) + and boost::json::has_value_to::value; + +template +concept HandlerWithoutInput = requires(T a, typename T::Output out) { + { a.process() } -> std::same_as>; }; + +template +concept Handler = +(HandlerWithInput +or +HandlerWithoutInput) +and boost::json::has_value_from::value; +// clang-format on + +} // namespace RPCng diff --git a/src/rpc/common/Specs.cpp b/src/rpc/common/Specs.cpp new file mode 100644 index 00000000..9fdc5d32 --- /dev/null +++ b/src/rpc/common/Specs.cpp @@ -0,0 +1,42 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +namespace RPCng { + +[[nodiscard]] MaybeError +FieldSpec::validate(boost::json::value const& value) const +{ + return validator_(value); +} + +[[nodiscard]] MaybeError +RpcSpec::validate(boost::json::value const& value) const +{ + for (auto const& field : fields_) + if (auto ret = field.validate(value); not ret) + return Error{ret.error()}; + + return {}; +} + +} // namespace RPCng diff --git a/src/rpc/common/Specs.h b/src/rpc/common/Specs.h new file mode 100644 index 00000000..8bfc9424 --- /dev/null +++ b/src/rpc/common/Specs.h @@ -0,0 +1,95 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +#include +#include + +namespace RPCng { + +/** + * @brief Represents a Specification for one field of an RPC command + */ +struct FieldSpec final +{ + /** + * @brief Construct a field specification out of a set of requirements + * + * @tparam Requirements The types of requirements @ref Requirement + * @param key The key in a JSON object that the field validates + * @param requirements The requirements, each of them have to fulfil + * the @ref Requirement concept + */ + template + FieldSpec(std::string const& key, Requirements&&... requirements) + : validator_{detail::makeFieldValidator( + key, + std::forward(requirements)...)} + { + } + + /** + * @brief Validates the passed JSON value using the stored requirements + * + * @param value The JSON value to validate + * @return Nothing on success; @ref RPC::Status on error + */ + [[nodiscard]] MaybeError + validate(boost::json::value const& value) const; + +private: + std::function validator_; +}; + +/** + * @brief Represents a Specification of an entire RPC command + * + * Note: this should really be all constexpr and handlers would expose + * static constexpr RpcSpec spec instead. Maybe some day in the future. 
+ */ +struct RpcSpec final +{ + /** + * @brief Construct a full RPC request specification + * + * @param fields The fields of the RPC specification @ref FieldSpec + */ + RpcSpec(std::initializer_list fields) : fields_{fields} + { + } + + /** + * @brief Validates the passed JSON value using the stored field specs + * + * @param value The JSON value to validate + * @return Nothing on success; @ref RPC::Status on error + */ + [[nodiscard]] MaybeError + validate(boost::json::value const& value) const; + +private: + std::vector fields_; +}; + +} // namespace RPCng diff --git a/src/rpc/common/Types.h b/src/rpc/common/Types.h new file mode 100644 index 00000000..26a7b73e --- /dev/null +++ b/src/rpc/common/Types.h @@ -0,0 +1,69 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include + +#include + +namespace RPCng { + +/** + * @brief Return type used for Validators that can return error but don't have + * specific value to return + */ +using MaybeError = util::Expected; + +/** + * @brief The type that represents just the error part of @ref MaybeError + */ +using Error = util::Unexpected; + +/** + * @brief Return type for each individual handler + */ +template +using HandlerReturnType = util::Expected; + +/** + * @brief The final return type out of RPC engine + */ +using ReturnType = util::Expected; + +struct RpcSpec; +struct FieldSpec; + +using RpcSpecConstRef = RpcSpec const&; + +struct VoidOutput +{ +}; + +inline void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + VoidOutput const&) +{ + jv = boost::json::object{}; +} + +} // namespace RPCng diff --git a/src/rpc/common/Validators.cpp b/src/rpc/common/Validators.cpp new file mode 100644 index 00000000..55a131ef --- /dev/null +++ b/src/rpc/common/Validators.cpp @@ -0,0 +1,203 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +#include + +#include +#include + +namespace RPCng::validation { + +[[nodiscard]] MaybeError +Section::verify(boost::json::value const& value, std::string_view key) const +{ + if (not value.is_object() or not value.as_object().contains(key.data())) + return {}; // ignore. field does not exist, let 'required' fail + // instead + + auto const& res = value.at(key.data()); + // if it is not a json object, let other validators fail + if (!res.is_object()) + return {}; + for (auto const& spec : specs) + { + if (auto const ret = spec.validate(res); not ret) + return Error{ret.error()}; + } + return {}; +} + +[[nodiscard]] MaybeError +Required::verify(boost::json::value const& value, std::string_view key) const +{ + if (not value.is_object() or not value.as_object().contains(key.data())) + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + "Required field '" + std::string{key} + "' missing"}}; + + return {}; +} + +[[nodiscard]] MaybeError +ValidateArrayAt::verify(boost::json::value const& value, std::string_view key) + const +{ + if (not value.is_object() or not value.as_object().contains(key.data())) + return {}; // ignore. 
field does not exist, let 'required' fail + // instead + + if (not value.as_object().at(key.data()).is_array()) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}}; + + auto const& arr = value.as_object().at(key.data()).as_array(); + if (idx_ >= arr.size()) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}}; + + auto const& res = arr.at(idx_); + for (auto const& spec : specs_) + if (auto const ret = spec.validate(res); not ret) + return Error{ret.error()}; + + return {}; +} + +[[nodiscard]] MaybeError +CustomValidator::verify(boost::json::value const& value, std::string_view key) + const +{ + if (not value.is_object() or not value.as_object().contains(key.data())) + return {}; // ignore. field does not exist, let 'required' fail + // instead + + return validator_(value.as_object().at(key.data()), key); +} + +[[nodiscard]] bool +checkIsU32Numeric(std::string_view sv) +{ + uint32_t unused; + auto [_, ec] = std::from_chars(sv.data(), sv.data() + sv.size(), unused); + return ec == std::errc(); +} + +CustomValidator Uint256HexStringValidator = CustomValidator{ + [](boost::json::value const& value, std::string_view key) -> MaybeError { + if (!value.is_string()) + { + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + "NotString"}}; + } + ripple::uint256 ledgerHash; + if (!ledgerHash.parseHex(value.as_string().c_str())) + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + "Malformed"}}; + return MaybeError{}; + }}; + +CustomValidator LedgerIndexValidator = CustomValidator{ + [](boost::json::value const& value, std::string_view key) -> MaybeError { + auto err = Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, "ledgerIndexMalformed"}}; + if (!value.is_string() && !(value.is_uint64() || value.is_int64())) + { + return err; + } + if (value.is_string() && value.as_string() != "validated" && + !checkIsU32Numeric(value.as_string().c_str())) + { + return err; + } + 
return MaybeError{}; + }}; + +CustomValidator AccountValidator = CustomValidator{ + [](boost::json::value const& value, std::string_view key) -> MaybeError { + if (!value.is_string()) + { + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + "NotString"}}; + } + // TODO: we are using accountFromStringStrict from RPCHelpers, after we + // remove all old handler, this function can be moved to here + if (!RPC::accountFromStringStrict(value.as_string().c_str())) + { + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + "Malformed"}}; + } + return MaybeError{}; + }}; + +CustomValidator AccountBase58Validator = CustomValidator{ + [](boost::json::value const& value, std::string_view key) -> MaybeError { + if (!value.is_string()) + { + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + "NotString"}}; + } + auto const account = + ripple::parseBase58(value.as_string().c_str()); + if (!account || account->isZero()) + return Error{RPC::Status{RPC::ClioError::rpcMALFORMED_ADDRESS}}; + return MaybeError{}; + }}; + +CustomValidator MarkerValidator = CustomValidator{ + [](boost::json::value const& value, std::string_view key) -> MaybeError { + if (!value.is_string()) + { + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + "NotString"}}; + } + // TODO: we are using parseAccountCursor from RPCHelpers, after we + // remove all old handler, this function can be moved to here + if (!RPC::parseAccountCursor(value.as_string().c_str())) + { + // align with the current error message + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, "Malformed cursor"}}; + } + return MaybeError{}; + }}; + +CustomValidator CurrencyValidator = CustomValidator{ + [](boost::json::value const& value, std::string_view key) -> MaybeError { + if (!value.is_string()) + { + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + 
"NotString"}}; + } + ripple::Currency currency; + if (!ripple::to_currency(currency, value.as_string().c_str())) + return Error{RPC::Status{ + RPC::ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency"}}; + return MaybeError{}; + }}; + +} // namespace RPCng::validation diff --git a/src/rpc/common/Validators.h b/src/rpc/common/Validators.h new file mode 100644 index 00000000..41546c68 --- /dev/null +++ b/src/rpc/common/Validators.h @@ -0,0 +1,488 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +namespace RPCng::validation { + +/** + * @brief Check that the type is the same as what was expected + * + * @tparam Expected The expected type that value should be convertible to + * @param value The json value to check the type of + * @return true if convertible; false otherwise + */ +template +[[nodiscard]] bool static checkType(boost::json::value const& value) +{ + auto hasError = false; + if constexpr (std::is_same_v) + { + if (not value.is_bool()) + hasError = true; + } + else if constexpr (std::is_same_v) + { + if (not value.is_string()) + hasError = true; + } + else if constexpr ( + std::is_same_v or std::is_same_v) + { + if (not value.is_double()) + hasError = true; + } + else if constexpr (std::is_same_v) + { + if (not value.is_array()) + hasError = true; + } + else if constexpr (std::is_same_v) + { + if (not value.is_object()) + hasError = true; + } + else if constexpr ( + std::is_convertible_v or + std::is_convertible_v) + { + if (not value.is_int64() && not value.is_uint64()) + hasError = true; + } + + return not hasError; +} + +/** + * @brief A meta-validator that acts as a spec for a sub-object/section + */ +class Section final +{ + std::vector specs; + +public: + /** + * @brief Construct new section validator from a list of specs + * + * @param specs List of specs @ref FieldSpec + */ + explicit Section(std::initializer_list specs) : specs{specs} + { + } + + /** + * @brief Verify that the JSON value representing the section is valid + * according to the given specs + * + * @param value The JSON value representing the outer object + * @param key The key used to retrieve the section from the outer object + */ + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const; +}; + +/** + * @brief A validator that simply requires a field to be present + */ +struct Required final +{ + 
[[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const; +}; + +/** + * @brief Validates that the type of the value is one of the given types + */ +template +struct Type final +{ + /** + * @brief Verify that the JSON value is (one) of specified type(s) + * + * @param value The JSON value representing the outer object + * @param key The key used to retrieve the tested value from the outer + * object + */ + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const + { + if (not value.is_object() or not value.as_object().contains(key.data())) + return {}; // ignore. field does not exist, let 'required' fail + // instead + + auto const& res = value.as_object().at(key.data()); + auto const convertible = (checkType(res) || ...); + + if (not convertible) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}}; + + return {}; + } +}; + +/** + * @brief Validate that value is between specified min and max + */ +template +class Between final +{ + Type min_; + Type max_; + +public: + /** + * @brief Construct the validator storing min and max values + * + * @param min + * @param max + */ + explicit Between(Type min, Type max) : min_{min}, max_{max} + { + } + + /** + * @brief Verify that the JSON value is within a certain range + * + * @param value The JSON value representing the outer object + * @param key The key used to retrieve the tested value from the outer + * object + */ + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const + { + if (not value.is_object() or not value.as_object().contains(key.data())) + return {}; // ignore. field does not exist, let 'required' fail + // instead + + using boost::json::value_to; + auto const res = value_to(value.as_object().at(key.data())); + // todo: may want a way to make this code more generic (e.g. 
use a free + // function that can be overridden for this comparison) + if (res < min_ || res > max_) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}}; + + return {}; + } +}; + +/** + * @brief Validates that the value is equal to the one passed in + */ +template +class EqualTo final +{ + Type original_; + +public: + /** + * @brief Construct the validator with stored original value + * + * @param original The original value to store + */ + explicit EqualTo(Type original) : original_{original} + { + } + + /** + * @brief Verify that the JSON value is equal to the stored original + * + * @param value The JSON value representing the outer object + * @param key The key used to retrieve the tested value from the outer + * object + */ + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const + { + if (not value.is_object() or not value.as_object().contains(key.data())) + return {}; // ignore. field does not exist, let 'required' fail + // instead + + using boost::json::value_to; + auto const res = value_to(value.as_object().at(key.data())); + if (res != original_) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}}; + + return {}; + } +}; + +/** + * @brief Deduction guide to help disambiguate what it means to EqualTo a + * "string" without specifying the type. 
+ */ +EqualTo(char const*)->EqualTo; + +/** + * @brief Validates that the value is one of the values passed in + */ +template +class OneOf final +{ + std::vector options_; + +public: + /** + * @brief Construct the validator with stored options + * + * @param options The list of allowed options + */ + explicit OneOf(std::initializer_list options) : options_{options} + { + } + + /** + * @brief Verify that the JSON value is one of the stored options + * + * @param value The JSON value representing the outer object + * @param key The key used to retrieve the tested value from the outer + * object + */ + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const + { + if (not value.is_object() or not value.as_object().contains(key.data())) + return {}; // ignore. field does not exist, let 'required' fail + // instead + + using boost::json::value_to; + auto const res = value_to(value.as_object().at(key.data())); + if (std::find(std::begin(options_), std::end(options_), res) == + std::end(options_)) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}}; + + return {}; + } +}; + +/** + * @brief Deduction guide to help disambiguate what it means to OneOf a + * few "strings" without specifying the type. 
+ */ +OneOf(std::initializer_list)->OneOf; + +/** + * @brief A meta-validator that specifies a list of specs to run against the + * object at the given index in the array + */ +class ValidateArrayAt final +{ + std::size_t idx_; + std::vector specs_; + +public: + /** + * @brief Constructs a validator that validates the specified element of a + * JSON array + * + * @param idx The index inside the array to validate + * @param specs The specifications to validate against + */ + ValidateArrayAt(std::size_t idx, std::initializer_list specs) + : idx_{idx}, specs_{specs} + { + } + + /** + * @brief Verify that the JSON array element at given index is valid + * according the stored specs + * + * @param value The JSON value representing the outer object + * @param key The key used to retrieve the array from the outer object + */ + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const; +}; + +/** + * @brief A meta-validator that specifies a list of requirements to run against + * when the type matches the template parameter + */ +template +class IfType final +{ +public: + /** + * @brief Constructs a validator that validates the specs if the type + * matches + * @param requirements The requirements to validate against + */ + template + IfType(Requirements&&... requirements) + { + validator_ = [... 
r = std::forward(requirements)]( + boost::json::value const& j, + std::string_view key) -> MaybeError { + // clang-format off + std::optional firstFailure = std::nullopt; + + // the check logic is the same as fieldspec + ([&j, &key, &firstFailure, req = &r]() { + if (firstFailure) + return; + + if (auto const res = req->verify(j, key); not res) + firstFailure = res.error(); + }(), ...); + // clang-format on + + if (firstFailure) + return Error{firstFailure.value()}; + + return {}; + }; + } + + /** + * @brief Verify that the element is valid + * according the stored requirements when type matches + * + * @param value The JSON value representing the outer object + * @param key The key used to retrieve the element from the outer object + */ + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const + { + if (not value.is_object() or not value.as_object().contains(key.data())) + return {}; // ignore. field does not exist, let 'required' fail + // instead + + if (not checkType(value.as_object().at(key.data()))) + return {}; // ignore if type does not match + + return validator_(value, key); + } + +private: + std::function + validator_; +}; + +/** + * @brief A meta-validator that wrapp other validator to send the customized + * error + */ +template +class WithCustomError final +{ + Requirement requirement; + RPC::Status error; + +public: + /** + * @brief Constructs a validator that calls the given validator "req" and + * return customized error "err" + */ + WithCustomError(Requirement req, RPC::Status err) + : requirement{std::move(req)}, error{err} + { + } + + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const + { + if (auto const res = requirement.verify(value, key); not res) + return Error{error}; + + return {}; + } +}; + +/** + * @brief A meta-validator that allows to specify a custom validation function + */ +class CustomValidator final +{ + std::function + validator_; + +public: + /** 
+ * @brief Constructs a custom validator from any supported callable + * + * @tparam Fn The type of callable + * @param fn The callable/function object + */ + template + explicit CustomValidator(Fn&& fn) : validator_{std::forward(fn)} + { + } + + /** + * @brief Verify that the JSON value is valid according to the custom + * validation function stored + * + * @param value The JSON value representing the outer object + * @param key The key used to retrieve the tested value from the outer + * object + */ + [[nodiscard]] MaybeError + verify(boost::json::value const& value, std::string_view key) const; +}; + +/** + * @brief Helper function to check if sv is an uint32 number or not + */ +[[nodiscard]] bool +checkIsU32Numeric(std::string_view sv); + +/** + * @brief Provide a common used validator for ledger index + * LedgerIndex must be a string or int + * If the specified LedgerIndex is a string, it's value must be either + * "validated" or a valid integer value represented as a string. + */ +extern CustomValidator LedgerIndexValidator; + +/** + * @brief Provide a common used validator for account + * Account must be a string and the converted public key is valid + */ +extern CustomValidator AccountValidator; + +/** + * @brief Provide a common used validator for account + * Account must be a string and can convert to base58 + */ +extern CustomValidator AccountBase58Validator; + +/** + * @brief Provide a common used validator for marker + * Marker is composed of a comma separated index and start hint. The + * former will be read as hex, and the latter can cast to uint64. 
+ */ +extern CustomValidator MarkerValidator; + +/** + * @brief Provide a common used validator for uint256 hex string + * It must be a string and hex + * Transaction index, ledger hash all use this validator + */ +extern CustomValidator Uint256HexStringValidator; + +/** + * @brief Provide a common used validator for currency + * including standard currency code and token code + */ +extern CustomValidator CurrencyValidator; + +} // namespace RPCng::validation diff --git a/src/rpc/common/impl/Factories.h b/src/rpc/common/impl/Factories.h new file mode 100644 index 00000000..c1fcd7e9 --- /dev/null +++ b/src/rpc/common/impl/Factories.h @@ -0,0 +1,59 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include + +#include + +#include + +namespace RPCng::detail { + +template +[[nodiscard]] auto +makeFieldValidator(std::string const& key, Requirements&&... requirements) +{ + return [key, ... 
r = std::forward(requirements)]( + boost::json::value const& j) -> MaybeError { + // clang-format off + std::optional firstFailure = std::nullopt; + + // This expands in order of Requirements and stops evaluating after + // first failure which is stored in `firstFailure` and can be checked + // later on to see whether the verification failed as a whole or not. + ([&j, &key, &firstFailure, req = &r]() { + if (firstFailure) + return; // already failed earlier - skip + + if (auto const res = req->verify(j, key); not res) + firstFailure = res.error(); + }(), ...); + // clang-format on + + if (firstFailure) + return Error{firstFailure.value()}; + + return {}; + }; +} + +} // namespace RPCng::detail diff --git a/src/rpc/common/impl/Processors.h b/src/rpc/common/impl/Processors.h new file mode 100644 index 00000000..a5a20e3a --- /dev/null +++ b/src/rpc/common/impl/Processors.h @@ -0,0 +1,85 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include + +namespace RPCng::detail { + +template +static constexpr bool unsupported_handler_v = false; + +template +struct DefaultProcessor final +{ + [[nodiscard]] ReturnType + operator()( + HandlerType const& handler, + boost::json::value const& value, + boost::asio::yield_context* ptrYield = nullptr) const + { + using boost::json::value_from; + using boost::json::value_to; + if constexpr (HandlerWithInput) + { + // first we run validation + auto const spec = handler.spec(); + if (auto const ret = spec.validate(value); not ret) + return Error{ret.error()}; // forward Status + + auto const inData = value_to(value); + if constexpr (NonCoroutineProcess) + { + auto const ret = handler.process(inData); + // real handler is given expected Input, not json + if (!ret) + return Error{ret.error()}; // forward Status + else + return value_from(ret.value()); + } + else + { + auto const ret = handler.process(inData, *ptrYield); + // real handler is given expected Input, not json + if (!ret) + return Error{ret.error()}; // forward Status + else + return value_from(ret.value()); + } + } + else if constexpr (HandlerWithoutInput) + { + // no input to pass, ignore the value + if (auto const ret = handler.process(); not ret) + return Error{ret.error()}; // forward Status + else + return value_from(ret.value()); + } + else + { + // when concept HandlerWithInput and HandlerWithoutInput not cover + // all Handler case + static_assert(unsupported_handler_v); + } + } +}; + +} // namespace RPCng::detail diff --git a/src/rpc/handlers/AccountChannels.cpp b/src/rpc/handlers/AccountChannels.cpp new file mode 100644 index 00000000..0042234d --- /dev/null +++ b/src/rpc/handlers/AccountChannels.cpp @@ -0,0 +1,145 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, 
the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace RPC { + +void +addChannel(boost::json::array& jsonLines, ripple::SLE const& line) +{ + boost::json::object jDst; + jDst[JS(channel_id)] = ripple::to_string(line.key()); + jDst[JS(account)] = ripple::to_string(line.getAccountID(ripple::sfAccount)); + jDst[JS(destination_account)] = + ripple::to_string(line.getAccountID(ripple::sfDestination)); + jDst[JS(amount)] = line[ripple::sfAmount].getText(); + jDst[JS(balance)] = line[ripple::sfBalance].getText(); + if (publicKeyType(line[ripple::sfPublicKey])) + { + ripple::PublicKey const pk(line[ripple::sfPublicKey]); + jDst[JS(public_key)] = toBase58(ripple::TokenType::AccountPublic, pk); + jDst[JS(public_key_hex)] = strHex(pk); + } + jDst[JS(settle_delay)] = line[ripple::sfSettleDelay]; + if (auto const& v = line[~ripple::sfExpiration]) + jDst[JS(expiration)] = *v; + if (auto const& v = line[~ripple::sfCancelAfter]) + jDst[JS(cancel_after)] = *v; + if (auto const& v = line[~ripple::sfSourceTag]) + jDst[JS(source_tag)] = *v; + if (auto const& v = line[~ripple::sfDestinationTag]) + 
jDst[JS(destination_tag)] = *v; + + jsonLines.push_back(jDst); +} + +Result +doAccountChannels(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::AccountID accountID; + if (auto const status = getAccount(request, accountID); status) + return status; + + auto rawAcct = context.backend->fetchLedgerObject( + ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + + if (!rawAcct) + return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; + + ripple::AccountID destAccount; + if (auto const status = + getAccount(request, destAccount, JS(destination_account)); + status) + return status; + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + std::optional marker = {}; + if (request.contains(JS(marker))) + { + if (!request.at(JS(marker)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"}; + + marker = request.at(JS(marker)).as_string().c_str(); + } + + response[JS(account)] = ripple::to_string(accountID); + response[JS(channels)] = boost::json::value(boost::json::array_kind); + response[JS(limit)] = limit; + boost::json::array& jsonChannels = response.at(JS(channels)).as_array(); + + auto const addToResponse = [&](ripple::SLE&& sle) { + if (sle.getType() == ripple::ltPAYCHAN && + sle.getAccountID(ripple::sfAccount) == accountID && + (!destAccount || + destAccount == sle.getAccountID(ripple::sfDestination))) + { + addChannel(jsonChannels, sle); + } + + return true; + }; + + auto next = traverseOwnedNodes( + *context.backend, + accountID, + lgrInfo.seq, + limit, + marker, + context.yield, + addToResponse); + + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + + if (auto status = std::get_if(&next)) + return *status; + + auto 
nextMarker = std::get(next); + + if (nextMarker.isNonZero()) + response[JS(marker)] = nextMarker.toString(); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/AccountCurrencies.cpp b/src/rpc/handlers/AccountCurrencies.cpp new file mode 100644 index 00000000..8be9f801 --- /dev/null +++ b/src/rpc/handlers/AccountCurrencies.cpp @@ -0,0 +1,108 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace RPC { + +Result +doAccountCurrencies(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::AccountID accountID; + if (auto const status = getAccount(request, accountID); status) + return status; + + auto rawAcct = context.backend->fetchLedgerObject( + ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + + if (!rawAcct) + return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; + + std::set send, receive; + auto const addToResponse = [&](ripple::SLE&& sle) { + if (sle.getType() == ripple::ltRIPPLE_STATE) + { + ripple::STAmount balance = sle.getFieldAmount(ripple::sfBalance); + + auto lowLimit = sle.getFieldAmount(ripple::sfLowLimit); + auto highLimit = sle.getFieldAmount(ripple::sfHighLimit); + bool viewLowest = (lowLimit.getIssuer() == accountID); + auto lineLimit = viewLowest ? lowLimit : highLimit; + auto lineLimitPeer = !viewLowest ? 
lowLimit : highLimit; + if (!viewLowest) + balance.negate(); + + if (balance < lineLimit) + receive.insert(ripple::to_string(balance.getCurrency())); + if ((-balance) < lineLimitPeer) + send.insert(ripple::to_string(balance.getCurrency())); + } + + return true; + }; + + traverseOwnedNodes( + *context.backend, + accountID, + lgrInfo.seq, + std::numeric_limits::max(), + {}, + context.yield, + addToResponse); + + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + + response[JS(receive_currencies)] = + boost::json::value(boost::json::array_kind); + boost::json::array& jsonReceive = + response.at(JS(receive_currencies)).as_array(); + + for (auto const& currency : receive) + jsonReceive.push_back(currency.c_str()); + + response[JS(send_currencies)] = boost::json::value(boost::json::array_kind); + boost::json::array& jsonSend = response.at(JS(send_currencies)).as_array(); + + for (auto const& currency : send) + jsonSend.push_back(currency.c_str()); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/AccountInfo.cpp b/src/rpc/handlers/AccountInfo.cpp new file mode 100644 index 00000000..2a7fbe4c --- /dev/null +++ b/src/rpc/handlers/AccountInfo.cpp @@ -0,0 +1,120 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +#include +#include + +// { +// account: , +// strict: // optional (default false) +// // if true only allow public keys and addresses. +// ledger_hash : +// ledger_index : +// signer_lists : // optional (default false) +// // if true return SignerList(s). +// queue : // optional (default false) +// // if true return information about transactions +// // in the current TxQ, only if the requested +// // ledger is open. Otherwise if true, returns an +// // error. +// } + +namespace RPC { + +Result +doAccountInfo(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + std::string strIdent; + if (request.contains(JS(account))) + strIdent = request.at(JS(account)).as_string().c_str(); + else if (request.contains(JS(ident))) + strIdent = request.at(JS(ident)).as_string().c_str(); + else + return Status{RippledError::rpcACT_MALFORMED}; + + // We only need to fetch the ledger header because the ledger hash is + // supposed to be included in the response. The ledger sequence is specified + // in the request + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + // Get info on account. 
+ auto accountID = accountFromStringStrict(strIdent); + if (!accountID) + return Status{RippledError::rpcACT_MALFORMED}; + + auto key = ripple::keylet::account(accountID.value()); + std::optional> dbResponse = + context.backend->fetchLedgerObject(key.key, lgrInfo.seq, context.yield); + + if (!dbResponse) + return Status{RippledError::rpcACT_NOT_FOUND}; + + ripple::STLedgerEntry sle{ + ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key.key}; + + if (!key.check(sle)) + return Status{RippledError::rpcDB_DESERIALIZATION}; + + response[JS(account_data)] = toJson(sle); + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + + // Return SignerList(s) if that is requested. + if (request.contains(JS(signer_lists)) && + request.at(JS(signer_lists)).as_bool()) + { + // We put the SignerList in an array because of an anticipated + // future when we support multiple signer lists on one account. + boost::json::array signerList; + auto signersKey = ripple::keylet::signers(*accountID); + + // This code will need to be revisited if in the future we + // support multiple SignerLists on one account. 
+ auto const signers = context.backend->fetchLedgerObject( + signersKey.key, lgrInfo.seq, context.yield); + if (signers) + { + ripple::STLedgerEntry sleSigners{ + ripple::SerialIter{signers->data(), signers->size()}, + signersKey.key}; + if (!signersKey.check(sleSigners)) + return Status{RippledError::rpcDB_DESERIALIZATION}; + + signerList.push_back(toJson(sleSigners)); + } + + response[JS(account_data)].as_object()[JS(signer_lists)] = + std::move(signerList); + } + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/AccountLines.cpp b/src/rpc/handlers/AccountLines.cpp new file mode 100644 index 00000000..b9a7c580 --- /dev/null +++ b/src/rpc/handlers/AccountLines.cpp @@ -0,0 +1,208 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace RPC { + +void +addLine( + boost::json::array& jsonLines, + ripple::SLE const& line, + ripple::AccountID const& account, + std::optional const& peerAccount) +{ + auto flags = line.getFieldU32(ripple::sfFlags); + auto lowLimit = line.getFieldAmount(ripple::sfLowLimit); + auto highLimit = line.getFieldAmount(ripple::sfHighLimit); + auto lowID = lowLimit.getIssuer(); + auto highID = highLimit.getIssuer(); + auto lowQualityIn = line.getFieldU32(ripple::sfLowQualityIn); + auto lowQualityOut = line.getFieldU32(ripple::sfLowQualityOut); + auto highQualityIn = line.getFieldU32(ripple::sfHighQualityIn); + auto highQualityOut = line.getFieldU32(ripple::sfHighQualityOut); + auto balance = line.getFieldAmount(ripple::sfBalance); + + bool viewLowest = (lowID == account); + auto lineLimit = viewLowest ? lowLimit : highLimit; + auto lineLimitPeer = !viewLowest ? lowLimit : highLimit; + auto lineAccountIDPeer = !viewLowest ? lowID : highID; + auto lineQualityIn = viewLowest ? lowQualityIn : highQualityIn; + auto lineQualityOut = viewLowest ? lowQualityOut : highQualityOut; + + if (peerAccount && peerAccount != lineAccountIDPeer) + return; + + if (!viewLowest) + balance.negate(); + + bool lineAuth = + flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); + bool lineAuthPeer = + flags & (!viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); + bool lineNoRipple = + flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + bool lineDefaultRipple = flags & ripple::lsfDefaultRipple; + bool lineNoRipplePeer = flags & + (!viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + bool lineFreeze = + flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + bool lineFreezePeer = + flags & (!viewLowest ? 
ripple::lsfLowFreeze : ripple::lsfHighFreeze); + + ripple::STAmount const& saBalance(balance); + ripple::STAmount const& saLimit(lineLimit); + ripple::STAmount const& saLimitPeer(lineLimitPeer); + + boost::json::object jPeer; + jPeer[JS(account)] = ripple::to_string(lineAccountIDPeer); + jPeer[JS(balance)] = saBalance.getText(); + jPeer[JS(currency)] = ripple::to_string(saBalance.issue().currency); + jPeer[JS(limit)] = saLimit.getText(); + jPeer[JS(limit_peer)] = saLimitPeer.getText(); + jPeer[JS(quality_in)] = lineQualityIn; + jPeer[JS(quality_out)] = lineQualityOut; + if (lineAuth) + jPeer[JS(authorized)] = true; + if (lineAuthPeer) + jPeer[JS(peer_authorized)] = true; + if (lineNoRipple || !lineDefaultRipple) + jPeer[JS(no_ripple)] = lineNoRipple; + if (lineNoRipple || !lineDefaultRipple) + jPeer[JS(no_ripple_peer)] = lineNoRipplePeer; + if (lineFreeze) + jPeer[JS(freeze)] = true; + if (lineFreezePeer) + jPeer[JS(freeze_peer)] = true; + + jsonLines.push_back(jPeer); +} + +Result +doAccountLines(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::AccountID accountID; + if (auto const status = getAccount(request, accountID); status) + return status; + + auto rawAcct = context.backend->fetchLedgerObject( + ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + + if (!rawAcct) + return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; + + std::optional peerAccount; + if (auto const status = getOptionalAccount(request, peerAccount, JS(peer)); + status) + return status; + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + std::optional marker = {}; + if (request.contains(JS(marker))) + { + if (not request.at(JS(marker)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"}; + + 
marker = request.at(JS(marker)).as_string().c_str(); + } + + auto ignoreDefault = false; + if (request.contains(JS(ignore_default))) + { + if (not request.at(JS(ignore_default)).is_bool()) + return Status{ + RippledError::rpcINVALID_PARAMS, "ignoreDefaultNotBool"}; + + ignoreDefault = request.at(JS(ignore_default)).as_bool(); + } + + response[JS(account)] = ripple::to_string(accountID); + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + response[JS(limit)] = limit; + response[JS(lines)] = boost::json::value(boost::json::array_kind); + boost::json::array& jsonLines = response.at(JS(lines)).as_array(); + + auto const addToResponse = [&](ripple::SLE&& sle) -> void { + if (sle.getType() == ripple::ltRIPPLE_STATE) + { + auto ignore = false; + if (ignoreDefault) + { + if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == + accountID) + ignore = + !(sle.getFieldU32(ripple::sfFlags) & + ripple::lsfLowReserve); + else + ignore = + !(sle.getFieldU32(ripple::sfFlags) & + ripple::lsfHighReserve); + } + + if (!ignore) + addLine(jsonLines, sle, accountID, peerAccount); + } + }; + + auto next = traverseOwnedNodes( + *context.backend, + accountID, + lgrInfo.seq, + limit, + marker, + context.yield, + addToResponse); + + if (auto status = std::get_if(&next)) + return *status; + + auto nextMarker = std::get(next); + + if (nextMarker.isNonZero()) + response[JS(marker)] = nextMarker.toString(); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/AccountObjects.cpp b/src/rpc/handlers/AccountObjects.cpp new file mode 100644 index 00000000..cebf5135 --- /dev/null +++ b/src/rpc/handlers/AccountObjects.cpp @@ -0,0 +1,231 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace RPC { + +std::unordered_map types{ + {"state", ripple::ltRIPPLE_STATE}, + {"ticket", ripple::ltTICKET}, + {"signer_list", ripple::ltSIGNER_LIST}, + {"payment_channel", ripple::ltPAYCHAN}, + {"offer", ripple::ltOFFER}, + {"escrow", ripple::ltESCROW}, + {"deposit_preauth", ripple::ltDEPOSIT_PREAUTH}, + {"check", ripple::ltCHECK}, + {"nft_page", ripple::ltNFTOKEN_PAGE}, + {"nft_offer", ripple::ltNFTOKEN_OFFER}}; + +Result +doAccountNFTs(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::AccountID accountID; + if (auto const status = getAccount(request, accountID); status) + return status; + + if (!accountID) + return Status{RippledError::rpcINVALID_PARAMS, "malformedAccount"}; + + auto rawAcct = context.backend->fetchLedgerObject( + ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + + if (!rawAcct) + return 
Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + ripple::uint256 marker; + if (auto const status = getHexMarker(request, marker); status) + return status; + + response[JS(account)] = ripple::toBase58(accountID); + response[JS(validated)] = true; + response[JS(limit)] = limit; + + std::uint32_t numPages = 0; + response[JS(account_nfts)] = boost::json::value(boost::json::array_kind); + auto& nfts = response.at(JS(account_nfts)).as_array(); + + // if a marker was passed, start at the page specified in marker. Else, + // start at the max page + auto const pageKey = + marker.isZero() ? ripple::keylet::nftpage_max(accountID).key : marker; + + auto const blob = + context.backend->fetchLedgerObject(pageKey, lgrInfo.seq, context.yield); + if (!blob) + return response; + std::optional page{ + ripple::SLE{ripple::SerialIter{blob->data(), blob->size()}, pageKey}}; + + // Continue iteration from the current page + while (page) + { + auto arr = page->getFieldArray(ripple::sfNFTokens); + + for (auto const& o : arr) + { + ripple::uint256 const nftokenID = o[ripple::sfNFTokenID]; + + { + nfts.push_back( + toBoostJson(o.getJson(ripple::JsonOptions::none))); + auto& obj = nfts.back().as_object(); + + // Pull out the components of the nft ID. 
+ obj[SFS(sfFlags)] = ripple::nft::getFlags(nftokenID); + obj[SFS(sfIssuer)] = + to_string(ripple::nft::getIssuer(nftokenID)); + obj[SFS(sfNFTokenTaxon)] = + ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID)); + obj[JS(nft_serial)] = ripple::nft::getSerial(nftokenID); + + if (std::uint16_t xferFee = { + ripple::nft::getTransferFee(nftokenID)}) + obj[SFS(sfTransferFee)] = xferFee; + } + } + + ++numPages; + if (auto npm = (*page)[~ripple::sfPreviousPageMin]) + { + auto const nextKey = ripple::Keylet(ripple::ltNFTOKEN_PAGE, *npm); + if (numPages == limit) + { + response[JS(marker)] = to_string(nextKey.key); + response[JS(limit)] = numPages; + return response; + } + auto const nextBlob = context.backend->fetchLedgerObject( + nextKey.key, lgrInfo.seq, context.yield); + + page.emplace(ripple::SLE{ + ripple::SerialIter{nextBlob->data(), nextBlob->size()}, + nextKey.key}); + } + else + page.reset(); + } + + return response; +} + +Result +doAccountObjects(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::AccountID accountID; + if (auto const status = getAccount(request, accountID); status) + return status; + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + std::optional marker = {}; + if (request.contains("marker")) + { + if (!request.at("marker").is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"}; + + marker = request.at("marker").as_string().c_str(); + } + + std::optional objectType = {}; + if (request.contains(JS(type))) + { + if (!request.at(JS(type)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "typeNotString"}; + + std::string typeAsString = request.at(JS(type)).as_string().c_str(); + if (types.find(typeAsString) == types.end()) + return 
Status{RippledError::rpcINVALID_PARAMS, "typeInvalid"}; + + objectType = types[typeAsString]; + } + + response[JS(account)] = ripple::to_string(accountID); + response[JS(account_objects)] = boost::json::value(boost::json::array_kind); + boost::json::array& jsonObjects = + response.at(JS(account_objects)).as_array(); + + auto const addToResponse = [&](ripple::SLE&& sle) { + if (!objectType || objectType == sle.getType()) + { + jsonObjects.push_back(toJson(sle)); + } + }; + + auto next = traverseOwnedNodes( + *context.backend, + accountID, + lgrInfo.seq, + limit, + marker, + context.yield, + addToResponse); + + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + + if (auto status = std::get_if(&next)) + return *status; + + auto const& nextMarker = std::get(next); + if (nextMarker.isNonZero()) + response[JS(marker)] = nextMarker.toString(); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/AccountOffers.cpp b/src/rpc/handlers/AccountOffers.cpp new file mode 100644 index 00000000..2cfd985e --- /dev/null +++ b/src/rpc/handlers/AccountOffers.cpp @@ -0,0 +1,157 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace RPC { + +void +addOffer(boost::json::array& offersJson, ripple::SLE const& offer) +{ + auto quality = getQuality(offer.getFieldH256(ripple::sfBookDirectory)); + ripple::STAmount rate = ripple::amountFromQuality(quality); + + ripple::STAmount takerPays = offer.getFieldAmount(ripple::sfTakerPays); + ripple::STAmount takerGets = offer.getFieldAmount(ripple::sfTakerGets); + + boost::json::object obj; + + if (!takerPays.native()) + { + obj[JS(taker_pays)] = boost::json::value(boost::json::object_kind); + boost::json::object& takerPaysJson = obj.at(JS(taker_pays)).as_object(); + + takerPaysJson[JS(value)] = takerPays.getText(); + takerPaysJson[JS(currency)] = + ripple::to_string(takerPays.getCurrency()); + takerPaysJson[JS(issuer)] = ripple::to_string(takerPays.getIssuer()); + } + else + { + obj[JS(taker_pays)] = takerPays.getText(); + } + + if (!takerGets.native()) + { + obj[JS(taker_gets)] = boost::json::value(boost::json::object_kind); + boost::json::object& takerGetsJson = obj.at(JS(taker_gets)).as_object(); + + takerGetsJson[JS(value)] = takerGets.getText(); + takerGetsJson[JS(currency)] = + ripple::to_string(takerGets.getCurrency()); + takerGetsJson[JS(issuer)] = ripple::to_string(takerGets.getIssuer()); + } + else + { + obj[JS(taker_gets)] = takerGets.getText(); + } + + obj[JS(seq)] = offer.getFieldU32(ripple::sfSequence); + obj[JS(flags)] = offer.getFieldU32(ripple::sfFlags); + obj[JS(quality)] = rate.getText(); + if 
(offer.isFieldPresent(ripple::sfExpiration)) + obj[JS(expiration)] = offer.getFieldU32(ripple::sfExpiration); + + offersJson.push_back(obj); +}; + +Result +doAccountOffers(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::AccountID accountID; + if (auto const status = getAccount(request, accountID); status) + return status; + + auto rawAcct = context.backend->fetchLedgerObject( + ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + + if (!rawAcct) + return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + std::optional marker = {}; + if (request.contains(JS(marker))) + { + if (!request.at(JS(marker)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"}; + + marker = request.at(JS(marker)).as_string().c_str(); + } + + response[JS(account)] = ripple::to_string(accountID); + response[JS(limit)] = limit; + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + response[JS(offers)] = boost::json::value(boost::json::array_kind); + boost::json::array& jsonLines = response.at(JS(offers)).as_array(); + + auto const addToResponse = [&](ripple::SLE&& sle) { + if (sle.getType() == ripple::ltOFFER) + { + addOffer(jsonLines, sle); + } + + return true; + }; + + auto next = traverseOwnedNodes( + *context.backend, + accountID, + lgrInfo.seq, + limit, + marker, + context.yield, + addToResponse); + + if (auto status = std::get_if(&next)) + return *status; + + auto nextMarker = std::get(next); + + if (nextMarker.isNonZero()) + response[JS(marker)] = nextMarker.toString(); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/AccountTx.cpp 
b/src/rpc/handlers/AccountTx.cpp new file mode 100644 index 00000000..59aac6b4 --- /dev/null +++ b/src/rpc/handlers/AccountTx.cpp @@ -0,0 +1,67 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace + +namespace RPC { + +Result +doAccountTx(Context const& context) +{ + ripple::AccountID accountID; + if (auto const status = getAccount(context.params, accountID); status) + return status; + + constexpr std::string_view outerFuncName = __func__; + auto const maybeResponse = traverseTransactions( + context, + [&accountID, &outerFuncName]( + std::shared_ptr const& backend, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield) { + auto [txnsAndCursor, timeDiff] = util::timed([&]() { + return backend->fetchAccountTransactions( + accountID, limit, forward, cursorIn, yield); + }); + gLog.info() << outerFuncName << " db fetch took " << timeDiff + << " milliseconds - num blobs = " + << txnsAndCursor.txns.size(); + return txnsAndCursor; + }); + + if (auto const status = std::get_if(&maybeResponse); status) + return *status; + auto response = std::get(maybeResponse); + + response[JS(account)] = ripple::to_string(accountID); + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/BookChanges.cpp b/src/rpc/handlers/BookChanges.cpp new file mode 100644 index 00000000..b3a2818b --- /dev/null +++ b/src/rpc/handlers/BookChanges.cpp @@ -0,0 +1,275 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +#include +#include + +#include +#include + +namespace json = boost::json; +using namespace ripple; + +namespace RPC { + +/** + * @brief Represents an entry in the book_changes' changes array. + */ +struct BookChange +{ + STAmount sideAVolume; + STAmount sideBVolume; + STAmount highRate; + STAmount lowRate; + STAmount openRate; + STAmount closeRate; +}; + +/** + * @brief Encapsulates the book_changes computations and transformations. + */ +class BookChanges final +{ +public: + BookChanges() = delete; // only accessed via static handle function + + /** + * @brief Computes all book_changes for the given transactions. 
+ * + * @param transactions The transactions to compute book changes for + * @return std::vector Book changes + */ + [[nodiscard]] static std::vector + compute(std::vector const& transactions) + { + return HandlerImpl{}(transactions); + } + +private: + class HandlerImpl final + { + std::map tally_ = {}; + std::optional offerCancel_ = {}; + + public: + [[nodiscard]] std::vector + operator()( + std::vector const& transactions) + { + for (auto const& tx : transactions) + handleBookChange(tx); + + // TODO: rewrite this with std::ranges when compilers catch up + std::vector changes; + std::transform( + std::make_move_iterator(std::begin(tally_)), + std::make_move_iterator(std::end(tally_)), + std::back_inserter(changes), + [](auto obj) { return obj.second; }); + return changes; + } + + private: + void + handleAffectedNode(STObject const& node) + { + auto const& metaType = node.getFName(); + auto const nodeType = node.getFieldU16(sfLedgerEntryType); + + // we only care about ltOFFER objects being modified or + // deleted + if (nodeType != ltOFFER || metaType == sfCreatedNode) + return; + + // if either FF or PF are missing we can't compute + // but generally these are cancelled rather than crossed + // so skipping them is consistent + if (!node.isFieldPresent(sfFinalFields) || + !node.isFieldPresent(sfPreviousFields)) + return; + + auto const& finalFields = + node.peekAtField(sfFinalFields).downcast(); + auto const& previousFields = + node.peekAtField(sfPreviousFields).downcast(); + + // defensive case that should never be hit + if (!finalFields.isFieldPresent(sfTakerGets) || + !finalFields.isFieldPresent(sfTakerPays) || + !previousFields.isFieldPresent(sfTakerGets) || + !previousFields.isFieldPresent(sfTakerPays)) + return; + + // filter out any offers deleted by explicit offer cancels + if (metaType == sfDeletedNode && offerCancel_ && + finalFields.getFieldU32(sfSequence) == *offerCancel_) + return; + + // compute the difference in gets and pays actually + // affected 
onto the offer + auto const deltaGets = finalFields.getFieldAmount(sfTakerGets) - + previousFields.getFieldAmount(sfTakerGets); + auto const deltaPays = finalFields.getFieldAmount(sfTakerPays) - + previousFields.getFieldAmount(sfTakerPays); + + transformAndStore(deltaGets, deltaPays); + } + + void + transformAndStore( + ripple::STAmount const& deltaGets, + ripple::STAmount const& deltaPays) + { + auto const g = to_string(deltaGets.issue()); + auto const p = to_string(deltaPays.issue()); + + auto const noswap = + isXRP(deltaGets) ? true : (isXRP(deltaPays) ? false : (g < p)); + + auto first = noswap ? deltaGets : deltaPays; + auto second = noswap ? deltaPays : deltaGets; + + // defensively programmed, should (probably) never happen + if (second == beast::zero) + return; + + auto const rate = divide(first, second, noIssue()); + + if (first < beast::zero) + first = -first; + + if (second < beast::zero) + second = -second; + + auto const key = noswap ? (g + '|' + p) : (p + '|' + g); + if (tally_.contains(key)) + { + auto& entry = tally_.at(key); + + entry.sideAVolume += first; + entry.sideBVolume += second; + + if (entry.highRate < rate) + entry.highRate = rate; + + if (entry.lowRate > rate) + entry.lowRate = rate; + + entry.closeRate = rate; + } + else + { + // TODO: use paranthesized initialization when clang catches up + tally_[key] = { + first, // sideAVolume + second, // sideBVolume + rate, // highRate + rate, // lowRate + rate, // openRate + rate, // closeRate + }; + } + } + + void + handleBookChange(Backend::TransactionAndMetadata const& blob) + { + auto const [tx, meta] = deserializeTxPlusMeta(blob); + if (!tx || !meta || !tx->isFieldPresent(sfTransactionType)) + return; + + offerCancel_ = shouldCancelOffer(tx); + for (auto const& node : meta->getFieldArray(sfAffectedNodes)) + handleAffectedNode(node); + } + + std::optional + shouldCancelOffer(std::shared_ptr const& tx) const + { + switch (tx->getFieldU16(sfTransactionType)) + { + // in future if any other ways 
emerge to cancel an offer + // this switch makes them easy to add + case ttOFFER_CANCEL: + case ttOFFER_CREATE: + if (tx->isFieldPresent(sfOfferSequence)) + return tx->getFieldU32(sfOfferSequence); + default: + return std::nullopt; + } + } + }; +}; + +void +tag_invoke(json::value_from_tag, json::value& jv, BookChange const& change) +{ + auto amountStr = [](STAmount const& amount) -> std::string { + return isXRP(amount) ? to_string(amount.xrp()) + : to_string(amount.iou()); + }; + + auto currencyStr = [](STAmount const& amount) -> std::string { + return isXRP(amount) ? "XRP_drops" : to_string(amount.issue()); + }; + + jv = { + {JS(currency_a), currencyStr(change.sideAVolume)}, + {JS(currency_b), currencyStr(change.sideBVolume)}, + {JS(volume_a), amountStr(change.sideAVolume)}, + {JS(volume_b), amountStr(change.sideBVolume)}, + {JS(high), to_string(change.highRate.iou())}, + {JS(low), to_string(change.lowRate.iou())}, + {JS(open), to_string(change.openRate.iou())}, + {JS(close), to_string(change.closeRate.iou())}, + }; +} + +json::object const +computeBookChanges( + ripple::LedgerInfo const& lgrInfo, + std::vector const& transactions) +{ + return { + {JS(type), "bookChanges"}, + {JS(ledger_index), lgrInfo.seq}, + {JS(ledger_hash), to_string(lgrInfo.hash)}, + {JS(ledger_time), lgrInfo.closeTime.time_since_epoch().count()}, + {JS(changes), json::value_from(BookChanges::compute(transactions))}, + }; +} + +Result +doBookChanges(Context const& context) +{ + auto const request = context.params; + auto const info = ledgerInfoFromRequest(context); + if (auto const status = std::get_if(&info)) + return *status; + + auto const lgrInfo = std::get(info); + auto const transactions = context.backend->fetchAllTransactionsInLedger( + lgrInfo.seq, context.yield); + return computeBookChanges(lgrInfo, transactions); +} + +} // namespace RPC diff --git a/src/rpc/handlers/BookOffers.cpp b/src/rpc/handlers/BookOffers.cpp new file mode 100644 index 00000000..e71508f2 --- /dev/null +++ 
b/src/rpc/handlers/BookOffers.cpp @@ -0,0 +1,121 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace + +namespace RPC { + +Result +doBookOffers(Context const& context) +{ + auto request = context.params; + + boost::json::object response = {}; + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::Book book; + ripple::uint256 bookBase; + if (request.contains("book")) + { + if (!request.at("book").is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "bookNotString"}; + + if (!bookBase.parseHex(request.at("book").as_string().c_str())) + return Status{RippledError::rpcINVALID_PARAMS, "invalidBook"}; + } + else + { + auto parsed = parseBook(request); + if (auto status = std::get_if(&parsed)) + return *status; 
+ else + { + book = std::get(parsed); + bookBase = getBookBase(book); + } + } + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + ripple::AccountID takerID = beast::zero; + if (auto const status = getTaker(request, takerID); status) + return status; + + ripple::uint256 marker = beast::zero; + if (auto const status = getHexMarker(request, marker); status) + return status; + + auto start = std::chrono::system_clock::now(); + auto [offers, retMarker] = context.backend->fetchBookOffers( + bookBase, lgrInfo.seq, limit, marker, context.yield); + auto end = std::chrono::system_clock::now(); + + gLog.warn() << "Time loading books: " + << std::chrono::duration_cast( + end - start) + .count() + << " milliseconds - request = " << request; + + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + + response[JS(offers)] = postProcessOrderBook( + offers, book, takerID, *context.backend, lgrInfo.seq, context.yield); + + auto end2 = std::chrono::system_clock::now(); + + gLog.warn() << "Time transforming to json: " + << std::chrono::duration_cast( + end2 - end) + .count() + << " milliseconds - request = " << request; + + if (retMarker) + response["marker"] = ripple::strHex(*retMarker); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/ChannelAuthorize.cpp b/src/rpc/handlers/ChannelAuthorize.cpp new file mode 100644 index 00000000..477fa451 --- /dev/null +++ b/src/rpc/handlers/ChannelAuthorize.cpp @@ -0,0 +1,96 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace RPC { + +void +serializePayChanAuthorization( + ripple::Serializer& msg, + ripple::uint256 const& key, + ripple::XRPAmount const& amt) +{ + msg.add32(ripple::HashPrefix::paymentChannelClaim); + msg.addBitString(key); + msg.add64(amt.drops()); +} + +Result +doChannelAuthorize(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + if (!request.contains(JS(amount))) + return Status{RippledError::rpcINVALID_PARAMS, "missingAmount"}; + + if (!request.at(JS(amount)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "amountNotString"}; + + if (!request.contains(JS(key_type)) && !request.contains(JS(secret))) + return Status{ + RippledError::rpcINVALID_PARAMS, "missingKeyTypeOrSecret"}; + + auto v = keypairFromRequst(request); + if (auto status = std::get_if(&v)) + return *status; + + auto const [pk, sk] = + std::get>(v); + + ripple::uint256 channelId; + if (auto const status = getChannelId(request, channelId); status) + return status; + + auto optDrops = + ripple::to_uint64(request.at(JS(amount)).as_string().c_str()); + + if (!optDrops) + return Status{ + RippledError::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"}; + + std::uint64_t drops = *optDrops; + + ripple::Serializer msg; + ripple::serializePayChanAuthorization( + msg, channelId, 
ripple::XRPAmount(drops)); + + try + { + auto const buf = ripple::sign(pk, sk, msg.slice()); + response[JS(signature)] = ripple::strHex(buf); + } + catch (std::exception&) + { + return Status{RippledError::rpcINTERNAL}; + } + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/ChannelVerify.cpp b/src/rpc/handlers/ChannelVerify.cpp new file mode 100644 index 00000000..2db1ea5c --- /dev/null +++ b/src/rpc/handlers/ChannelVerify.cpp @@ -0,0 +1,108 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace RPC { + +Result +doChannelVerify(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + if (!request.contains(JS(amount))) + return Status{RippledError::rpcINVALID_PARAMS, "missingAmount"}; + + if (!request.at(JS(amount)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "amountNotString"}; + + if (!request.contains(JS(signature))) + return Status{RippledError::rpcINVALID_PARAMS, "missingSignature"}; + + if (!request.at(JS(signature)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "signatureNotString"}; + + if (!request.contains(JS(public_key))) + return Status{RippledError::rpcINVALID_PARAMS, "missingPublicKey"}; + + if (!request.at(JS(public_key)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "publicKeyNotString"}; + + std::optional pk; + { + std::string const strPk = + request.at(JS(public_key)).as_string().c_str(); + pk = ripple::parseBase58( + ripple::TokenType::AccountPublic, strPk); + + if (!pk) + { + auto pkHex = ripple::strUnHex(strPk); + if (!pkHex) + return Status{ + RippledError::rpcPUBLIC_MALFORMED, "malformedPublicKey"}; + + auto const pkType = + ripple::publicKeyType(ripple::makeSlice(*pkHex)); + if (!pkType) + return Status{ + RippledError::rpcPUBLIC_MALFORMED, "invalidKeyType"}; + + pk.emplace(ripple::makeSlice(*pkHex)); + } + } + + ripple::uint256 channelId; + if (auto const status = getChannelId(request, channelId); status) + return status; + + auto optDrops = + ripple::to_uint64(request.at(JS(amount)).as_string().c_str()); + + if (!optDrops) + return Status{ + RippledError::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"}; + + std::uint64_t drops = *optDrops; + + auto sig = ripple::strUnHex(request.at(JS(signature)).as_string().c_str()); + + if (!sig || !sig->size()) + return 
Status{RippledError::rpcINVALID_PARAMS, "invalidSignature"}; + + ripple::Serializer msg; + ripple::serializePayChanAuthorization( + msg, channelId, ripple::XRPAmount(drops)); + + response[JS(signature_verified)] = + ripple::verify(*pk, msg.slice(), ripple::makeSlice(*sig), true); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/GatewayBalances.cpp b/src/rpc/handlers/GatewayBalances.cpp new file mode 100644 index 00000000..7cf874f5 --- /dev/null +++ b/src/rpc/handlers/GatewayBalances.cpp @@ -0,0 +1,232 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +namespace RPC { + +Result +doGatewayBalances(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + ripple::AccountID accountID; + if (auto const status = getAccount(request, accountID); status) + return status; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + std::map sums; + std::map> hotBalances; + std::map> assets; + std::map> frozenBalances; + std::set hotWallets; + + if (request.contains(JS(hotwallet))) + { + auto getAccountID = + [](auto const& j) -> std::optional { + if (j.is_string()) + { + auto const pk = ripple::parseBase58( + ripple::TokenType::AccountPublic, j.as_string().c_str()); + if (pk) + { + return ripple::calcAccountID(*pk); + } + + return ripple::parseBase58( + j.as_string().c_str()); + } + return {}; + }; + + auto const& hw = request.at(JS(hotwallet)); + bool valid = true; + + // null is treated as a valid 0-sized array of hotwallet + if (hw.is_array()) + { + auto const& arr = hw.as_array(); + for (unsigned i = 0; i < arr.size(); ++i) + { + if (auto id = getAccountID(arr[i])) + hotWallets.insert(*id); + else + valid = false; + } + } + else if (hw.is_string()) + { + if (auto id = getAccountID(hw)) + hotWallets.insert(*id); + else + valid = false; + } + else + { + valid = false; + } + + if (!valid) + { + response[JS(error)] = "invalidHotWallet"; + return response; + } + } + + // Traverse the cold wallet's trust lines + auto const addToResponse = [&](ripple::SLE&& sle) { + if (sle.getType() == ripple::ltRIPPLE_STATE) + { + ripple::STAmount balance = sle.getFieldAmount(ripple::sfBalance); + + auto lowLimit = sle.getFieldAmount(ripple::sfLowLimit); + auto highLimit = sle.getFieldAmount(ripple::sfHighLimit); + auto lowID = lowLimit.getIssuer(); + auto highID = highLimit.getIssuer(); + bool viewLowest = 
(lowLimit.getIssuer() == accountID); + auto lineLimit = viewLowest ? lowLimit : highLimit; + auto lineLimitPeer = !viewLowest ? lowLimit : highLimit; + auto flags = sle.getFieldU32(ripple::sfFlags); + auto freeze = flags & + (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + if (!viewLowest) + balance.negate(); + + int balSign = balance.signum(); + if (balSign == 0) + return true; + + auto const& peer = !viewLowest ? lowID : highID; + + // Here, a negative balance means the cold wallet owes (normal) + // A positive balance means the cold wallet has an asset + // (unusual) + + if (hotWallets.count(peer) > 0) + { + // This is a specified hot wallet + hotBalances[peer].push_back(-balance); + } + else if (balSign > 0) + { + // This is a gateway asset + assets[peer].push_back(balance); + } + else if (freeze) + { + // An obligation the gateway has frozen + frozenBalances[peer].push_back(-balance); + } + else + { + // normal negative balance, obligation to customer + auto& bal = sums[balance.getCurrency()]; + if (bal == beast::zero) + { + // This is needed to set the currency code correctly + bal = -balance; + } + else + { // when overflow happens, insert a flag to indicate + // https://github.com/XRPLF/rippled/pull/4355 + try + { + bal -= balance; + } + catch (std::runtime_error& e) + { + response["overflow"] = true; + } + } + } + } + return true; + }; + + auto result = traverseOwnedNodes( + *context.backend, + accountID, + lgrInfo.seq, + std::numeric_limits::max(), + {}, + context.yield, + addToResponse); + if (auto status = std::get_if(&result)) + return *status; + + if (!sums.empty()) + { + boost::json::object obj; + for (auto const& [k, v] : sums) + { + obj[ripple::to_string(k)] = v.getText(); + } + response[JS(obligations)] = std::move(obj); + } + + auto toJson = + [](std::map> const& + balances) { + boost::json::object obj; + if (!balances.empty()) + { + for (auto const& [accId, accBalances] : balances) + { + boost::json::array arr; + for (auto const& 
balance : accBalances) + { + boost::json::object entry; + entry[JS(currency)] = + ripple::to_string(balance.issue().currency); + entry[JS(value)] = balance.getText(); + arr.push_back(std::move(entry)); + } + obj[ripple::to_string(accId)] = std::move(arr); + } + } + return obj; + }; + + auto containsHotWallet = [&](auto const& hw) { + return hotBalances.contains(hw); + }; + if (not std::all_of( + hotWallets.begin(), hotWallets.end(), containsHotWallet)) + return Status{RippledError::rpcINVALID_PARAMS, "invalidHotWallet"}; + + if (auto balances = toJson(hotBalances); balances.size()) + response[JS(balances)] = balances; + if (auto balances = toJson(frozenBalances); balances.size()) + response[JS(frozen_balances)] = balances; + if (auto balances = toJson(assets); assets.size()) + response[JS(assets)] = toJson(assets); + response[JS(account)] = request.at(JS(account)); + response[JS(ledger_index)] = lgrInfo.seq; + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + return response; +} +} // namespace RPC diff --git a/src/rpc/handlers/Ledger.cpp b/src/rpc/handlers/Ledger.cpp new file mode 100644 index 00000000..a3334e5b --- /dev/null +++ b/src/rpc/handlers/Ledger.cpp @@ -0,0 +1,183 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace RPC { + +Result +doLedger(Context const& context) +{ + auto params = context.params; + boost::json::object response = {}; + + bool binary = false; + if (params.contains(JS(binary))) + { + if (!params.at(JS(binary)).is_bool()) + return Status{RippledError::rpcINVALID_PARAMS, "binaryFlagNotBool"}; + + binary = params.at(JS(binary)).as_bool(); + } + + bool transactions = false; + if (params.contains(JS(transactions))) + { + if (!params.at(JS(transactions)).is_bool()) + return Status{ + RippledError::rpcINVALID_PARAMS, "transactionsFlagNotBool"}; + + transactions = params.at(JS(transactions)).as_bool(); + } + + bool expand = false; + if (params.contains(JS(expand))) + { + if (!params.at(JS(expand)).is_bool()) + return Status{RippledError::rpcINVALID_PARAMS, "expandFlagNotBool"}; + + expand = params.at(JS(expand)).as_bool(); + } + + bool diff = false; + if (params.contains("diff")) + { + if (!params.at("diff").is_bool()) + return Status{RippledError::rpcINVALID_PARAMS, "diffFlagNotBool"}; + + diff = params.at("diff").as_bool(); + } + + if (params.contains(JS(full))) + return Status{RippledError::rpcNOT_SUPPORTED}; + + if (params.contains(JS(accounts))) + return Status{RippledError::rpcNOT_SUPPORTED}; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + boost::json::object header; + if (binary) + { + header[JS(ledger_data)] = ripple::strHex(ledgerInfoToBlob(lgrInfo)); + } + else + { + header[JS(accepted)] = true; + header[JS(account_hash)] = 
ripple::strHex(lgrInfo.accountHash); + header[JS(close_flags)] = lgrInfo.closeFlags; + header[JS(close_time)] = lgrInfo.closeTime.time_since_epoch().count(); + header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime); + header[JS(close_time_resolution)] = lgrInfo.closeTimeResolution.count(); + header[JS(closed)] = true; + header[JS(hash)] = ripple::strHex(lgrInfo.hash); + header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + header[JS(ledger_index)] = std::to_string(lgrInfo.seq); + header[JS(parent_close_time)] = + lgrInfo.parentCloseTime.time_since_epoch().count(); + header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash); + header[JS(seqNum)] = std::to_string(lgrInfo.seq); + header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops); + header[JS(total_coins)] = ripple::to_string(lgrInfo.drops); + header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash); + } + header[JS(closed)] = true; + + if (transactions) + { + header[JS(transactions)] = boost::json::value(boost::json::array_kind); + boost::json::array& jsonTxs = header.at(JS(transactions)).as_array(); + if (expand) + { + auto txns = context.backend->fetchAllTransactionsInLedger( + lgrInfo.seq, context.yield); + + std::transform( + std::move_iterator(txns.begin()), + std::move_iterator(txns.end()), + std::back_inserter(jsonTxs), + [binary](auto obj) { + boost::json::object entry; + if (!binary) + { + auto [txn, meta] = toExpandedJson(obj); + entry = txn; + entry[JS(metaData)] = meta; + } + else + { + entry[JS(tx_blob)] = ripple::strHex(obj.transaction); + entry[JS(meta)] = ripple::strHex(obj.metadata); + } + // entry[JS(ledger_index)] = obj.ledgerSequence; + return entry; + }); + } + else + { + auto hashes = context.backend->fetchAllTransactionHashesInLedger( + lgrInfo.seq, context.yield); + std::transform( + std::move_iterator(hashes.begin()), + std::move_iterator(hashes.end()), + std::back_inserter(jsonTxs), + [](auto hash) { + boost::json::object entry; + return 
boost::json::string(ripple::strHex(hash)); + }); + } + } + + if (diff) + { + header["diff"] = boost::json::value(boost::json::array_kind); + boost::json::array& jsonDiff = header.at("diff").as_array(); + auto diff = + context.backend->fetchLedgerDiff(lgrInfo.seq, context.yield); + for (auto const& obj : diff) + { + boost::json::object entry; + entry["object_id"] = ripple::strHex(obj.key); + if (binary) + entry["object"] = ripple::strHex(obj.blob); + else if (obj.blob.size()) + { + ripple::STLedgerEntry sle{ + ripple::SerialIter{obj.blob.data(), obj.blob.size()}, + obj.key}; + entry["object"] = toJson(sle); + } + else + entry["object"] = ""; + jsonDiff.push_back(std::move(entry)); + } + } + + response[JS(ledger)] = header; + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/LedgerData.cpp b/src/rpc/handlers/LedgerData.cpp new file mode 100644 index 00000000..fdc86f9d --- /dev/null +++ b/src/rpc/handlers/LedgerData.cpp @@ -0,0 +1,230 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include + +#include + +// Get state nodes from a ledger +// Inputs: +// limit: integer, maximum number of entries +// marker: opaque, resume point +// binary: boolean, format +// type: string // optional, defaults to all ledger node types +// Outputs: +// ledger_hash: chosen ledger's hash +// ledger_index: chosen ledger's index +// state: array of state nodes +// marker: resume point, if any +// +// + +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace + +namespace RPC { + +using boost::json::value_to; + +Result +doLedgerData(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + bool const binary = getBool(request, "binary", false); + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + if (!binary) + limit = std::clamp(limit, {1}, {256}); + + bool outOfOrder = false; + if (request.contains("out_of_order")) + { + if (!request.at("out_of_order").is_bool()) + return Status{RippledError::rpcINVALID_PARAMS, "binaryFlagNotBool"}; + outOfOrder = request.at("out_of_order").as_bool(); + } + + std::optional marker; + std::optional diffMarker; + if (request.contains(JS(marker))) + { + if (!request.at(JS(marker)).is_string()) + { + if (outOfOrder) + { + if (!request.at(JS(marker)).is_int64()) + return Status{ + RippledError::rpcINVALID_PARAMS, + "markerNotStringOrInt"}; + diffMarker = value_to(request.at(JS(marker))); + } + else + return Status{ + RippledError::rpcINVALID_PARAMS, "markerNotString"}; + } + else + { + gLog.debug() << "Parsing marker"; + + marker = ripple::uint256{}; + if (!marker->parseHex(request.at(JS(marker)).as_string().c_str())) + return Status{ + RippledError::rpcINVALID_PARAMS, "markerMalformed"}; + } + } + + auto v = ledgerInfoFromRequest(context); + if (auto 
status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + boost::json::object header; + // no marker means this is the first call, so we return header info + if (!request.contains(JS(marker))) + { + if (binary) + { + header[JS(ledger_data)] = ripple::strHex(ledgerInfoToBlob(lgrInfo)); + } + else + { + header[JS(accepted)] = true; + header[JS(account_hash)] = ripple::strHex(lgrInfo.accountHash); + header[JS(close_flags)] = lgrInfo.closeFlags; + header[JS(close_time)] = + lgrInfo.closeTime.time_since_epoch().count(); + header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime); + header[JS(close_time_resolution)] = + lgrInfo.closeTimeResolution.count(); + header[JS(hash)] = ripple::strHex(lgrInfo.hash); + header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + header[JS(ledger_index)] = std::to_string(lgrInfo.seq); + header[JS(parent_close_time)] = + lgrInfo.parentCloseTime.time_since_epoch().count(); + header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash); + header[JS(seqNum)] = std::to_string(lgrInfo.seq); + header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops); + header[JS(total_coins)] = ripple::to_string(lgrInfo.drops); + header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash); + } + + header[JS(closed)] = true; + response[JS(ledger)] = header; + } + else + { + if (!outOfOrder && + !context.backend->fetchLedgerObject( + *marker, lgrInfo.seq, context.yield)) + return Status{ + RippledError::rpcINVALID_PARAMS, "markerDoesNotExist"}; + } + + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + + auto start = std::chrono::system_clock::now(); + std::vector results; + if (diffMarker) + { + assert(outOfOrder); + auto diff = + context.backend->fetchLedgerDiff(*diffMarker, context.yield); + std::vector keys; + for (auto&& [key, object] : diff) + { + if (!object.size()) + { + keys.push_back(std::move(key)); + } + } + auto objs = context.backend->fetchLedgerObjects( + 
keys, lgrInfo.seq, context.yield); + for (size_t i = 0; i < objs.size(); ++i) + { + auto&& obj = objs[i]; + if (obj.size()) + results.push_back({std::move(keys[i]), std::move(obj)}); + } + if (*diffMarker > lgrInfo.seq) + response["marker"] = *diffMarker - 1; + } + else + { + auto page = context.backend->fetchLedgerPage( + marker, lgrInfo.seq, limit, outOfOrder, context.yield); + results = std::move(page.objects); + if (page.cursor) + response["marker"] = ripple::strHex(*(page.cursor)); + else if (outOfOrder) + response["marker"] = + context.backend->fetchLedgerRange()->maxSequence; + } + auto end = std::chrono::system_clock::now(); + + auto time = + std::chrono::duration_cast(end - start) + .count(); + + gLog.debug() << "Number of results = " << results.size() << " fetched in " + << time << " microseconds"; + boost::json::array objects; + objects.reserve(results.size()); + for (auto const& [key, object] : results) + { + ripple::STLedgerEntry sle{ + ripple::SerialIter{object.data(), object.size()}, key}; + if (binary) + { + boost::json::object entry; + entry[JS(data)] = ripple::serializeHex(sle); + entry[JS(index)] = ripple::to_string(sle.key()); + objects.push_back(std::move(entry)); + } + else + objects.push_back(toJson(sle)); + } + response[JS(state)] = std::move(objects); + if (outOfOrder) + response["cache_full"] = context.backend->cache().isFull(); + auto end2 = std::chrono::system_clock::now(); + + time = std::chrono::duration_cast(end2 - end) + .count(); + gLog.debug() << "Number of results = " << results.size() + << " serialized in " << time << " microseconds"; + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/LedgerEntry.cpp b/src/rpc/handlers/LedgerEntry.cpp new file mode 100644 index 00000000..aa710dd0 --- /dev/null +++ b/src/rpc/handlers/LedgerEntry.cpp @@ -0,0 +1,433 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright 
(c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +#include +#include +// { +// ledger_hash : +// ledger_index : +// ... +// } + +namespace RPC { + +using boost::json::value_to; + +Result +doLedgerEntry(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + bool const binary = getBool(request, "binary", false); + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::uint256 key; + // the expected type of the entry object + auto expectedType = ripple::ltANY; + + // Note: according to docs, only 1 of the below should be specified at any + // time. 
see https://xrpl.org/ledger_entry.html#ledger_entry + if (request.contains(JS(index))) + { + if (!request.at(JS(index)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "indexNotString"}; + + if (!key.parseHex(request.at(JS(index)).as_string().c_str())) + return Status{ClioError::rpcMALFORMED_REQUEST}; + } + else if (request.contains(JS(account_root))) + { + if (!request.at(JS(account_root)).is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, "account_rootNotString"}; + + auto const account = ripple::parseBase58( + request.at(JS(account_root)).as_string().c_str()); + expectedType = ripple::ltACCOUNT_ROOT; + if (!account || account->isZero()) + return Status{ClioError::rpcMALFORMED_ADDRESS}; + else + key = ripple::keylet::account(*account).key; + } + else if (request.contains(JS(check))) + { + if (!request.at(JS(check)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "checkNotString"}; + + expectedType = ripple::ltCHECK; + if (!key.parseHex(request.at(JS(check)).as_string().c_str())) + { + return Status{RippledError::rpcINVALID_PARAMS, "checkMalformed"}; + } + } + else if (request.contains(JS(deposit_preauth))) + { + expectedType = ripple::ltDEPOSIT_PREAUTH; + if (!request.at(JS(deposit_preauth)).is_object()) + { + if (!request.at(JS(deposit_preauth)).is_string() || + !key.parseHex( + request.at(JS(deposit_preauth)).as_string().c_str())) + { + return Status{ + RippledError::rpcINVALID_PARAMS, + "deposit_preauthMalformed"}; + } + } + else if ( + !request.at(JS(deposit_preauth)).as_object().contains(JS(owner)) || + !request.at(JS(deposit_preauth)) + .as_object() + .at(JS(owner)) + .is_string()) + { + return Status{RippledError::rpcINVALID_PARAMS, "malformedOwner"}; + } + else if ( + !request.at(JS(deposit_preauth)) + .as_object() + .contains(JS(authorized)) || + !request.at(JS(deposit_preauth)) + .as_object() + .at(JS(authorized)) + .is_string()) + { + return Status{ + RippledError::rpcINVALID_PARAMS, "authorizedNotString"}; + } 
+ else + { + boost::json::object const& deposit_preauth = + request.at(JS(deposit_preauth)).as_object(); + + auto const owner = ripple::parseBase58( + deposit_preauth.at(JS(owner)).as_string().c_str()); + + auto const authorized = ripple::parseBase58( + deposit_preauth.at(JS(authorized)).as_string().c_str()); + + if (!owner) + return Status{ + RippledError::rpcINVALID_PARAMS, "malformedOwner"}; + else if (!authorized) + return Status{ + RippledError::rpcINVALID_PARAMS, "malformedAuthorized"}; + else + key = ripple::keylet::depositPreauth(*owner, *authorized).key; + } + } + else if (request.contains(JS(directory))) + { + expectedType = ripple::ltDIR_NODE; + if (!request.at(JS(directory)).is_object()) + { + if (!request.at(JS(directory)).is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, "directoryNotString"}; + + if (!key.parseHex(request.at(JS(directory)).as_string().c_str())) + { + return Status{ + RippledError::rpcINVALID_PARAMS, "malformedDirectory"}; + } + } + else if ( + request.at(JS(directory)).as_object().contains(JS(sub_index)) && + !request.at(JS(directory)).as_object().at(JS(sub_index)).is_int64()) + { + return Status{RippledError::rpcINVALID_PARAMS, "sub_indexNotInt"}; + } + else + { + auto directory = request.at(JS(directory)).as_object(); + std::uint64_t subIndex = directory.contains(JS(sub_index)) + ? boost::json::value_to( + directory.at(JS(sub_index))) + : 0; + + if (directory.contains(JS(dir_root))) + { + ripple::uint256 uDirRoot; + + if (directory.contains(JS(owner))) + { + // May not specify both dir_root and owner. 
+ return Status{ + RippledError::rpcINVALID_PARAMS, + "mayNotSpecifyBothDirRootAndOwner"}; + } + else if (!uDirRoot.parseHex( + directory.at(JS(dir_root)).as_string().c_str())) + { + return Status{ + RippledError::rpcINVALID_PARAMS, "malformedDirRoot"}; + } + else + { + key = ripple::keylet::page(uDirRoot, subIndex).key; + } + } + else if (directory.contains(JS(owner))) + { + auto const ownerID = ripple::parseBase58( + directory.at(JS(owner)).as_string().c_str()); + + if (!ownerID) + { + return Status{ClioError::rpcMALFORMED_ADDRESS}; + } + else + { + key = ripple::keylet::page( + ripple::keylet::ownerDir(*ownerID), subIndex) + .key; + } + } + else + { + return Status{ + RippledError::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"}; + } + } + } + else if (request.contains(JS(escrow))) + { + expectedType = ripple::ltESCROW; + if (!request.at(JS(escrow)).is_object()) + { + if (!key.parseHex(request.at(JS(escrow)).as_string().c_str())) + return Status{ + RippledError::rpcINVALID_PARAMS, "malformedEscrow"}; + } + else if ( + !request.at(JS(escrow)).as_object().contains(JS(owner)) || + !request.at(JS(escrow)).as_object().at(JS(owner)).is_string()) + { + return Status{RippledError::rpcINVALID_PARAMS, "malformedOwner"}; + } + else if ( + !request.at(JS(escrow)).as_object().contains(JS(seq)) || + !request.at(JS(escrow)).as_object().at(JS(seq)).is_int64()) + { + return Status{RippledError::rpcINVALID_PARAMS, "malformedSeq"}; + } + else + { + auto const id = + ripple::parseBase58(request.at(JS(escrow)) + .as_object() + .at(JS(owner)) + .as_string() + .c_str()); + + if (!id) + return Status{ClioError::rpcMALFORMED_ADDRESS}; + else + { + std::uint32_t seq = + request.at(JS(escrow)).as_object().at(JS(seq)).as_int64(); + key = ripple::keylet::escrow(*id, seq).key; + } + } + } + else if (request.contains(JS(offer))) + { + expectedType = ripple::ltOFFER; + if (!request.at(JS(offer)).is_object()) + { + if (!key.parseHex(request.at(JS(offer)).as_string().c_str())) + return Status{ + 
RippledError::rpcINVALID_PARAMS, "malformedOffer"}; + } + else if ( + !request.at(JS(offer)).as_object().contains(JS(account)) || + !request.at(JS(offer)).as_object().at(JS(account)).is_string()) + { + return Status{RippledError::rpcINVALID_PARAMS, "malformedAccount"}; + } + else if ( + !request.at(JS(offer)).as_object().contains(JS(seq)) || + !request.at(JS(offer)).as_object().at(JS(seq)).is_int64()) + { + return Status{RippledError::rpcINVALID_PARAMS, "malformedSeq"}; + } + else + { + auto offer = request.at(JS(offer)).as_object(); + auto const id = ripple::parseBase58( + offer.at(JS(account)).as_string().c_str()); + + if (!id) + return Status{ClioError::rpcMALFORMED_ADDRESS}; + else + { + std::uint32_t seq = + boost::json::value_to(offer.at(JS(seq))); + key = ripple::keylet::offer(*id, seq).key; + } + } + } + else if (request.contains(JS(payment_channel))) + { + expectedType = ripple::ltPAYCHAN; + if (!request.at(JS(payment_channel)).is_string()) + return Status{ + RippledError::rpcINVALID_PARAMS, "paymentChannelNotString"}; + + if (!key.parseHex(request.at(JS(payment_channel)).as_string().c_str())) + return Status{ + RippledError::rpcINVALID_PARAMS, "malformedPaymentChannel"}; + } + else if (request.contains(JS(ripple_state))) + { + if (!request.at(JS(ripple_state)).is_object()) + return Status{ + RippledError::rpcINVALID_PARAMS, "rippleStateNotObject"}; + + expectedType = ripple::ltRIPPLE_STATE; + ripple::Currency currency; + boost::json::object const& state = + request.at(JS(ripple_state)).as_object(); + + if (!state.contains(JS(currency)) || + !state.at(JS(currency)).is_string()) + { + return Status{RippledError::rpcINVALID_PARAMS, "currencyNotString"}; + } + + if (!state.contains(JS(accounts)) || + !state.at(JS(accounts)).is_array() || + 2 != state.at(JS(accounts)).as_array().size() || + !state.at(JS(accounts)).as_array().at(0).is_string() || + !state.at(JS(accounts)).as_array().at(1).is_string() || + (state.at(JS(accounts)).as_array().at(0).as_string() == 
+ state.at(JS(accounts)).as_array().at(1).as_string())) + { + return Status{RippledError::rpcINVALID_PARAMS, "malformedAccounts"}; + } + + auto const id1 = ripple::parseBase58( + state.at(JS(accounts)).as_array().at(0).as_string().c_str()); + auto const id2 = ripple::parseBase58( + state.at(JS(accounts)).as_array().at(1).as_string().c_str()); + + if (!id1 || !id2) + return Status{ + ClioError::rpcMALFORMED_ADDRESS, "malformedAddresses"}; + + else if (!ripple::to_currency( + currency, state.at(JS(currency)).as_string().c_str())) + return Status{ + ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency"}; + + key = ripple::keylet::line(*id1, *id2, currency).key; + } + else if (request.contains(JS(ticket))) + { + expectedType = ripple::ltTICKET; + // ticket object : account, ticket_seq + if (!request.at(JS(ticket)).is_object()) + { + if (!request.at(JS(ticket)).is_string()) + return Status{ + ClioError::rpcMALFORMED_REQUEST, "ticketNotString"}; + + if (!key.parseHex(request.at(JS(ticket)).as_string().c_str())) + return Status{ + ClioError::rpcMALFORMED_REQUEST, "malformedTicket"}; + } + else if ( + !request.at(JS(ticket)).as_object().contains(JS(account)) || + !request.at(JS(ticket)).as_object().at(JS(account)).is_string()) + { + return Status{ClioError::rpcMALFORMED_REQUEST}; + } + else if ( + !request.at(JS(ticket)).as_object().contains(JS(ticket_seq)) || + !request.at(JS(ticket)).as_object().at(JS(ticket_seq)).is_int64()) + { + return Status{ + ClioError::rpcMALFORMED_REQUEST, "malformedTicketSeq"}; + } + else + { + auto const id = + ripple::parseBase58(request.at(JS(ticket)) + .as_object() + .at(JS(account)) + .as_string() + .c_str()); + + if (!id) + return Status{ClioError::rpcMALFORMED_OWNER}; + else + { + std::uint32_t seq = request.at(JS(ticket)) + .as_object() + .at(JS(ticket_seq)) + .as_int64(); + + key = ripple::getTicketIndex(*id, seq); + } + } + } + else + { + return Status{RippledError::rpcINVALID_PARAMS, "unknownOption"}; + } + + auto dbResponse = + 
context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield); + + if (!dbResponse or dbResponse->size() == 0) + return Status{"entryNotFound"}; + + // check expected type matches actual type + ripple::STLedgerEntry sle{ + ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key}; + if (expectedType != ripple::ltANY && sle.getType() != expectedType) + return Status{"unexpectedLedgerType"}; + + response[JS(index)] = ripple::strHex(key); + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response[JS(ledger_index)] = lgrInfo.seq; + + if (binary) + { + response[JS(node_binary)] = ripple::strHex(*dbResponse); + } + else + { + response[JS(node)] = toJson(sle); + } + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/LedgerRange.cpp b/src/rpc/handlers/LedgerRange.cpp new file mode 100644 index 00000000..9a64f90e --- /dev/null +++ b/src/rpc/handlers/LedgerRange.cpp @@ -0,0 +1,44 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +namespace RPC { + +Result +doLedgerRange(Context const& context) +{ + boost::json::object response = {}; + + auto range = context.backend->fetchLedgerRange(); + if (!range) + { + return Status{RippledError::rpcNOT_READY, "rangeNotFound"}; + } + else + { + response[JS(ledger_index_min)] = range->minSequence; + response[JS(ledger_index_max)] = range->maxSequence; + } + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/NFTHistory.cpp b/src/rpc/handlers/NFTHistory.cpp new file mode 100644 index 00000000..d94ae4ab --- /dev/null +++ b/src/rpc/handlers/NFTHistory.cpp @@ -0,0 +1,70 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +using namespace clio; + +// local to compilation unit loggers +namespace { +clio::Logger gLog{"RPC"}; +} // namespace + +namespace RPC { + +Result +doNFTHistory(Context const& context) +{ + auto const maybeTokenID = getNFTID(context.params); + if (auto const status = std::get_if(&maybeTokenID); status) + return *status; + auto const tokenID = std::get(maybeTokenID); + + constexpr std::string_view outerFuncName = __func__; + auto const maybeResponse = traverseTransactions( + context, + [&tokenID, &outerFuncName]( + std::shared_ptr const& backend, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield) + -> Backend::TransactionsAndCursor { + auto const [txnsAndCursor, timeDiff] = + util::timed([&, &tokenID = tokenID]() { + return backend->fetchNFTTransactions( + tokenID, limit, forward, cursorIn, yield); + }); + gLog.info() << outerFuncName << " db fetch took " << timeDiff + << " milliseconds - num blobs = " + << txnsAndCursor.txns.size(); + return txnsAndCursor; + }); + + if (auto const status = std::get_if(&maybeResponse); status) + return *status; + auto response = std::get(maybeResponse); + + response[JS(nft_id)] = ripple::to_string(tokenID); + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/NFTInfo.cpp b/src/rpc/handlers/NFTInfo.cpp new file mode 100644 index 00000000..365d4d52 --- /dev/null +++ b/src/rpc/handlers/NFTInfo.cpp @@ -0,0 +1,68 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +#include +#include + +namespace RPC { + +Result +doNFTInfo(Context const& context) +{ + auto const request = context.params; + boost::json::object response = {}; + + auto const maybeTokenID = getNFTID(request); + if (auto const status = std::get_if(&maybeTokenID); status) + return *status; + auto const tokenID = std::get(maybeTokenID); + + auto const maybeLedgerInfo = ledgerInfoFromRequest(context); + if (auto const status = std::get_if(&maybeLedgerInfo); status) + return *status; + auto const lgrInfo = std::get(maybeLedgerInfo); + + auto const dbResponse = + context.backend->fetchNFT(tokenID, lgrInfo.seq, context.yield); + if (!dbResponse) + return Status{RippledError::rpcOBJECT_NOT_FOUND, "NFT not found"}; + + response[JS(nft_id)] = ripple::strHex(dbResponse->tokenID); + response[JS(ledger_index)] = dbResponse->ledgerSequence; + response[JS(owner)] = ripple::toBase58(dbResponse->owner); + response["is_burned"] = dbResponse->isBurned; + response[JS(uri)] = ripple::strHex(dbResponse->uri); + + response[JS(flags)] = ripple::nft::getFlags(dbResponse->tokenID); + response["transfer_rate"] = + ripple::nft::getTransferFee(dbResponse->tokenID); + response[JS(issuer)] = + ripple::toBase58(ripple::nft::getIssuer(dbResponse->tokenID)); + response["nft_taxon"] = + ripple::nft::toUInt32(ripple::nft::getTaxon(dbResponse->tokenID)); + response[JS(nft_serial)] = 
ripple::nft::getSerial(dbResponse->tokenID); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/NFTOffers.cpp b/src/rpc/handlers/NFTOffers.cpp new file mode 100644 index 00000000..179dde5a --- /dev/null +++ b/src/rpc/handlers/NFTOffers.cpp @@ -0,0 +1,198 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace json = boost::json; + +namespace ripple { + +void +tag_invoke(json::value_from_tag, json::value& jv, SLE const& offer) +{ + auto amount = ::RPC::toBoostJson( + offer.getFieldAmount(sfAmount).getJson(JsonOptions::none)); + + json::object obj = { + {JS(nft_offer_index), to_string(offer.key())}, + {JS(flags), offer[sfFlags]}, + {JS(owner), toBase58(offer.getAccountID(sfOwner))}, + {JS(amount), std::move(amount)}, + }; + + if (offer.isFieldPresent(sfDestination)) + obj.insert_or_assign( + JS(destination), toBase58(offer.getAccountID(sfDestination))); + + if (offer.isFieldPresent(sfExpiration)) + obj.insert_or_assign(JS(expiration), offer.getFieldU32(sfExpiration)); + + jv = std::move(obj); +} + +} // namespace ripple + +namespace RPC { + +Result +enumerateNFTOffers( + Context const& context, + ripple::uint256 const& tokenid, + ripple::Keylet const& directory) +{ + auto const& request = context.params; + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + // TODO: just check for existence without pulling + if (!context.backend->fetchLedgerObject( + directory.key, lgrInfo.seq, context.yield)) + return Status{RippledError::rpcOBJECT_NOT_FOUND, "notFound"}; + + std::uint32_t limit; + if (auto const status = getLimit(context, limit); status) + return status; + + boost::json::object response = {}; + boost::json::array jsonOffers = {}; + response[JS(nft_id)] = ripple::to_string(tokenid); + + std::vector offers; + auto reserve = limit; + ripple::uint256 cursor; + uint64_t startHint = 0; + + if (request.contains(JS(marker))) + { + // We have a start point. Use limit - 1 from the result and use the + // very last one for the resume. 
+ auto const& marker(request.at(JS(marker))); + + if (!marker.is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"}; + + if (!cursor.parseHex(marker.as_string().c_str())) + return Status{RippledError::rpcINVALID_PARAMS, "malformedCursor"}; + + auto const sle = + read(ripple::keylet::nftoffer(cursor), lgrInfo, context); + + if (!sle || + sle->getFieldU16(ripple::sfLedgerEntryType) != + ripple::ltNFTOKEN_OFFER || + tokenid != sle->getFieldH256(ripple::sfNFTokenID)) + return Status{RippledError::rpcINVALID_PARAMS}; + + startHint = sle->getFieldU64(ripple::sfNFTokenOfferNode); + jsonOffers.push_back(json::value_from(*sle)); + offers.reserve(reserve); + } + else + { + // We have no start point, limit should be one higher than requested. + offers.reserve(++reserve); + } + + auto result = traverseOwnedNodes( + *context.backend, + directory, + cursor, + startHint, + lgrInfo.seq, + reserve, + {}, + context.yield, + [&offers](ripple::SLE&& offer) { + if (offer.getType() == ripple::ltNFTOKEN_OFFER) + { + offers.push_back(std::move(offer)); + return true; + } + + return false; + }); + + if (auto status = std::get_if(&result)) + return *status; + + if (offers.size() == reserve) + { + response[JS(limit)] = limit; + response[JS(marker)] = to_string(offers.back().key()); + offers.pop_back(); + } + + std::transform( + std::cbegin(offers), + std::cend(offers), + std::back_inserter(jsonOffers), + [](auto const& offer) { + // uses tag_invoke at the top of this file + return json::value_from(offer); + }); + + response.insert_or_assign(JS(offers), std::move(jsonOffers)); + return response; +} + +Result +doNFTOffers(Context const& context, bool sells) +{ + auto const v = getNFTID(context.params); + if (auto const status = std::get_if(&v)) + return *status; + + auto const getKeylet = [sells, &v]() { + if (sells) + return ripple::keylet::nft_sells(std::get(v)); + + return ripple::keylet::nft_buys(std::get(v)); + }; + + return enumerateNFTOffers( + context, 
std::get(v), getKeylet()); +} + +Result +doNFTSellOffers(Context const& context) +{ + return doNFTOffers(context, true); +} + +Result +doNFTBuyOffers(Context const& context) +{ + return doNFTOffers(context, false); +} + +} // namespace RPC diff --git a/src/rpc/handlers/NoRippleCheck.cpp b/src/rpc/handlers/NoRippleCheck.cpp new file mode 100644 index 00000000..6520f8c3 --- /dev/null +++ b/src/rpc/handlers/NoRippleCheck.cpp @@ -0,0 +1,190 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +namespace RPC { + +boost::json::object +getBaseTx( + ripple::AccountID const& accountID, + std::uint32_t accountSeq, + ripple::Fees const& fees) +{ + boost::json::object tx; + tx[JS(Sequence)] = accountSeq; + tx[JS(Account)] = ripple::toBase58(accountID); + tx[JS(Fee)] = RPC::toBoostJson(fees.units.jsonClipped()); + return tx; +} + +Result +doNoRippleCheck(Context const& context) +{ + auto const& request = context.params; + + ripple::AccountID accountID; + if (auto const status = getAccount(request, accountID); status) + return status; + + std::string role = getRequiredString(request, "role"); + bool roleGateway = false; + { + if (role == "gateway") + roleGateway = true; + else if (role != "user") + return Status{ + RippledError::rpcINVALID_PARAMS, "role field is invalid"}; + } + + std::uint32_t limit = 300; + if (auto const status = getLimit(context, limit); status) + return status; + + bool includeTxs = getBool(request, "transactions", false); + + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + std::optional fees = includeTxs + ? 
context.backend->fetchFees(lgrInfo.seq, context.yield) + : std::nullopt; + + boost::json::array transactions; + + auto keylet = ripple::keylet::account(accountID); + auto accountObj = context.backend->fetchLedgerObject( + keylet.key, lgrInfo.seq, context.yield); + if (!accountObj) + throw AccountNotFoundError(ripple::toBase58(accountID)); + + ripple::SerialIter it{accountObj->data(), accountObj->size()}; + ripple::SLE sle{it, keylet.key}; + + std::uint32_t accountSeq = sle.getFieldU32(ripple::sfSequence); + + boost::json::array problems; + bool bDefaultRipple = + sle.getFieldU32(ripple::sfFlags) & ripple::lsfDefaultRipple; + if (bDefaultRipple & !roleGateway) + { + problems.push_back( + "You appear to have set your default ripple flag even though " + "you " + "are not a gateway. This is not recommended unless you are " + "experimenting"); + } + else if (roleGateway & !bDefaultRipple) + { + problems.push_back( + "You should immediately set your default ripple flag"); + if (includeTxs) + { + auto tx = getBaseTx(accountID, accountSeq++, *fees); + tx[JS(TransactionType)] = JS(AccountSet); + tx[JS(SetFlag)] = 8; + transactions.push_back(tx); + } + } + + traverseOwnedNodes( + *context.backend, + accountID, + lgrInfo.seq, + std::numeric_limits::max(), + {}, + context.yield, + [roleGateway, + includeTxs, + &fees, + &transactions, + &accountSeq, + &limit, + &accountID, + &problems](ripple::SLE&& ownedItem) { + if (ownedItem.getType() == ripple::ltRIPPLE_STATE) + { + bool const bLow = accountID == + ownedItem.getFieldAmount(ripple::sfLowLimit).getIssuer(); + + bool const bNoRipple = ownedItem.getFieldU32(ripple::sfFlags) & + (bLow ? 
ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + + std::string problem; + bool needFix = false; + if (bNoRipple & roleGateway) + { + problem = "You should clear the no ripple flag on your "; + needFix = true; + } + else if (!bNoRipple & !roleGateway) + { + problem = + "You should probably set the no ripple flag on " + "your "; + needFix = true; + } + if (needFix) + { + ripple::AccountID peer = + ownedItem + .getFieldAmount( + bLow ? ripple::sfHighLimit : ripple::sfLowLimit) + .getIssuer(); + ripple::STAmount peerLimit = ownedItem.getFieldAmount( + bLow ? ripple::sfHighLimit : ripple::sfLowLimit); + problem += to_string(peerLimit.getCurrency()); + problem += " line to "; + problem += to_string(peerLimit.getIssuer()); + problems.emplace_back(problem); + if (includeTxs) + { + ripple::STAmount limitAmount(ownedItem.getFieldAmount( + bLow ? ripple::sfLowLimit : ripple::sfHighLimit)); + limitAmount.setIssuer(peer); + auto tx = getBaseTx(accountID, accountSeq++, *fees); + tx[JS(TransactionType)] = JS(TrustSet); + tx[JS(LimitAmount)] = RPC::toBoostJson( + limitAmount.getJson(ripple::JsonOptions::none)); + tx[JS(Flags)] = bNoRipple ? ripple::tfClearNoRipple + : ripple::tfSetNoRipple; + transactions.push_back(tx); + } + + if (limit-- == 0) + return false; + } + } + return true; + }); + + boost::json::object response; + response[JS(ledger_index)] = lgrInfo.seq; + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + response["problems"] = std::move(problems); + if (includeTxs) + response[JS(transactions)] = std::move(transactions); + + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/Random.cpp b/src/rpc/handlers/Random.cpp new file mode 100644 index 00000000..318e13f6 --- /dev/null +++ b/src/rpc/handlers/Random.cpp @@ -0,0 +1,40 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include + +#include + +namespace RPC { + +Result +doRandom(Context const& context) +{ + ripple::uint256 rand; + + beast::rngfill(rand.begin(), rand.size(), ripple::crypto_prng()); + boost::json::object result; + result[JS(random)] = ripple::strHex(rand); + return result; +} + +} // namespace RPC diff --git a/src/rpc/handlers/ServerInfo.cpp b/src/rpc/handlers/ServerInfo.cpp new file mode 100644 index 00000000..0992e017 --- /dev/null +++ b/src/rpc/handlers/ServerInfo.cpp @@ -0,0 +1,121 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include
+#include + +namespace RPC { + +Result +doServerInfo(Context const& context) +{ + boost::json::object response = {}; + + auto range = context.backend->fetchLedgerRange(); + if (!range) + { + return Status{ + RippledError::rpcNOT_READY, + "emptyDatabase", + "The server has no data in the database"}; + } + + auto lgrInfo = context.backend->fetchLedgerBySequence( + range->maxSequence, context.yield); + + auto fees = context.backend->fetchFees(lgrInfo->seq, context.yield); + + if (!lgrInfo || !fees) + return Status{RippledError::rpcINTERNAL}; + + auto age = std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count() - + lgrInfo->closeTime.time_since_epoch().count() - 946684800; + + if (age < 0) + age = 0; + + response[JS(info)] = boost::json::object{}; + boost::json::object& info = response[JS(info)].as_object(); + + info[JS(complete_ledgers)] = std::to_string(range->minSequence) + "-" + + std::to_string(range->maxSequence); + + bool admin = context.clientIp == "127.0.0.1"; + + if (admin) + { + info[JS(counters)] = context.counters.report(); + info[JS(counters)].as_object()["subscriptions"] = + context.subscriptions->report(); + } + + auto serverInfoRippled = context.balancer->forwardToRippled( + {{"command", "server_info"}}, context.clientIp, context.yield); + + info[JS(load_factor)] = 1; + info["clio_version"] = Build::getClioVersionString(); + if (serverInfoRippled && !serverInfoRippled->contains(JS(error))) + { + try + { + auto& rippledResult = serverInfoRippled->at(JS(result)).as_object(); + auto& rippledInfo = rippledResult.at(JS(info)).as_object(); + info[JS(load_factor)] = rippledInfo[JS(load_factor)]; + info[JS(validation_quorum)] = rippledInfo[JS(validation_quorum)]; + info["rippled_version"] = rippledInfo[JS(build_version)]; + } + catch (std::exception const&) + { + } + } + + info[JS(validated_ledger)] = boost::json::object{}; + boost::json::object& validated = info[JS(validated_ledger)].as_object(); + + validated[JS(age)] = 
age; + validated[JS(hash)] = ripple::strHex(lgrInfo->hash); + validated[JS(seq)] = lgrInfo->seq; + validated[JS(base_fee_xrp)] = fees->base.decimalXRP(); + validated[JS(reserve_base_xrp)] = fees->reserve.decimalXRP(); + validated[JS(reserve_inc_xrp)] = fees->increment.decimalXRP(); + + info["cache"] = boost::json::object{}; + auto& cache = info["cache"].as_object(); + + cache["size"] = context.backend->cache().size(); + cache["is_full"] = context.backend->cache().isFull(); + cache["latest_ledger_seq"] = + context.backend->cache().latestLedgerSequence(); + cache["object_hit_rate"] = context.backend->cache().getObjectHitRate(); + cache["successor_hit_rate"] = + context.backend->cache().getSuccessorHitRate(); + + if (admin) + { + info["etl"] = context.etl->getInfo(); + } + + return response; +} +} // namespace RPC diff --git a/src/rpc/handlers/Subscribe.cpp b/src/rpc/handlers/Subscribe.cpp new file mode 100644 index 00000000..257c91c2 --- /dev/null +++ b/src/rpc/handlers/Subscribe.cpp @@ -0,0 +1,467 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +#include + +namespace RPC { + +// these are the streams that take no arguments +static std::unordered_set validCommonStreams{ + "ledger", + "transactions", + "transactions_proposed", + "validations", + "manifests", + "book_changes"}; + +Status +validateStreams(boost::json::object const& request) +{ + for (auto const& streams = request.at(JS(streams)).as_array(); + auto const& stream : streams) + { + if (!stream.is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "streamNotString"}; + + if (!validCommonStreams.contains(stream.as_string().c_str())) + return Status{RippledError::rpcSTREAM_MALFORMED}; + } + + return OK; +} + +boost::json::object +subscribeToStreams( + boost::asio::yield_context& yield, + boost::json::object const& request, + std::shared_ptr session, + SubscriptionManager& manager) +{ + boost::json::array const& streams = request.at(JS(streams)).as_array(); + + boost::json::object response; + for (auto const& stream : streams) + { + std::string s = stream.as_string().c_str(); + + if (s == "ledger") + response = manager.subLedger(yield, session); + else if (s == "transactions") + manager.subTransactions(session); + else if (s == "transactions_proposed") + manager.subProposedTransactions(session); + else if (s == "validations") + manager.subValidation(session); + else if (s == "manifests") + manager.subManifest(session); + else if (s == "book_changes") + manager.subBookChanges(session); + else + assert(false); + } + return response; +} + +void +unsubscribeToStreams( + boost::json::object const& request, + std::shared_ptr session, + SubscriptionManager& manager) +{ + boost::json::array const& streams = request.at(JS(streams)).as_array(); + + for (auto const& stream : streams) + { + std::string s = stream.as_string().c_str(); + + if (s == "ledger") + manager.unsubLedger(session); + else if (s == "transactions") + 
manager.unsubTransactions(session); + else if (s == "transactions_proposed") + manager.unsubProposedTransactions(session); + else if (s == "validations") + manager.unsubValidation(session); + else if (s == "manifests") + manager.unsubManifest(session); + else if (s == "book_changes") + manager.unsubBookChanges(session); + else + assert(false); + } +} + +Status +validateAccounts(boost::json::array const& accounts) +{ + for (auto const& account : accounts) + { + if (!account.is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "accountNotString"}; + + if (!accountFromStringStrict(account.as_string().c_str())) + return Status{RippledError::rpcACT_MALFORMED, "Account malformed."}; + } + + return OK; +} + +void +subscribeToAccounts( + boost::json::object const& request, + std::shared_ptr session, + SubscriptionManager& manager) +{ + boost::json::array const& accounts = request.at(JS(accounts)).as_array(); + + for (auto const& account : accounts) + { + std::string s = account.as_string().c_str(); + + auto accountID = accountFromStringStrict(s); + + if (!accountID) + { + assert(false); + continue; + } + + manager.subAccount(*accountID, session); + } +} + +void +unsubscribeToAccounts( + boost::json::object const& request, + std::shared_ptr session, + SubscriptionManager& manager) +{ + boost::json::array const& accounts = request.at(JS(accounts)).as_array(); + + for (auto const& account : accounts) + { + std::string s = account.as_string().c_str(); + + auto accountID = accountFromStringStrict(s); + + if (!accountID) + { + assert(false); + continue; + } + + manager.unsubAccount(*accountID, session); + } +} + +void +subscribeToAccountsProposed( + boost::json::object const& request, + std::shared_ptr session, + SubscriptionManager& manager) +{ + boost::json::array const& accounts = + request.at(JS(accounts_proposed)).as_array(); + + for (auto const& account : accounts) + { + std::string s = account.as_string().c_str(); + + auto accountID = ripple::parseBase58(s); + + 
if (!accountID) + { + assert(false); + continue; + } + + manager.subProposedAccount(*accountID, session); + } +} + +void +unsubscribeToAccountsProposed( + boost::json::object const& request, + std::shared_ptr session, + SubscriptionManager& manager) +{ + boost::json::array const& accounts = + request.at(JS(accounts_proposed)).as_array(); + + for (auto const& account : accounts) + { + std::string s = account.as_string().c_str(); + + auto accountID = ripple::parseBase58(s); + + if (!accountID) + { + assert(false); + continue; + } + + manager.unsubProposedAccount(*accountID, session); + } +} + +std::variant, boost::json::array>> +validateAndGetBooks( + boost::asio::yield_context& yield, + boost::json::object const& request, + std::shared_ptr const& backend) +{ + if (!request.at(JS(books)).is_array()) + return Status{RippledError::rpcINVALID_PARAMS, "booksNotArray"}; + boost::json::array const& books = request.at(JS(books)).as_array(); + + std::vector booksToSub; + std::optional rng; + boost::json::array snapshot; + for (auto const& book : books) + { + auto parsedBook = parseBook(book.as_object()); + if (auto status = std::get_if(&parsedBook)) + return *status; + + auto b = std::get(parsedBook); + booksToSub.push_back(b); + bool both = book.as_object().contains(JS(both)); + if (both) + booksToSub.push_back(ripple::reversed(b)); + + if (book.as_object().contains(JS(snapshot))) + { + if (!rng) + rng = backend->fetchLedgerRange(); + ripple::AccountID takerID = beast::zero; + if (book.as_object().contains(JS(taker))) + if (auto const status = getTaker(book.as_object(), takerID); + status) + return status; + + auto getOrderBook = [&snapshot, &backend, &rng, &takerID]( + auto book, + boost::asio::yield_context& yield) { + auto bookBase = getBookBase(book); + auto [offers, retMarker] = backend->fetchBookOffers( + bookBase, rng->maxSequence, 200, {}, yield); + + auto orderBook = postProcessOrderBook( + offers, book, takerID, *backend, rng->maxSequence, yield); + std::copy( + 
orderBook.begin(), + orderBook.end(), + std::back_inserter(snapshot)); + }; + getOrderBook(b, yield); + if (both) + getOrderBook(ripple::reversed(b), yield); + } + } + return std::make_pair(booksToSub, snapshot); +} + +void +subscribeToBooks( + std::vector const& books, + std::shared_ptr session, + SubscriptionManager& manager) +{ + for (auto const& book : books) + { + manager.subBook(book, session); + } +} + +void +unsubscribeToBooks( + std::vector const& books, + std::shared_ptr session, + SubscriptionManager& manager) +{ + for (auto const& book : books) + { + manager.unsubBook(book, session); + } +} + +Result +doSubscribe(Context const& context) +{ + auto request = context.params; + + if (request.contains(JS(streams))) + { + if (!request.at(JS(streams)).is_array()) + return Status{RippledError::rpcINVALID_PARAMS, "streamsNotArray"}; + + auto status = validateStreams(request); + + if (status) + return status; + } + + if (request.contains(JS(accounts))) + { + auto const& jsonAccounts = request.at(JS(accounts)); + if (!jsonAccounts.is_array()) + return Status{RippledError::rpcINVALID_PARAMS, "accountsNotArray"}; + + auto const& accounts = jsonAccounts.as_array(); + if (accounts.empty()) + return Status{RippledError::rpcACT_MALFORMED, "Account malformed."}; + + auto const status = validateAccounts(accounts); + if (status) + return status; + } + + if (request.contains(JS(accounts_proposed))) + { + auto const& jsonAccounts = request.at(JS(accounts_proposed)); + if (!jsonAccounts.is_array()) + return Status{ + RippledError::rpcINVALID_PARAMS, "accountsProposedNotArray"}; + + auto const& accounts = jsonAccounts.as_array(); + if (accounts.empty()) + return Status{RippledError::rpcACT_MALFORMED, "Account malformed."}; + + auto const status = validateAccounts(accounts); + if (status) + return status; + } + + std::vector books; + boost::json::object response; + + if (request.contains(JS(books))) + { + auto parsed = + validateAndGetBooks(context.yield, request, 
context.backend); + if (auto status = std::get_if(&parsed)) + return *status; + auto [bks, snap] = + std::get, boost::json::array>>( + parsed); + books = std::move(bks); + response[JS(offers)] = std::move(snap); + } + + if (request.contains(JS(streams))) + response = subscribeToStreams( + context.yield, request, context.session, *context.subscriptions); + + if (request.contains(JS(accounts))) + subscribeToAccounts(request, context.session, *context.subscriptions); + + if (request.contains(JS(accounts_proposed))) + subscribeToAccountsProposed( + request, context.session, *context.subscriptions); + + if (request.contains(JS(books))) + subscribeToBooks(books, context.session, *context.subscriptions); + + return response; +} + +Result +doUnsubscribe(Context const& context) +{ + auto request = context.params; + + if (request.contains(JS(streams))) + { + if (!request.at(JS(streams)).is_array()) + return Status{RippledError::rpcINVALID_PARAMS, "streamsNotArray"}; + + auto status = validateStreams(request); + + if (status) + return status; + } + + if (request.contains(JS(accounts))) + { + auto const& jsonAccounts = request.at(JS(accounts)); + if (!jsonAccounts.is_array()) + return Status{RippledError::rpcINVALID_PARAMS, "accountsNotArray"}; + + auto const& accounts = jsonAccounts.as_array(); + if (accounts.empty()) + return Status{RippledError::rpcACT_MALFORMED, "Account malformed."}; + + auto const status = validateAccounts(accounts); + if (status) + return status; + } + + if (request.contains(JS(accounts_proposed))) + { + auto const& jsonAccounts = request.at(JS(accounts_proposed)); + if (!jsonAccounts.is_array()) + return Status{ + RippledError::rpcINVALID_PARAMS, "accountsProposedNotArray"}; + + auto const& accounts = jsonAccounts.as_array(); + if (accounts.empty()) + return Status{RippledError::rpcACT_MALFORMED, "Account malformed."}; + + auto const status = validateAccounts(accounts); + if (status) + return status; + } + + std::vector books; + if 
(request.contains(JS(books))) + { + auto parsed = + validateAndGetBooks(context.yield, request, context.backend); + + if (auto status = std::get_if(&parsed)) + return *status; + + auto [bks, snap] = + std::get, boost::json::array>>( + parsed); + + books = std::move(bks); + } + + if (request.contains(JS(streams))) + unsubscribeToStreams(request, context.session, *context.subscriptions); + + if (request.contains(JS(accounts))) + unsubscribeToAccounts(request, context.session, *context.subscriptions); + + if (request.contains(JS(accounts_proposed))) + unsubscribeToAccountsProposed( + request, context.session, *context.subscriptions); + + if (request.contains("books")) + unsubscribeToBooks(books, context.session, *context.subscriptions); + + return boost::json::object{}; +} + +} // namespace RPC diff --git a/src/rpc/handlers/TransactionEntry.cpp b/src/rpc/handlers/TransactionEntry.cpp new file mode 100644 index 00000000..c6b3ad81 --- /dev/null +++ b/src/rpc/handlers/TransactionEntry.cpp @@ -0,0 +1,62 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +namespace RPC { + +Result +doTransactionEntry(Context const& context) +{ + boost::json::object response; + auto v = ledgerInfoFromRequest(context); + if (auto status = std::get_if(&v)) + return *status; + + auto lgrInfo = std::get(v); + + ripple::uint256 hash; + if (!hash.parseHex(getRequiredString(context.params, JS(tx_hash)))) + return Status{RippledError::rpcINVALID_PARAMS, "malformedTransaction"}; + + auto dbResponse = context.backend->fetchTransaction(hash, context.yield); + // Note: transaction_entry is meant to only search a specified ledger for + // the specified transaction. tx searches the entire range of history. For + // rippled, having two separate commands made sense, as tx would use SQLite + // and transaction_entry used the nodestore. For clio though, there is no + // difference between the implementation of these two, as clio only stores + // transactions in a transactions table, where the key is the hash. However, + // the API for transaction_entry says the method only searches the specified + // ledger; we simulate that here by returning not found if the transaction + // is in a different ledger than the one specified. 
+ if (!dbResponse || dbResponse->ledgerSequence != lgrInfo.seq) + return Status{ + RippledError::rpcTXN_NOT_FOUND, + "transactionNotFound", + "Transaction not found."}; + + auto [txn, meta] = toExpandedJson(*dbResponse); + response[JS(tx_json)] = std::move(txn); + response[JS(metadata)] = std::move(meta); + response[JS(ledger_index)] = lgrInfo.seq; + response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); + return response; +} + +} // namespace RPC diff --git a/src/rpc/handlers/Tx.cpp b/src/rpc/handlers/Tx.cpp new file mode 100644 index 00000000..1b1de0c2 --- /dev/null +++ b/src/rpc/handlers/Tx.cpp @@ -0,0 +1,101 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +namespace RPC { + +// { +// transaction: +// } + +Result +doTx(Context const& context) +{ + auto request = context.params; + boost::json::object response = {}; + + if (!request.contains(JS(transaction))) + return Status{RippledError::rpcINVALID_PARAMS, "specifyTransaction"}; + + if (!request.at(JS(transaction)).is_string()) + return Status{RippledError::rpcINVALID_PARAMS, "transactionNotString"}; + + ripple::uint256 hash; + if (!hash.parseHex(request.at(JS(transaction)).as_string().c_str())) + return Status{RippledError::rpcINVALID_PARAMS, "malformedTransaction"}; + + bool binary = false; + if (request.contains(JS(binary))) + { + if (!request.at(JS(binary)).is_bool()) + return Status{RippledError::rpcINVALID_PARAMS, "binaryFlagNotBool"}; + + binary = request.at(JS(binary)).as_bool(); + } + auto minLedger = getUInt(request, JS(min_ledger)); + auto maxLedger = getUInt(request, JS(max_ledger)); + bool rangeSupplied = minLedger && maxLedger; + + if (rangeSupplied) + { + if (*minLedger > *maxLedger) + return Status{RippledError::rpcINVALID_LGR_RANGE}; + if (*maxLedger - *minLedger > 1000) + return Status{RippledError::rpcEXCESSIVE_LGR_RANGE}; + } + + auto range = context.backend->fetchLedgerRange(); + if (!range) + return Status{RippledError::rpcNOT_READY}; + + auto dbResponse = context.backend->fetchTransaction(hash, context.yield); + if (!dbResponse) + { + if (rangeSupplied) + { + bool searchedAll = range->maxSequence >= *maxLedger && + range->minSequence <= *minLedger; + boost::json::object extra; + extra["searched_all"] = searchedAll; + return Status{RippledError::rpcTXN_NOT_FOUND, std::move(extra)}; + } + return Status{RippledError::rpcTXN_NOT_FOUND}; + } + + if (!binary) + { + auto [txn, meta] = toExpandedJson(*dbResponse); + response = txn; + response[JS(meta)] = meta; + } + else + { + response[JS(tx)] = ripple::strHex(dbResponse->transaction); + 
response[JS(meta)] = ripple::strHex(dbResponse->metadata); + response[JS(hash)] = std::move(request.at(JS(transaction)).as_string()); + } + response[JS(date)] = dbResponse->date; + response[JS(ledger_index)] = dbResponse->ledgerSequence; + + return response; +} + +} // namespace RPC diff --git a/src/rpc/ngHandlers/AccountChannels.cpp b/src/rpc/ngHandlers/AccountChannels.cpp new file mode 100644 index 00000000..aeeb6963 --- /dev/null +++ b/src/rpc/ngHandlers/AccountChannels.cpp @@ -0,0 +1,210 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +namespace RPCng { + +void +AccountChannelsHandler::addChannel( + std::vector& jsonChannels, + ripple::SLE const& channelSle) const +{ + ChannelResponse channel; + channel.channelID = ripple::to_string(channelSle.key()); + channel.account = + ripple::to_string(channelSle.getAccountID(ripple::sfAccount)); + channel.accountDestination = + ripple::to_string(channelSle.getAccountID(ripple::sfDestination)); + channel.amount = channelSle[ripple::sfAmount].getText(); + channel.balance = channelSle[ripple::sfBalance].getText(); + if (publicKeyType(channelSle[ripple::sfPublicKey])) + { + ripple::PublicKey const pk(channelSle[ripple::sfPublicKey]); + channel.publicKey = toBase58(ripple::TokenType::AccountPublic, pk); + channel.publicKeyHex = strHex(pk); + } + channel.settleDelay = channelSle[ripple::sfSettleDelay]; + if (auto const& v = channelSle[~ripple::sfExpiration]) + channel.expiration = *v; + if (auto const& v = channelSle[~ripple::sfCancelAfter]) + channel.cancelAfter = *v; + if (auto const& v = channelSle[~ripple::sfSourceTag]) + channel.sourceTag = *v; + if (auto const& v = channelSle[~ripple::sfDestinationTag]) + channel.destinationTag = *v; + + jsonChannels.push_back(channel); +} + +AccountChannelsHandler::Result +AccountChannelsHandler::process( + AccountChannelsHandler::Input input, + boost::asio::yield_context& yield) const +{ + auto const range = sharedPtrBackend_->fetchLedgerRange(); + auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( + *sharedPtrBackend_, + yield, + input.ledgerHash, + input.ledgerIndex, + range->maxSequence); + + if (auto status = std::get_if(&lgrInfoOrStatus)) + return Error{*status}; + + auto const lgrInfo = std::get(lgrInfoOrStatus); + + // no need to check the return value, validator check for us + auto const accountID = RPC::accountFromStringStrict(input.account); + + auto const accountLedgerObject = 
sharedPtrBackend_->fetchLedgerObject( + ripple::keylet::account(*accountID).key, lgrInfo.seq, yield); + if (!accountLedgerObject) + return Error{RPC::Status{ + RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + + auto const destAccountID = input.destinationAccount + ? RPC::accountFromStringStrict(input.destinationAccount.value()) + : std::optional{}; + + Output response; + auto const addToResponse = [&](ripple::SLE&& sle) { + if (sle.getType() == ripple::ltPAYCHAN && + sle.getAccountID(ripple::sfAccount) == accountID && + (!destAccountID || + *destAccountID == sle.getAccountID(ripple::sfDestination))) + { + addChannel(response.channels, sle); + } + return true; + }; + + auto const next = RPC::ngTraverseOwnedNodes( + *sharedPtrBackend_, + *accountID, + lgrInfo.seq, + input.limit, + input.marker, + yield, + addToResponse); + + response.account = input.account; + response.limit = input.limit; + response.ledgerHash = ripple::strHex(lgrInfo.hash); + response.ledgerIndex = lgrInfo.seq; + + auto const nextMarker = std::get(next); + if (nextMarker.isNonZero()) + response.marker = nextMarker.toString(); + + return response; +} + +AccountChannelsHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv) +{ + auto const& jsonObject = jv.as_object(); + AccountChannelsHandler::Input input; + input.account = jv.at("account").as_string().c_str(); + if (jsonObject.contains("limit")) + { + input.limit = jv.at("limit").as_int64(); + } + if (jsonObject.contains("marker")) + { + input.marker = jv.at("marker").as_string().c_str(); + } + if (jsonObject.contains("ledger_hash")) + { + input.ledgerHash = jv.at("ledger_hash").as_string().c_str(); + } + if (jsonObject.contains("destination_account")) + { + input.destinationAccount = + jv.at("destination_account").as_string().c_str(); + } + if (jsonObject.contains("ledger_index")) + { + if (!jsonObject.at("ledger_index").is_string()) + { + input.ledgerIndex = jv.at("ledger_index").as_int64(); + } + else 
if (jsonObject.at("ledger_index").as_string() != "validated") + { + input.ledgerIndex = + std::stoi(jv.at("ledger_index").as_string().c_str()); + } + } + + return input; +} + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + AccountChannelsHandler::Output const& output) +{ + boost::json::object obj; + obj = { + {"account", output.account}, + {"ledger_hash", output.ledgerHash}, + {"ledger_index", output.ledgerIndex}, + {"validated", output.validated}, + {"limit", output.limit}, + {"channels", output.channels}}; + if (output.marker) + obj["marker"] = output.marker.value(); + jv = obj; +} + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + AccountChannelsHandler::ChannelResponse const& channel) +{ + boost::json::object obj; + obj = { + {"channel_id", channel.channelID}, + {"account", channel.account}, + {"account_destination", channel.accountDestination}, + {"amount", channel.amount}, + {"balance", channel.balance}, + {"settle_delay", channel.settleDelay}}; + if (channel.publicKey) + obj["public_key"] = *(channel.publicKey); + if (channel.publicKeyHex) + obj["public_key_hex"] = *(channel.publicKeyHex); + if (channel.expiration) + obj["expiration"] = *(channel.expiration); + if (channel.cancelAfter) + obj["cancel_after"] = *(channel.cancelAfter); + if (channel.sourceTag) + obj["source_tag"] = *(channel.sourceTag); + if (channel.destinationTag) + obj["destination_tag"] = *(channel.destinationTag); + jv = obj; +} +} // namespace RPCng diff --git a/src/rpc/ngHandlers/AccountChannels.h b/src/rpc/ngHandlers/AccountChannels.h new file mode 100644 index 00000000..4a5ef53a --- /dev/null +++ b/src/rpc/ngHandlers/AccountChannels.h @@ -0,0 +1,126 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +#include + +#include + +namespace RPCng { +class AccountChannelsHandler +{ + // dependencies + std::shared_ptr const sharedPtrBackend_; + +public: + // type align with SField.h + struct ChannelResponse + { + std::string channelID; + std::string account; + std::string accountDestination; + std::string amount; + std::string balance; + std::optional publicKey; + std::optional publicKeyHex; + uint32_t settleDelay; + std::optional expiration; + std::optional cancelAfter; + std::optional sourceTag; + std::optional destinationTag; + }; + + struct Output + { + std::vector channels; + std::string account; + std::string ledgerHash; + uint32_t ledgerIndex; + // validated should be sent via framework + bool validated = true; + uint32_t limit; + std::optional marker; + }; + + struct Input + { + std::string account; + std::optional destinationAccount; + std::optional ledgerHash; + std::optional ledgerIndex; + uint32_t limit = 50; + std::optional marker; + }; + + using Result = RPCng::HandlerReturnType; + + AccountChannelsHandler( + std::shared_ptr const& sharedPtrBackend) + : sharedPtrBackend_(sharedPtrBackend) + { + } + + RpcSpecConstRef + 
spec() const + { + // clang-format off + static const RpcSpec rpcSpec = { + {"account", validation::Required{}, validation::AccountValidator}, + {"destination_account", validation::Type{},validation::AccountValidator}, + {"ledger_hash", validation::Uint256HexStringValidator}, + {"limit", validation::Type{},validation::Between{10,400}}, + {"ledger_index", validation::LedgerIndexValidator}, + {"marker", validation::MarkerValidator} + }; + // clang-format on + + return rpcSpec; + } + + Result + process(Input input, boost::asio::yield_context& yield) const; + +private: + void + addChannel(std::vector& jsonLines, ripple::SLE const& line) + const; +}; + +AccountChannelsHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv); + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + AccountChannelsHandler::Output const& output); + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + AccountChannelsHandler::ChannelResponse const& channel); +} // namespace RPCng diff --git a/src/rpc/ngHandlers/AccountCurrencies.cpp b/src/rpc/ngHandlers/AccountCurrencies.cpp new file mode 100644 index 00000000..e5b5a17d --- /dev/null +++ b/src/rpc/ngHandlers/AccountCurrencies.cpp @@ -0,0 +1,128 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace RPCng { +AccountCurrenciesHandler::Result +AccountCurrenciesHandler::process( + AccountCurrenciesHandler::Input input, + boost::asio::yield_context& yield) const +{ + auto const range = sharedPtrBackend_->fetchLedgerRange(); + auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( + *sharedPtrBackend_, + yield, + input.ledgerHash, + input.ledgerIndex, + range->maxSequence); + + if (auto const status = std::get_if(&lgrInfoOrStatus)) + return Error{*status}; + + auto const lgrInfo = std::get(lgrInfoOrStatus); + + auto const accountID = RPC::accountFromStringStrict(input.account); + + auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject( + ripple::keylet::account(*accountID).key, lgrInfo.seq, yield); + if (!accountLedgerObject) + return Error{RPC::Status{ + RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + + Output response; + auto const addToResponse = [&](ripple::SLE&& sle) { + if (sle.getType() == ripple::ltRIPPLE_STATE) + { + ripple::STAmount balance = sle.getFieldAmount(ripple::sfBalance); + auto const lowLimit = sle.getFieldAmount(ripple::sfLowLimit); + auto const highLimit = sle.getFieldAmount(ripple::sfHighLimit); + bool const viewLowest = (lowLimit.getIssuer() == accountID); + auto const lineLimit = viewLowest ? lowLimit : highLimit; + auto const lineLimitPeer = !viewLowest ? 
lowLimit : highLimit; + if (!viewLowest) + balance.negate(); + if (balance < lineLimit) + response.receiveCurrencies.insert( + ripple::to_string(balance.getCurrency())); + if ((-balance) < lineLimitPeer) + response.sendCurrencies.insert( + ripple::to_string(balance.getCurrency())); + } + return true; + }; + + // traverse all owned nodes, limit->max, marker->empty + RPC::ngTraverseOwnedNodes( + *sharedPtrBackend_, + *accountID, + lgrInfo.seq, + std::numeric_limits::max(), + {}, + yield, + addToResponse); + + response.ledgerHash = ripple::strHex(lgrInfo.hash); + response.ledgerIndex = lgrInfo.seq; + return response; +} + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + AccountCurrenciesHandler::Output const& output) +{ + jv = { + {"ledger_hash", output.ledgerHash}, + {"ledger_index", output.ledgerIndex}, + {"validated", output.validated}, + {"receive_currencies", output.receiveCurrencies}, + {"send_currencies", output.sendCurrencies}}; +} + +AccountCurrenciesHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv) +{ + auto const& jsonObject = jv.as_object(); + AccountCurrenciesHandler::Input input; + input.account = jv.at("account").as_string().c_str(); + if (jsonObject.contains("ledger_hash")) + { + input.ledgerHash = jv.at("ledger_hash").as_string().c_str(); + } + if (jsonObject.contains("ledger_index")) + { + if (!jsonObject.at("ledger_index").is_string()) + { + input.ledgerIndex = jv.at("ledger_index").as_int64(); + } + else if (jsonObject.at("ledger_index").as_string() != "validated") + { + input.ledgerIndex = + std::stoi(jv.at("ledger_index").as_string().c_str()); + } + } + return input; +} + +} // namespace RPCng diff --git a/src/rpc/ngHandlers/AccountCurrencies.h b/src/rpc/ngHandlers/AccountCurrencies.h new file mode 100644 index 00000000..c3537a6a --- /dev/null +++ b/src/rpc/ngHandlers/AccountCurrencies.h @@ -0,0 +1,87 @@ 
+//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +#include + +#include + +namespace RPCng { +class AccountCurrenciesHandler +{ + // dependencies + std::shared_ptr sharedPtrBackend_; + +public: + struct Output + { + std::string ledgerHash; + uint32_t ledgerIndex; + std::set receiveCurrencies; + std::set sendCurrencies; + // validated should be sent via framework + bool validated = true; + }; + + // TODO:we did not implement the "strict" field + struct Input + { + std::string account; + std::optional ledgerHash; + std::optional ledgerIndex; + }; + + using Result = RPCng::HandlerReturnType; + + AccountCurrenciesHandler( + std::shared_ptr const& sharedPtrBackend) + : sharedPtrBackend_(sharedPtrBackend) + { + } + + RpcSpecConstRef + spec() const + { + static const RpcSpec rpcSpec = { + {"account", validation::Required{}, validation::AccountValidator}, + {"ledger_hash", validation::Uint256HexStringValidator}, + {"ledger_index", validation::LedgerIndexValidator}}; + return rpcSpec; + } + + Result + 
process(Input input, boost::asio::yield_context& yield) const; +}; + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + AccountCurrenciesHandler::Output const& output); + +AccountCurrenciesHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv); +} // namespace RPCng diff --git a/src/rpc/ngHandlers/GatewayBalances.cpp b/src/rpc/ngHandlers/GatewayBalances.cpp new file mode 100644 index 00000000..b1303fd8 --- /dev/null +++ b/src/rpc/ngHandlers/GatewayBalances.cpp @@ -0,0 +1,246 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +namespace RPCng { + +GatewayBalancesHandler::Result +GatewayBalancesHandler::process( + GatewayBalancesHandler::Input input, + boost::asio::yield_context& yield) const +{ + // check ledger + auto const range = sharedPtrBackend_->fetchLedgerRange(); + auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( + *sharedPtrBackend_, + yield, + input.ledgerHash, + input.ledgerIndex, + range->maxSequence); + if (auto const status = std::get_if(&lgrInfoOrStatus)) + return Error{*status}; + + // check account + auto const lgrInfo = std::get(lgrInfoOrStatus); + auto const accountID = RPC::accountFromStringStrict(input.account); + auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject( + ripple::keylet::account(*accountID).key, lgrInfo.seq, yield); + if (!accountLedgerObject) + return Error{RPC::Status{ + RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + + GatewayBalancesHandler::Output output; + + auto const addToResponse = [&](ripple::SLE&& sle) { + if (sle.getType() == ripple::ltRIPPLE_STATE) + { + ripple::STAmount balance = sle.getFieldAmount(ripple::sfBalance); + auto const lowLimit = sle.getFieldAmount(ripple::sfLowLimit); + auto const highLimit = sle.getFieldAmount(ripple::sfHighLimit); + auto const lowID = lowLimit.getIssuer(); + auto const highID = highLimit.getIssuer(); + auto const viewLowest = (lowLimit.getIssuer() == accountID); + auto const flags = sle.getFieldU32(ripple::sfFlags); + auto const freeze = flags & + (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + if (!viewLowest) + balance.negate(); + + auto const balSign = balance.signum(); + if (balSign == 0) + return true; + + auto const& peer = !viewLowest ? 
lowID : highID; + + // Here, a negative balance means the cold wallet owes (normal) + // A positive balance means the cold wallet has an asset + // (unusual) + + if (input.hotWallets.count(peer) > 0) + { + // This is a specified hot wallet + output.hotBalances[peer].push_back(-balance); + } + else if (balSign > 0) + { + // This is a gateway asset + output.assets[peer].push_back(balance); + } + else if (freeze) + { + // An obligation the gateway has frozen + output.frozenBalances[peer].push_back(-balance); + } + else + { + // normal negative balance, obligation to customer + auto& bal = output.sums[balance.getCurrency()]; + if (bal == beast::zero) + { + // This is needed to set the currency code correctly + bal = -balance; + } + else + { + try + { + bal -= balance; + } + catch (std::runtime_error const& e) + { + output.overflow = true; + } + } + } + } + return true; + }; + + // traverse all owned nodes, limit->max, marker->empty + auto const ret = RPC::ngTraverseOwnedNodes( + *sharedPtrBackend_, + *accountID, + lgrInfo.seq, + std::numeric_limits::max(), + {}, + yield, + addToResponse); + + if (auto status = std::get_if(&ret)) + return Error{*status}; + + if (not std::all_of( + input.hotWallets.begin(), + input.hotWallets.end(), + [&](auto const& hw) { return output.hotBalances.contains(hw); })) + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, "invalidHotWallet"}}; + + output.accountID = input.account; + output.ledgerHash = ripple::strHex(lgrInfo.hash); + output.ledgerIndex = lgrInfo.seq; + return output; +} + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + GatewayBalancesHandler::Output const& output) +{ + boost::json::object obj; + if (!output.sums.empty()) + { + boost::json::object obligations; + for (auto const& [k, v] : output.sums) + { + obligations[ripple::to_string(k)] = v.getText(); + } + obj["obligations"] = std::move(obligations); + } + + auto const toJson = + [](std::map> const& + balances) { + 
boost::json::object balancesObj; + if (!balances.empty()) + { + for (auto const& [accId, accBalances] : balances) + { + boost::json::array arr; + for (auto const& balance : accBalances) + { + boost::json::object entry; + entry[JS(currency)] = + ripple::to_string(balance.issue().currency); + entry[JS(value)] = balance.getText(); + arr.push_back(std::move(entry)); + } + balancesObj[ripple::to_string(accId)] = std::move(arr); + } + } + return balancesObj; + }; + + if (auto balances = toJson(output.hotBalances); balances.size()) + obj["balances"] = balances; + + // we don't have frozen_balances field in the + // document:https://xrpl.org/gateway_balances.html#gateway_balances + if (auto balances = toJson(output.frozenBalances); balances.size()) + obj["frozen_balances"] = balances; + if (auto balances = toJson(output.assets); balances.size()) + obj["assets"] = balances; + obj["account"] = output.accountID; + obj["ledger_index"] = output.ledgerIndex; + obj["ledger_hash"] = output.ledgerHash; + if (output.overflow) + obj["overflow"] = true; + jv = std::move(obj); +} + +GatewayBalancesHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv) +{ + auto const& jsonObject = jv.as_object(); + GatewayBalancesHandler::Input input; + input.account = jv.at("account").as_string().c_str(); + if (jsonObject.contains("ledger_hash")) + { + input.ledgerHash = jv.at("ledger_hash").as_string().c_str(); + } + if (jsonObject.contains("ledger_index")) + { + if (!jsonObject.at("ledger_index").is_string()) + { + input.ledgerIndex = jv.at("ledger_index").as_int64(); + } + else if (jsonObject.at("ledger_index").as_string() != "validated") + { + input.ledgerIndex = + std::stoi(jv.at("ledger_index").as_string().c_str()); + } + } + if (jsonObject.contains("hotwallet")) + { + if (jsonObject.at("hotwallet").is_string()) + { + input.hotWallets.insert(*RPC::accountFromStringStrict( + jv.at("hotwallet").as_string().c_str())); + } + else + { + auto const& hotWallets = 
jv.at("hotwallet").as_array(); + std::transform( + hotWallets.begin(), + hotWallets.end(), + std::inserter(input.hotWallets, input.hotWallets.begin()), + [](auto const& hotWallet) { + return *RPC::accountFromStringStrict( + hotWallet.as_string().c_str()); + }); + } + } + return input; +} +} // namespace RPCng diff --git a/src/rpc/ngHandlers/GatewayBalances.h b/src/rpc/ngHandlers/GatewayBalances.h new file mode 100644 index 00000000..fa722a09 --- /dev/null +++ b/src/rpc/ngHandlers/GatewayBalances.h @@ -0,0 +1,128 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +#include + +namespace RPCng { +class GatewayBalancesHandler +{ + std::shared_ptr sharedPtrBackend_; + +public: + struct Output + { + std::string ledgerHash; + uint32_t ledgerIndex; + std::string accountID; + bool overflow = false; + std::map sums; + std::map> hotBalances; + std::map> assets; + std::map> + frozenBalances; + // validated should be sent via framework + bool validated = true; + }; + + // TODO:we did not implement the "strict" field + struct Input + { + std::string account; + std::set hotWallets; + std::optional ledgerHash; + std::optional ledgerIndex; + }; + + using Result = RPCng::HandlerReturnType; + + GatewayBalancesHandler( + std::shared_ptr const& sharedPtrBackend) + : sharedPtrBackend_(sharedPtrBackend) + { + } + + RpcSpecConstRef + spec() const + { + static auto const hotWalletValidator = validation::CustomValidator{ + [](boost::json::value const& value, + std::string_view key) -> MaybeError { + if (!value.is_string() && !value.is_array()) + { + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + "NotStringOrArray"}}; + } + // wallet needs to be an valid accountID or public key + auto const wallets = value.is_array() + ? 
value.as_array() + : boost::json::array{value}; + auto const getAccountID = + [](auto const& j) -> std::optional { + if (j.is_string()) + { + auto const pk = ripple::parseBase58( + ripple::TokenType::AccountPublic, + j.as_string().c_str()); + if (pk) + return ripple::calcAccountID(*pk); + return ripple::parseBase58( + j.as_string().c_str()); + } + return {}; + }; + for (auto const& wallet : wallets) + { + if (!getAccountID(wallet)) + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + std::string(key) + "Malformed"}}; + } + return MaybeError{}; + }}; + + static const RpcSpec rpcSpec = { + {"account", validation::Required{}, validation::AccountValidator}, + {"ledger_hash", validation::Uint256HexStringValidator}, + {"ledger_index", validation::LedgerIndexValidator}, + {"hotwallet", hotWalletValidator}}; + return rpcSpec; + } + + Result + process(Input input, boost::asio::yield_context& yield) const; +}; + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + GatewayBalancesHandler::Output const& output); + +GatewayBalancesHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv); +} // namespace RPCng diff --git a/src/rpc/ngHandlers/LedgerEntry.cpp b/src/rpc/ngHandlers/LedgerEntry.cpp new file mode 100644 index 00000000..ae7acd77 --- /dev/null +++ b/src/rpc/ngHandlers/LedgerEntry.cpp @@ -0,0 +1,282 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +#include + +namespace RPCng { +LedgerEntryHandler::Result +LedgerEntryHandler::process( + LedgerEntryHandler::Input input, + boost::asio::yield_context& yield) const +{ + ripple::uint256 key; + if (input.index) + { + key = ripple::uint256{std::string_view(*(input.index))}; + } + else if (input.accountRoot) + { + key = ripple::keylet::account( + *ripple::parseBase58(*(input.accountRoot))) + .key; + } + else if (input.directory) + { + auto const keyOrStatus = composeKeyFromDirectory(*input.directory); + if (auto const status = std::get_if(&keyOrStatus)) + return Error{*status}; + key = std::get(keyOrStatus); + } + else if (input.offer) + { + auto const id = ripple::parseBase58( + input.offer->at("account").as_string().c_str()); + key = ripple::keylet::offer( + *id, + boost::json::value_to(input.offer->at("seq"))) + .key; + } + else if (input.rippleStateAccount) + { + auto const id1 = ripple::parseBase58( + input.rippleStateAccount->at("accounts") + .as_array() + .at(0) + .as_string() + .c_str()); + auto const id2 = ripple::parseBase58( + input.rippleStateAccount->at("accounts") + .as_array() + .at(1) + .as_string() + .c_str()); + auto const currency = ripple::to_currency( + input.rippleStateAccount->at("currency").as_string().c_str()); + key = ripple::keylet::line(*id1, *id2, currency).key; + } + else if (input.escrow) + { + auto const id = ripple::parseBase58( + input.escrow->at("owner").as_string().c_str()); + key = + ripple::keylet::escrow(*id, input.escrow->at("seq").as_int64()).key; + } + else if (input.depositPreauth) + { + auto 
const owner = ripple::parseBase58( + input.depositPreauth->at("owner").as_string().c_str()); + auto const authorized = ripple::parseBase58( + input.depositPreauth->at("authorized").as_string().c_str()); + key = ripple::keylet::depositPreauth(*owner, *authorized).key; + } + else if (input.ticket) + { + auto const id = ripple::parseBase58( + input.ticket->at("account").as_string().c_str()); + key = ripple::getTicketIndex( + *id, input.ticket->at("ticket_seq").as_int64()); + } + else + { + // Must specify 1 of the following fields to indicate what type + return Error{ + RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "unknownOption"}}; + } + + // check ledger exists + auto const range = sharedPtrBackend_->fetchLedgerRange(); + auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( + *sharedPtrBackend_, + yield, + input.ledgerHash, + input.ledgerIndex, + range->maxSequence); + + if (auto const status = std::get_if(&lgrInfoOrStatus)) + return Error{*status}; + + auto const lgrInfo = std::get(lgrInfoOrStatus); + auto const ledgerObject = + sharedPtrBackend_->fetchLedgerObject(key, lgrInfo.seq, yield); + if (!ledgerObject || ledgerObject->size() == 0) + return Error{RPC::Status{"entryNotFound"}}; + + ripple::STLedgerEntry const sle{ + ripple::SerialIter{ledgerObject->data(), ledgerObject->size()}, key}; + if (input.expectedType != ripple::ltANY && + sle.getType() != input.expectedType) + return Error{RPC::Status{"unexpectedLedgerType"}}; + + LedgerEntryHandler::Output output; + output.index = ripple::strHex(key); + output.ledgerIndex = lgrInfo.seq; + output.ledgerHash = ripple::strHex(lgrInfo.hash); + if (input.binary) + { + output.nodeBinary = ripple::strHex(*ledgerObject); + } + else + { + output.node = RPC::toJson(sle); + } + return output; +} + +std::variant +LedgerEntryHandler::composeKeyFromDirectory( + boost::json::object const& directory) const noexcept +{ + // can not specify both dir_root and owner. 
+ if (directory.contains("dir_root") && directory.contains("owner")) + return RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + "mayNotSpecifyBothDirRootAndOwner"}; + // at least one should availiable + if (!(directory.contains("dir_root") || directory.contains("owner"))) + return RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"}; + + uint64_t const subIndex = directory.contains("sub_index") + ? boost::json::value_to(directory.at("sub_index")) + : 0; + + if (directory.contains("dir_root")) + { + ripple::uint256 const uDirRoot{ + directory.at("dir_root").as_string().c_str()}; + return ripple::keylet::page(uDirRoot, subIndex).key; + } + + auto const ownerID = ripple::parseBase58( + directory.at("owner").as_string().c_str()); + return ripple::keylet::page(ripple::keylet::ownerDir(*ownerID), subIndex) + .key; +} + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + LedgerEntryHandler::Output const& output) +{ + auto object = boost::json::object{ + {"ledger_hash", output.ledgerHash}, + {"ledger_index", output.ledgerIndex}, + {"validated", output.validated}, + {"index", output.index}}; + if (output.nodeBinary) + { + object["node_binary"] = *(output.nodeBinary); + } + else + { + object["node"] = *(output.node); + } + jv = std::move(object); +} + +LedgerEntryHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv) +{ + auto const& jsonObject = jv.as_object(); + LedgerEntryHandler::Input input; + if (jsonObject.contains("ledger_hash")) + { + input.ledgerHash = jv.at("ledger_hash").as_string().c_str(); + } + if (jsonObject.contains("ledger_index")) + { + if (!jsonObject.at("ledger_index").is_string()) + { + input.ledgerIndex = jv.at("ledger_index").as_int64(); + } + else if (jsonObject.at("ledger_index").as_string() != "validated") + { + input.ledgerIndex = + std::stoi(jv.at("ledger_index").as_string().c_str()); + } + } + if (jsonObject.contains("binary")) + { + input.binary = 
jv.at("binary").as_bool(); + } + // check all the protential index + static auto const indexFieldTypeMap = + std::unordered_map{ + {"index", ripple::ltANY}, + {"directory", ripple::ltDIR_NODE}, + {"offer", ripple::ltOFFER}, + {"check", ripple::ltCHECK}, + {"escrow", ripple::ltESCROW}, + {"payment_channel", ripple::ltPAYCHAN}, + {"deposit_preauth", ripple::ltDEPOSIT_PREAUTH}, + {"ticket", ripple::ltTICKET}}; + + auto const indexFieldType = std::find_if( + indexFieldTypeMap.begin(), + indexFieldTypeMap.end(), + [&jsonObject](auto const& pair) { + auto const& [field, _] = pair; + return jsonObject.contains(field) && + jsonObject.at(field).is_string(); + }); + if (indexFieldType != indexFieldTypeMap.end()) + { + input.index = jv.at(indexFieldType->first).as_string().c_str(); + input.expectedType = indexFieldType->second; + } + // check if request for account root + else if (jsonObject.contains("account_root")) + { + input.accountRoot = jv.at("account_root").as_string().c_str(); + } + // no need to check if_object again, validator only allows string or object + else if (jsonObject.contains("directory")) + { + input.directory = jv.at("directory").as_object(); + } + else if (jsonObject.contains("offer")) + { + input.offer = jv.at("offer").as_object(); + } + else if (jsonObject.contains("ripple_state")) + { + input.rippleStateAccount = jv.at("ripple_state").as_object(); + } + else if (jsonObject.contains("escrow")) + { + input.escrow = jv.at("escrow").as_object(); + } + else if (jsonObject.contains("deposit_preauth")) + { + input.depositPreauth = jv.at("deposit_preauth").as_object(); + } + else if (jsonObject.contains("ticket")) + { + input.ticket = jv.at("ticket").as_object(); + } + return input; +} + +} // namespace RPCng diff --git a/src/rpc/ngHandlers/LedgerEntry.h b/src/rpc/ngHandlers/LedgerEntry.h new file mode 100644 index 00000000..2a2f948b --- /dev/null +++ b/src/rpc/ngHandlers/LedgerEntry.h @@ -0,0 +1,209 @@ 
+//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +#include + +namespace RPCng { + +class LedgerEntryHandler +{ + std::shared_ptr sharedPtrBackend_; + +public: + struct Output + { + std::string index; + uint32_t ledgerIndex; + std::string ledgerHash; + std::optional node; + std::optional nodeBinary; + bool validated = true; + }; + + // TODO: nft_page has not been implemented + struct Input + { + std::optional ledgerHash; + std::optional ledgerIndex; + bool binary = false; + // id of this ledger entry: 256 bits hex string + std::optional index; + // index can be extracted from payment_channel, check, escrow, offer + // etc, expectedType is used to save the type of index + ripple::LedgerEntryType expectedType = ripple::ltANY; + // account id to address account root object + std::optional accountRoot; + // TODO: extract into custom objects, remove json from Input + std::optional directory; + std::optional offer; + std::optional rippleStateAccount; + std::optional escrow; + std::optional 
depositPreauth; + std::optional ticket; + }; + + using Result = RPCng::HandlerReturnType; + + LedgerEntryHandler( + std::shared_ptr const& sharedPtrBackend) + : sharedPtrBackend_(sharedPtrBackend) + { + } + + RpcSpecConstRef + spec() const + { + // Validator only works in this handler + // The accounts array must have two different elements + // Each element must be a valid address + static auto const rippleStateAccountsCheck = + validation::CustomValidator{ + [](boost::json::value const& value, + std::string_view key) -> MaybeError { + if (!value.is_array() || value.as_array().size() != 2 || + !value.as_array()[0].is_string() || + !value.as_array()[1].is_string() || + value.as_array()[0].as_string() == + value.as_array()[1].as_string()) + return Error{RPC::Status{ + RPC::RippledError::rpcINVALID_PARAMS, + "malformedAccounts"}}; + auto const id1 = ripple::parseBase58( + value.as_array()[0].as_string().c_str()); + auto const id2 = ripple::parseBase58( + value.as_array()[1].as_string().c_str()); + if (!id1 || !id2) + return Error{RPC::Status{ + RPC::ClioError::rpcMALFORMED_ADDRESS, + "malformedAddresses"}}; + return MaybeError{}; + }}; + + static const RpcSpec rpcSpec = { + {"binary", validation::Type{}}, + {"ledger_hash", validation::Uint256HexStringValidator}, + {"ledger_index", validation::LedgerIndexValidator}, + {"index", validation::Uint256HexStringValidator}, + {"account_root", validation::AccountBase58Validator}, + {"check", validation::Uint256HexStringValidator}, + {"deposit_preauth", + validation::Type{}, + validation::IfType{ + validation::Uint256HexStringValidator}, + validation::IfType{ + validation::Section{ + {"owner", + validation::Required{}, + validation::AccountBase58Validator}, + {"authorized", + validation::Required{}, + validation::AccountBase58Validator}, + }, + }}, + {"directory", + validation::Type{}, + validation::IfType{ + validation::Uint256HexStringValidator}, + validation::IfType{validation::Section{ + {"owner", 
validation::AccountBase58Validator}, + {"dir_root", validation::Uint256HexStringValidator}, + {"sub_index", validation::Type{}}}}}, + {"escrow", + validation::Type{}, + validation::IfType{ + validation::Uint256HexStringValidator}, + validation::IfType{ + validation::Section{ + {"owner", + validation::Required{}, + validation::AccountBase58Validator}, + {"seq", + validation::Required{}, + validation::Type{}}, + }, + }}, + {"offer", + validation::Type{}, + validation::IfType{ + validation::Uint256HexStringValidator}, + validation::IfType{ + validation::Section{ + {"account", + validation::Required{}, + validation::AccountBase58Validator}, + {"seq", + validation::Required{}, + validation::Type{}}, + }, + }}, + {"payment_channel", validation::Uint256HexStringValidator}, + {"ripple_state", + validation::Type{}, + validation::Section{ + {"accounts", validation::Required{}, rippleStateAccountsCheck}, + {"currency", + validation::Required{}, + validation::CurrencyValidator}, + }}, + {"ticket", + validation::Type{}, + validation::IfType{ + validation::Uint256HexStringValidator}, + validation::IfType{ + validation::Section{ + {"account", + validation::Required{}, + validation::AccountBase58Validator}, + {"ticket_seq", + validation::Required{}, + validation::Type{}}, + }, + }}, + }; + return rpcSpec; + } + + Result + process(Input input, boost::asio::yield_context& yield) const; + +private: + // dir_root and owner can not be both empty or filled at the same time + // This function will return an error if this is the case + std::variant + composeKeyFromDirectory( + boost::json::object const& directory) const noexcept; +}; + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + LedgerEntryHandler::Output const& output); + +LedgerEntryHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv); +} // namespace RPCng diff --git a/src/rpc/ngHandlers/Ping.h b/src/rpc/ngHandlers/Ping.h new file mode 100644 index 00000000..1fd769cd 
--- /dev/null +++ b/src/rpc/ngHandlers/Ping.h @@ -0,0 +1,38 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include + +namespace RPCng { + +class PingHandler +{ +public: + using Output = VoidOutput; + using Result = HandlerReturnType; + + Result + process() const + { + return Output{}; + } +}; +} // namespace RPCng diff --git a/src/rpc/ngHandlers/Tx.cpp b/src/rpc/ngHandlers/Tx.cpp new file mode 100644 index 00000000..632a2f2b --- /dev/null +++ b/src/rpc/ngHandlers/Tx.cpp @@ -0,0 +1,123 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace RPCng { + +TxHandler::Result +TxHandler::process(Input input, boost::asio::yield_context& yield) const +{ + constexpr static auto maxLedgerRange = 1000u; + auto const rangeSupplied = input.minLedger && input.maxLedger; + + if (rangeSupplied) + { + if (*input.minLedger > *input.maxLedger) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_LGR_RANGE}}; + + if (*input.maxLedger - *input.minLedger > maxLedgerRange) + return Error{ + RPC::Status{RPC::RippledError::rpcEXCESSIVE_LGR_RANGE}}; + } + TxHandler::Output output; + auto const dbResponse = sharedPtrBackend_->fetchTransaction( + ripple::uint256{std::string_view(input.transaction)}, yield); + if (!dbResponse) + { + if (rangeSupplied) + { + auto const range = sharedPtrBackend_->fetchLedgerRange(); + auto const searchedAll = range->maxSequence >= *input.maxLedger && + range->minSequence <= *input.minLedger; + boost::json::object extra; + extra["searched_all"] = searchedAll; + return Error{RPC::Status{ + RPC::RippledError::rpcTXN_NOT_FOUND, std::move(extra)}}; + } + return Error{RPC::Status{RPC::RippledError::rpcTXN_NOT_FOUND}}; + } + + // clio does not implement 'inLedger' which is a deprecated field + if (!input.binary) + { + auto const [txn, meta] = RPC::toExpandedJson(*dbResponse); + output.tx = txn; + output.meta = meta; + } + else + { + output.txStr = ripple::strHex(dbResponse->transaction); + 
output.metaStr = ripple::strHex(dbResponse->metadata); + output.hash = std::move(input.transaction); + } + + output.date = dbResponse->date; + output.ledgerIndex = dbResponse->ledgerSequence; + return output; +} + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + TxHandler::Output const& output) +{ + auto obj = boost::json::object{}; + if (output.tx) + { + obj = *output.tx; + obj["meta"] = *output.meta; + } + else + { + obj["meta"] = *output.metaStr; + obj["tx"] = *output.txStr; + obj["hash"] = output.hash; + } + obj["date"] = output.date; + obj["ledger_index"] = output.ledgerIndex; + jv = std::move(obj); +} + +TxHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv) +{ + TxHandler::Input input; + auto const& jsonObject = jv.as_object(); + input.transaction = jv.at("transaction").as_string().c_str(); + if (jsonObject.contains("binary")) + { + input.binary = jv.at("binary").as_bool(); + } + if (jsonObject.contains("min_ledger")) + { + input.minLedger = jv.at("min_ledger").as_int64(); + } + if (jsonObject.contains("max_ledger")) + { + input.maxLedger = jv.at("max_ledger").as_int64(); + } + return input; +} + +} // namespace RPCng diff --git a/src/rpc/ngHandlers/Tx.h b/src/rpc/ngHandlers/Tx.h new file mode 100644 index 00000000..06690b0b --- /dev/null +++ b/src/rpc/ngHandlers/Tx.h @@ -0,0 +1,91 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +#include + +namespace RPCng { +class TxHandler +{ + std::shared_ptr sharedPtrBackend_; + +public: + struct Output + { + uint32_t date; + std::string hash; + uint32_t ledgerIndex; + std::optional meta; + std::optional tx; + std::optional metaStr; + std::optional txStr; + bool validated = true; + }; + + // TODO: we did not implement the "strict" field + struct Input + { + std::string transaction; + bool binary = false; + std::optional minLedger; + std::optional maxLedger; + }; + + using Result = RPCng::HandlerReturnType; + + TxHandler(std::shared_ptr const& sharedPtrBackend) + : sharedPtrBackend_(sharedPtrBackend) + { + } + + RpcSpecConstRef + spec() const + { + static const RpcSpec rpcSpec = { + {"transaction", + validation::Required{}, + validation::Uint256HexStringValidator}, + {"binary", validation::Type{}}, + {"min_ledger", validation::Type{}}, + {"max_ledger", validation::Type{}}, + }; + + return rpcSpec; + } + + Result + process(Input input, boost::asio::yield_context& yield) const; +}; + +void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + TxHandler::Output const& output); + +TxHandler::Input +tag_invoke( + boost::json::value_to_tag, + boost::json::value const& jv); +} // namespace RPCng diff --git a/src/subscriptions/Message.h b/src/subscriptions/Message.h new file mode 100644 index 00000000..f464c793 --- /dev/null +++ b/src/subscriptions/Message.h @@ -0,0 +1,56 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: 
https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include + +// This class should only be constructed once, then it can +// be read from in parallel by many websocket senders +class Message +{ + std::string message_; + +public: + Message() = delete; + Message(std::string&& message) : message_(std::move(message)) + { + } + + Message(Message const&) = delete; + Message(Message&&) = delete; + Message& + operator=(Message const&) = delete; + Message& + operator=(Message&&) = delete; + + ~Message() = default; + + char* + data() + { + return message_.data(); + } + + std::size_t + size() + { + return message_.size(); + } +}; diff --git a/src/subscriptions/SubscriptionManager.cpp b/src/subscriptions/SubscriptionManager.cpp new file mode 100644 index 00000000..273faafd --- /dev/null +++ b/src/subscriptions/SubscriptionManager.cpp @@ -0,0 +1,424 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +void +Subscription::subscribe(std::shared_ptr const& session) +{ + boost::asio::post(strand_, [this, session]() { + addSession(session, subscribers_, subCount_); + }); +} + +void +Subscription::unsubscribe(std::shared_ptr const& session) +{ + boost::asio::post(strand_, [this, session]() { + removeSession(session, subscribers_, subCount_); + }); +} + +void +Subscription::publish(std::shared_ptr const& message) +{ + boost::asio::post(strand_, [this, message]() { + sendToSubscribers(message, subscribers_, subCount_); + }); +} + +boost::json::object +getLedgerPubMessage( + ripple::LedgerInfo const& lgrInfo, + ripple::Fees const& fees, + std::string const& ledgerRange, + std::uint32_t txnCount) +{ + boost::json::object pubMsg; + + pubMsg["type"] = "ledgerClosed"; + pubMsg["ledger_index"] = lgrInfo.seq; + pubMsg["ledger_hash"] = to_string(lgrInfo.hash); + pubMsg["ledger_time"] = lgrInfo.closeTime.time_since_epoch().count(); + + pubMsg["fee_ref"] = RPC::toBoostJson(fees.units.jsonClipped()); + pubMsg["fee_base"] = RPC::toBoostJson(fees.base.jsonClipped()); + pubMsg["reserve_base"] = RPC::toBoostJson(fees.reserve.jsonClipped()); + 
pubMsg["reserve_inc"] = RPC::toBoostJson(fees.increment.jsonClipped()); + + pubMsg["validated_ledgers"] = ledgerRange; + pubMsg["txn_count"] = txnCount; + return pubMsg; +} + +boost::json::object +SubscriptionManager::subLedger( + boost::asio::yield_context& yield, + std::shared_ptr session) +{ + subscribeHelper(session, ledgerSubscribers_, [this](session_ptr session) { + unsubLedger(session); + }); + + auto ledgerRange = backend_->fetchLedgerRange(); + assert(ledgerRange); + auto lgrInfo = + backend_->fetchLedgerBySequence(ledgerRange->maxSequence, yield); + assert(lgrInfo); + + std::optional fees; + fees = backend_->fetchFees(lgrInfo->seq, yield); + assert(fees); + + std::string range = std::to_string(ledgerRange->minSequence) + "-" + + std::to_string(ledgerRange->maxSequence); + + auto pubMsg = getLedgerPubMessage(*lgrInfo, *fees, range, 0); + pubMsg.erase("txn_count"); + pubMsg.erase("type"); + return pubMsg; +} + +void +SubscriptionManager::unsubLedger(std::shared_ptr session) +{ + ledgerSubscribers_.unsubscribe(session); +} + +void +SubscriptionManager::subTransactions(std::shared_ptr session) +{ + subscribeHelper(session, txSubscribers_, [this](session_ptr session) { + unsubTransactions(session); + }); +} + +void +SubscriptionManager::unsubTransactions(std::shared_ptr session) +{ + txSubscribers_.unsubscribe(session); +} + +void +SubscriptionManager::subAccount( + ripple::AccountID const& account, + std::shared_ptr& session) +{ + subscribeHelper( + session, + account, + accountSubscribers_, + [this, account](session_ptr session) { + unsubAccount(account, session); + }); +} + +void +SubscriptionManager::unsubAccount( + ripple::AccountID const& account, + std::shared_ptr& session) +{ + accountSubscribers_.unsubscribe(session, account); +} + +void +SubscriptionManager::subBook( + ripple::Book const& book, + std::shared_ptr session) +{ + subscribeHelper( + session, book, bookSubscribers_, [this, book](session_ptr session) { + unsubBook(book, session); + }); +} + 
+void +SubscriptionManager::unsubBook( + ripple::Book const& book, + std::shared_ptr session) +{ + bookSubscribers_.unsubscribe(session, book); +} + +void +SubscriptionManager::subBookChanges(std::shared_ptr session) +{ + subscribeHelper( + session, bookChangesSubscribers_, [this](session_ptr session) { + unsubBookChanges(session); + }); +} + +void +SubscriptionManager::unsubBookChanges(std::shared_ptr session) +{ + bookChangesSubscribers_.unsubscribe(session); +} + +void +SubscriptionManager::pubLedger( + ripple::LedgerInfo const& lgrInfo, + ripple::Fees const& fees, + std::string const& ledgerRange, + std::uint32_t txnCount) +{ + auto message = std::make_shared(boost::json::serialize( + getLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount))); + + ledgerSubscribers_.publish(message); +} + +void +SubscriptionManager::pubTransaction( + Backend::TransactionAndMetadata const& blobs, + ripple::LedgerInfo const& lgrInfo) +{ + auto [tx, meta] = RPC::deserializeTxPlusMeta(blobs, lgrInfo.seq); + boost::json::object pubObj; + pubObj["transaction"] = RPC::toJson(*tx); + pubObj["meta"] = RPC::toJson(*meta); + RPC::insertDeliveredAmount( + pubObj["meta"].as_object(), tx, meta, blobs.date); + pubObj["type"] = "transaction"; + pubObj["validated"] = true; + pubObj["status"] = "closed"; + + pubObj["ledger_index"] = lgrInfo.seq; + pubObj["ledger_hash"] = ripple::strHex(lgrInfo.hash); + pubObj["transaction"].as_object()["date"] = + lgrInfo.closeTime.time_since_epoch().count(); + + pubObj["engine_result_code"] = meta->getResult(); + std::string token; + std::string human; + ripple::transResultInfo(meta->getResultTER(), token, human); + pubObj["engine_result"] = token; + pubObj["engine_result_message"] = human; + if (tx->getTxnType() == ripple::ttOFFER_CREATE) + { + auto account = tx->getAccountID(ripple::sfAccount); + auto amount = tx->getFieldAmount(ripple::sfTakerGets); + if (account != amount.issue().account) + { + ripple::STAmount ownerFunds; + auto fetchFundsSynchronous = 
[&]() { + Backend::synchronous([&](boost::asio::yield_context& yield) { + ownerFunds = RPC::accountFunds( + *backend_, lgrInfo.seq, amount, account, yield); + }); + }; + + Backend::retryOnTimeout(fetchFundsSynchronous); + + pubObj["transaction"].as_object()["owner_funds"] = + ownerFunds.getText(); + } + } + + auto pubMsg = std::make_shared(boost::json::serialize(pubObj)); + txSubscribers_.publish(pubMsg); + + auto accounts = meta->getAffectedAccounts(); + + for (auto const& account : accounts) + accountSubscribers_.publish(pubMsg, account); + + std::unordered_set alreadySent; + + for (auto const& node : meta->getNodes()) + { + if (node.getFieldU16(ripple::sfLedgerEntryType) == ripple::ltOFFER) + { + ripple::SField const* field = nullptr; + + // We need a field that contains the TakerGets and TakerPays + // parameters. + if (node.getFName() == ripple::sfModifiedNode) + field = &ripple::sfPreviousFields; + else if (node.getFName() == ripple::sfCreatedNode) + field = &ripple::sfNewFields; + else if (node.getFName() == ripple::sfDeletedNode) + field = &ripple::sfFinalFields; + + if (field) + { + auto data = dynamic_cast( + node.peekAtPField(*field)); + + if (data && data->isFieldPresent(ripple::sfTakerPays) && + data->isFieldPresent(ripple::sfTakerGets)) + { + // determine the OrderBook + ripple::Book book{ + data->getFieldAmount(ripple::sfTakerGets).issue(), + data->getFieldAmount(ripple::sfTakerPays).issue()}; + if (alreadySent.find(book) == alreadySent.end()) + { + bookSubscribers_.publish(pubMsg, book); + alreadySent.insert(book); + } + } + } + } + } +} + +void +SubscriptionManager::pubBookChanges( + ripple::LedgerInfo const& lgrInfo, + std::vector const& transactions) +{ + auto const json = RPC::computeBookChanges(lgrInfo, transactions); + auto const bookChangesMsg = + std::make_shared(boost::json::serialize(json)); + bookChangesSubscribers_.publish(bookChangesMsg); +} + +void +SubscriptionManager::forwardProposedTransaction( + boost::json::object const& response) 
+{ + auto pubMsg = std::make_shared(boost::json::serialize(response)); + txProposedSubscribers_.publish(pubMsg); + + auto transaction = response.at("transaction").as_object(); + auto accounts = RPC::getAccountsFromTransaction(transaction); + + for (ripple::AccountID const& account : accounts) + accountProposedSubscribers_.publish(pubMsg, account); +} + +void +SubscriptionManager::forwardManifest(boost::json::object const& response) +{ + auto pubMsg = std::make_shared(boost::json::serialize(response)); + manifestSubscribers_.publish(pubMsg); +} + +void +SubscriptionManager::forwardValidation(boost::json::object const& response) +{ + auto pubMsg = std::make_shared(boost::json::serialize(response)); + validationsSubscribers_.publish(pubMsg); +} + +void +SubscriptionManager::subProposedAccount( + ripple::AccountID const& account, + std::shared_ptr session) +{ + subscribeHelper( + session, + account, + accountProposedSubscribers_, + [this, account](session_ptr session) { + unsubProposedAccount(account, session); + }); +} + +void +SubscriptionManager::subManifest(std::shared_ptr session) +{ + subscribeHelper(session, manifestSubscribers_, [this](session_ptr session) { + unsubManifest(session); + }); +} + +void +SubscriptionManager::unsubManifest(std::shared_ptr session) +{ + manifestSubscribers_.unsubscribe(session); +} + +void +SubscriptionManager::subValidation(std::shared_ptr session) +{ + subscribeHelper( + session, validationsSubscribers_, [this](session_ptr session) { + unsubValidation(session); + }); +} + +void +SubscriptionManager::unsubValidation(std::shared_ptr session) +{ + validationsSubscribers_.unsubscribe(session); +} + +void +SubscriptionManager::unsubProposedAccount( + ripple::AccountID const& account, + std::shared_ptr session) +{ + accountProposedSubscribers_.unsubscribe(session, account); +} + +void +SubscriptionManager::subProposedTransactions(std::shared_ptr session) +{ + subscribeHelper( + session, txProposedSubscribers_, [this](session_ptr 
session) { + unsubProposedTransactions(session); + }); +} + +void +SubscriptionManager::unsubProposedTransactions(std::shared_ptr session) +{ + txProposedSubscribers_.unsubscribe(session); +} +void +SubscriptionManager::subscribeHelper( + std::shared_ptr& session, + Subscription& subs, + CleanupFunction&& func) +{ + subs.subscribe(session); + std::scoped_lock lk(cleanupMtx_); + cleanupFuncs_[session].push_back(std::move(func)); +} +template +void +SubscriptionManager::subscribeHelper( + std::shared_ptr& session, + Key const& k, + SubscriptionMap& subs, + CleanupFunction&& func) +{ + subs.subscribe(session, k); + std::scoped_lock lk(cleanupMtx_); + cleanupFuncs_[session].push_back(std::move(func)); +} + +void +SubscriptionManager::cleanup(std::shared_ptr session) +{ + std::scoped_lock lk(cleanupMtx_); + if (!cleanupFuncs_.contains(session)) + return; + + for (auto f : cleanupFuncs_[session]) + { + f(session); + } + + cleanupFuncs_.erase(session); +} diff --git a/src/subscriptions/SubscriptionManager.h b/src/subscriptions/SubscriptionManager.h new file mode 100644 index 00000000..70ef3d7b --- /dev/null +++ b/src/subscriptions/SubscriptionManager.h @@ -0,0 +1,405 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include + +#include + +class WsBase; + +class Subscription +{ + boost::asio::io_context::strand strand_; + std::unordered_set> subscribers_ = {}; + std::atomic_uint64_t subCount_ = 0; + +public: + Subscription() = delete; + Subscription(Subscription&) = delete; + Subscription(Subscription&&) = delete; + + explicit Subscription(boost::asio::io_context& ioc) : strand_(ioc) + { + } + + ~Subscription() = default; + + void + subscribe(std::shared_ptr const& session); + + void + unsubscribe(std::shared_ptr const& session); + + void + publish(std::shared_ptr const& message); + + std::uint64_t + count() const + { + return subCount_.load(); + } + + bool + empty() const + { + return count() == 0; + } +}; + +template +class SubscriptionMap +{ + using ptr = std::shared_ptr; + using subscribers = std::set; + + boost::asio::io_context::strand strand_; + std::unordered_map subscribers_ = {}; + std::atomic_uint64_t subCount_ = 0; + +public: + SubscriptionMap() = delete; + SubscriptionMap(SubscriptionMap&) = delete; + SubscriptionMap(SubscriptionMap&&) = delete; + + explicit SubscriptionMap(boost::asio::io_context& ioc) : strand_(ioc) + { + } + + ~SubscriptionMap() = default; + + void + subscribe(std::shared_ptr const& session, Key const& key); + + void + unsubscribe(std::shared_ptr const& session, Key const& key); + + void + publish(std::shared_ptr const& message, Key const& key); + + std::uint64_t + count() + { + return subCount_.load(); + } +}; + +template +inline void +sendToSubscribers( + std::shared_ptr const& message, + 
T& subscribers, + std::atomic_uint64_t& counter) +{ + for (auto it = subscribers.begin(); it != subscribers.end();) + { + auto& session = *it; + if (session->dead()) + { + it = subscribers.erase(it); + --counter; + } + else + { + session->send(message); + ++it; + } + } +} + +template +inline void +addSession( + std::shared_ptr session, + T& subscribers, + std::atomic_uint64_t& counter) +{ + if (!subscribers.contains(session)) + { + subscribers.insert(session); + ++counter; + } +} + +template +inline void +removeSession( + std::shared_ptr session, + T& subscribers, + std::atomic_uint64_t& counter) +{ + if (subscribers.contains(session)) + { + subscribers.erase(session); + --counter; + } +} + +template +void +SubscriptionMap::subscribe( + std::shared_ptr const& session, + Key const& account) +{ + boost::asio::post(strand_, [this, session, account]() { + addSession(session, subscribers_[account], subCount_); + }); +} + +template +void +SubscriptionMap::unsubscribe( + std::shared_ptr const& session, + Key const& account) +{ + boost::asio::post(strand_, [this, account, session]() { + if (!subscribers_.contains(account)) + return; + + if (!subscribers_[account].contains(session)) + return; + + --subCount_; + + subscribers_[account].erase(session); + + if (subscribers_[account].size() == 0) + { + subscribers_.erase(account); + } + }); +} + +template +void +SubscriptionMap::publish( + std::shared_ptr const& message, + Key const& account) +{ + boost::asio::post(strand_, [this, account, message]() { + if (!subscribers_.contains(account)) + return; + + sendToSubscribers(message, subscribers_[account], subCount_); + }); +} + +class SubscriptionManager +{ + using session_ptr = std::shared_ptr; + clio::Logger log_{"Subscriptions"}; + + std::vector workers_; + boost::asio::io_context ioc_; + std::optional work_; + + Subscription ledgerSubscribers_; + Subscription txSubscribers_; + Subscription txProposedSubscribers_; + Subscription manifestSubscribers_; + Subscription 
validationsSubscribers_; + Subscription bookChangesSubscribers_; + + SubscriptionMap accountSubscribers_; + SubscriptionMap accountProposedSubscribers_; + SubscriptionMap bookSubscribers_; + + std::shared_ptr backend_; + +public: + static std::shared_ptr + make_SubscriptionManager( + clio::Config const& config, + std::shared_ptr const& b) + { + auto numThreads = config.valueOr("subscription_workers", 1); + return std::make_shared(numThreads, b); + } + + SubscriptionManager( + std::uint64_t numThreads, + std::shared_ptr const& b) + : ledgerSubscribers_(ioc_) + , txSubscribers_(ioc_) + , txProposedSubscribers_(ioc_) + , manifestSubscribers_(ioc_) + , validationsSubscribers_(ioc_) + , bookChangesSubscribers_(ioc_) + , accountSubscribers_(ioc_) + , accountProposedSubscribers_(ioc_) + , bookSubscribers_(ioc_) + , backend_(b) + { + work_.emplace(ioc_); + + // We will eventually want to clamp this to be the number of strands, + // since adding more threads than we have strands won't see any + // performance benefits + log_.info() << "Starting subscription manager with " << numThreads + << " workers"; + + workers_.reserve(numThreads); + for (auto i = numThreads; i > 0; --i) + workers_.emplace_back([this] { ioc_.run(); }); + } + + ~SubscriptionManager() + { + work_.reset(); + + ioc_.stop(); + for (auto& worker : workers_) + worker.join(); + } + + boost::json::object + subLedger(boost::asio::yield_context& yield, session_ptr session); + + void + pubLedger( + ripple::LedgerInfo const& lgrInfo, + ripple::Fees const& fees, + std::string const& ledgerRange, + std::uint32_t txnCount); + + void + pubBookChanges( + ripple::LedgerInfo const& lgrInfo, + std::vector const& transactions); + + void + unsubLedger(session_ptr session); + + void + subTransactions(session_ptr session); + + void + unsubTransactions(session_ptr session); + + void + pubTransaction( + Backend::TransactionAndMetadata const& blobs, + ripple::LedgerInfo const& lgrInfo); + + void + subAccount(ripple::AccountID 
const& account, session_ptr& session); + + void + unsubAccount(ripple::AccountID const& account, session_ptr& session); + + void + subBook(ripple::Book const& book, session_ptr session); + + void + unsubBook(ripple::Book const& book, session_ptr session); + + void + subBookChanges(std::shared_ptr session); + + void + unsubBookChanges(std::shared_ptr session); + + void + subManifest(session_ptr session); + + void + unsubManifest(session_ptr session); + + void + subValidation(session_ptr session); + + void + unsubValidation(session_ptr session); + + void + forwardProposedTransaction(boost::json::object const& response); + + void + forwardManifest(boost::json::object const& response); + + void + forwardValidation(boost::json::object const& response); + + void + subProposedAccount(ripple::AccountID const& account, session_ptr session); + + void + unsubProposedAccount(ripple::AccountID const& account, session_ptr session); + + void + subProposedTransactions(session_ptr session); + + void + unsubProposedTransactions(session_ptr session); + + void + cleanup(session_ptr session); + + boost::json::object + report() + { + boost::json::object counts = {}; + + counts["ledger"] = ledgerSubscribers_.count(); + counts["transactions"] = txSubscribers_.count(); + counts["transactions_proposed"] = txProposedSubscribers_.count(); + counts["manifests"] = manifestSubscribers_.count(); + counts["validations"] = validationsSubscribers_.count(); + counts["account"] = accountSubscribers_.count(); + counts["accounts_proposed"] = accountProposedSubscribers_.count(); + counts["books"] = bookSubscribers_.count(); + counts["book_changes"] = bookChangesSubscribers_.count(); + + return counts; + } + +private: + void + sendAll(std::string const& pubMsg, std::unordered_set& subs); + + using CleanupFunction = std::function; + + void + subscribeHelper( + std::shared_ptr& session, + Subscription& subs, + CleanupFunction&& func); + + template + void + subscribeHelper( + std::shared_ptr& session, + Key 
const& k, + SubscriptionMap& subs, + CleanupFunction&& func); + + /** + * This is how we chose to cleanup subscriptions that have been closed. + * Each time we add a subscriber, we add the opposite lambda that + * unsubscribes that subscriber when cleanup is called with the session that + * closed. + */ + std::mutex cleanupMtx_; + std::unordered_map> + cleanupFuncs_ = {}; +}; diff --git a/src/util/Expected.h b/src/util/Expected.h new file mode 100644 index 00000000..09f707ae --- /dev/null +++ b/src/util/Expected.h @@ -0,0 +1,264 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +/* + * NOTE: + * + * This entire file is taken from rippled and modified slightly to fit this + * codebase as well as fixing the original issue that made this necessary. + * + * The reason is that currently there is no easy way to injest the fix that is + * required to make this implementation correctly work with boost::json::value. 
+ * Since this will be replaced by `std::expected` as soon as possible there is + * not much harm done in doing it this way. + */ + +#pragma once + +#include +#include +#include +#include + +namespace util { + +/** Expected is an approximation of std::expected (hoped for in C++23) + + See: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p0323r10.html + + The implementation is entirely based on boost::outcome_v2::result. +*/ + +// Exception thrown by an invalid access to Expected. +struct bad_expected_access : public std::runtime_error +{ + bad_expected_access() : runtime_error("bad expected access") + { + } +}; + +namespace detail { + +// Custom policy for Expected. Always throw on an invalid access. +struct throw_policy : public boost::outcome_v2::policy::base +{ + template + static constexpr void + wide_value_check(Impl&& self) + { + if (!base::_has_value(std::forward(self))) + ripple::Throw(); + } + + template + static constexpr void + wide_error_check(Impl&& self) + { + if (!base::_has_error(std::forward(self))) + ripple::Throw(); + } + + template + static constexpr void + wide_exception_check(Impl&& self) + { + if (!base::_has_exception(std::forward(self))) + ripple::Throw(); + } +}; + +} // namespace detail + +// Definition of Unexpected, which is used to construct the unexpected +// return type of an Expected. +template +class Unexpected +{ +public: + static_assert(!std::is_same::value, "E must not be void"); + + Unexpected() = delete; + + constexpr explicit Unexpected(E const& e) : val_(e) + { + } + + constexpr explicit Unexpected(E&& e) : val_(std::move(e)) + { + } + + constexpr const E& + value() const& + { + return val_; + } + + constexpr E& + value() & + { + return val_; + } + + constexpr E&& + value() && + { + return std::move(val_); + } + + constexpr const E&& + value() const&& + { + return std::move(val_); + } + +private: + E val_; +}; + +// Unexpected deduction guide that converts array to const*. 
+template +Unexpected(E (&)[N]) -> Unexpected; + +// Definition of Expected. All of the machinery comes from boost::result. +template +class [[nodiscard]] Expected + : private boost::outcome_v2::result +{ + using Base = boost::outcome_v2::result; + +public: + template < + typename U, + typename = std::enable_if_t>> + constexpr Expected(U r) : Base(T(std::forward(r))) + { + } + + template < + typename U, + typename = std::enable_if_t>> + constexpr Expected(Unexpected e) : Base(E(std::forward(e.value()))) + { + } + + constexpr bool + has_value() const + { + return Base::has_value(); + } + + constexpr T const& + value() const + { + return Base::value(); + } + + constexpr T& + value() + { + return Base::value(); + } + + constexpr E const& + error() const + { + return Base::error(); + } + + constexpr E& + error() + { + return Base::error(); + } + + constexpr explicit operator bool() const + { + return has_value(); + } + + // Add operator* and operator-> so the Expected API looks a bit more like + // what std::expected is likely to look like. See: + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p0323r10.html + [[nodiscard]] constexpr T& + operator*() + { + return this->value(); + } + + [[nodiscard]] constexpr T const& + operator*() const + { + return this->value(); + } + + [[nodiscard]] constexpr T* + operator->() + { + return &this->value(); + } + + [[nodiscard]] constexpr T const* + operator->() const + { + return &this->value(); + } +}; + +// Specialization of Expected. Allows returning either success +// (without a value) or the reason for the failure. +template +class [[nodiscard]] Expected + : private boost::outcome_v2::result +{ + using Base = boost::outcome_v2::result; + +public: + // The default constructor makes a successful Expected. + // This aligns with std::expected behavior proposed in P0323R10. 
+ constexpr Expected() : Base(boost::outcome_v2::success()) + { + } + + template < + typename U, + typename = std::enable_if_t>> + constexpr Expected(Unexpected e) : Base(E(std::forward(e.value()))) + { + } + + constexpr E const& + error() const + { + return Base::error(); + } + + constexpr E& + error() + { + return Base::error(); + } + + constexpr explicit operator bool() const + { + return Base::has_value(); + } +}; + +} // namespace util diff --git a/src/util/Profiler.h b/src/util/Profiler.h new file mode 100644 index 00000000..ff89f5bb --- /dev/null +++ b/src/util/Profiler.h @@ -0,0 +1,59 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +namespace util { + +/** + * @brief Profiler function to measure the time consuming + * @param func function object, can be a lamdba or function wrapper + * @return return a pair if function wrapper has return value: result of + * function wrapper and the elapsed time(ms) during executing the given + * function only return the elapsed time if function wrapper does not have + * return value + */ +template +[[nodiscard]] auto +timed(F&& func) +{ + auto start = std::chrono::system_clock::now(); + + if constexpr (std::is_same_v) + { + func(); + return std::chrono::duration_cast( + std::chrono::system_clock::now() - start) + .count(); + } + else + { + auto ret = func(); + auto elapsed = std::chrono::duration_cast( + std::chrono::system_clock::now() - start) + .count(); + return std::make_pair(ret, elapsed); + } +} + +} // namespace util diff --git a/src/util/Taggable.cpp b/src/util/Taggable.cpp new file mode 100644 index 00000000..cb136367 --- /dev/null +++ b/src/util/Taggable.cpp @@ -0,0 +1,76 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include +#include + +#include +#include +#include + +namespace util::detail { + +UIntTagGenerator::tag_t +UIntTagGenerator::next() +{ + static std::atomic_uint64_t num{0}; + return num++; +} + +UUIDTagGenerator::tag_t +UUIDTagGenerator::next() +{ + static boost::uuids::random_generator gen{}; + static std::mutex mtx{}; + + std::lock_guard lk(mtx); + return gen(); +} + +} // namespace util::detail + +namespace util { + +std::unique_ptr +TagDecoratorFactory::make() const +{ + switch (type_) + { + case Type::UINT: + return std::make_unique>( + parent_); + case Type::UUID: + return std::make_unique>( + parent_); + case Type::NONE: + default: + return std::make_unique>(); + } +} + +TagDecoratorFactory +TagDecoratorFactory::with(parent_t parent) const noexcept +{ + return TagDecoratorFactory(type_, parent); +} + +} // namespace util diff --git a/src/util/Taggable.h b/src/util/Taggable.h new file mode 100644 index 00000000..7c899a74 --- /dev/null +++ b/src/util/Taggable.h @@ -0,0 +1,279 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace util { +namespace detail { + +/** + * @brief A `null` tag generator - does nothing. + */ +struct NullTagGenerator final +{ +}; + +/** + * @brief This strategy uses an `atomic_uint64_t` to remain lock free. + */ +struct UIntTagGenerator final +{ + using tag_t = std::atomic_uint64_t; + + static tag_t + next(); +}; + +/** + * @brief This strategy uses `boost::uuids::uuid` with a static random generator + * and a mutex + */ +struct UUIDTagGenerator final +{ + using tag_t = boost::uuids::uuid; + + static tag_t + next(); +}; + +} // namespace detail + +/** + * @brief Represents any tag decorator + */ +class BaseTagDecorator +{ +public: + virtual ~BaseTagDecorator() = default; + + /** + * @brief Decorates a std::ostream. + * @param os The stream to decorate + */ + virtual void + decorate(std::ostream& os) const = 0; + + /** + * @brief Support for decorating streams (boost log, cout, etc.). + * + * @param os The stream + * @param decorator The decorator + * @return std::ostream& The same stream that we were given + */ + friend std::ostream& + operator<<(std::ostream& os, BaseTagDecorator const& decorator) + { + decorator.decorate(os); + return os; + } +}; + +/** + * @brief A decorator that decorates a string (log line) with a unique tag. 
+ * @tparam Generator The strategy used to generate the tag. + */ +template +class TagDecorator final : public BaseTagDecorator +{ + using parent_t = + std::optional>; + using tag_t = typename Generator::tag_t; + + parent_t parent_ = std::nullopt; + tag_t tag_ = Generator::next(); + +public: + /** + * @brief Create a new tag decorator with an optional parent + * + * If the `parent` is specified it will be streamed out as a chain when this + * decorator will decorate an ostream. + * + * Note that if `parent` is specified it is your responsibility that the + * decorator referred to by `parent` outlives this decorator. + * + * @param parent An optional parent tag decorator + */ + explicit TagDecorator(parent_t parent = std::nullopt) : parent_{parent} + { + } + + /** + * @brief Implementation of the decoration. Chaining tags when parent is + * available. + * @param os The stream to output into + */ + void + decorate(std::ostream& os) const override + { + os << "["; + + if (parent_.has_value()) + (*parent_).get().decorate(os); + + os << tag_ << "] "; + } +}; + +/** + * @brief Specialization for a nop/null decorator. + * + * This generates a pass-thru decorate member function which can be optimized + * away by the compiler. + */ +template <> +class TagDecorator final : public BaseTagDecorator +{ +public: + /** + * @brief Nop implementation for the decorator. + * @param os The stream + */ + void + decorate([[maybe_unused]] std::ostream& os) const override + { + // nop + } +}; + +/** + * @brief A factory for TagDecorator instantiation. + */ +class TagDecoratorFactory final +{ + using parent_t = + std::optional>; + + /** + * @brief Represents the type of tag decorator + */ + enum class Type { + NONE, /*! No decoration and no tag */ + UUID, /*! Tag based on `boost::uuids::uuid`, thread-safe via mutex */ + UINT /*! atomic_uint64_t tag, thread-safe, lock-free */ + }; + + Type type_; /*! The type of TagDecorator this factory produces */ + parent_t parent_ = std::nullopt; /*! 
The parent tag decorator to bind */ + +public: + ~TagDecoratorFactory() = default; + + /** + * @brief Instantiates a tag decorator factory from `clio` configuration. + * @param config The configuration as a json object + */ + explicit TagDecoratorFactory(clio::Config const& config) + : type_{config.valueOr("log_tag_style", Type::NONE)} + { + } + +private: + TagDecoratorFactory(Type type, parent_t parent) noexcept + : type_{type}, parent_{parent} + { + } + +public: + /** + * @brief Instantiates the TagDecorator specified by `type_` with parent + * bound from `parent_`. + * + * @return std::unique_ptr An instance of the requested + * decorator + */ + std::unique_ptr + make() const; + + /** + * @brief Creates a new tag decorator factory with a bound parent tag + * decorator. + * + * @param parent The parent tag decorator to use + * @return TagDecoratorFactory A new instance of the tag decorator factory + */ + TagDecoratorFactory + with(parent_t parent) const noexcept; + +private: + friend Type + tag_invoke(boost::json::value_to_tag, boost::json::value const& value) + { + if (not value.is_string()) + throw std::runtime_error("`log_tag_style` must be a string"); + auto const& style = value.as_string(); + + if (boost::iequals(style, "int") || boost::iequals(style, "uint")) + return TagDecoratorFactory::Type::UINT; + else if (boost::iequals(style, "null") || boost::iequals(style, "none")) + return TagDecoratorFactory::Type::NONE; + else if (boost::iequals(style, "uuid")) + return TagDecoratorFactory::Type::UUID; + else + throw std::runtime_error( + "Could not parse `log_tag_style`: expected `uint`, `uuid` or " + "`null`"); + } +}; + +/** + * @brief A base class that allows attaching a tag decorator to a subclass. 
+ */ +class Taggable +{ + using decorator_t = std::unique_ptr; + decorator_t tagDecorator_; + +protected: + /** + * @brief New Taggable from a specified factory + * @param tagFactory The factory to use + */ + explicit Taggable(util::TagDecoratorFactory const& tagFactory) + : tagDecorator_{tagFactory.make()} + { + } + +public: + virtual ~Taggable() = default; + + /** + * @brief Getter for tag decorator. + * @return util::BaseTagDecorator const& Reference to the tag decorator + */ + util::BaseTagDecorator const& + tag() const + { + return *tagDecorator_; + } +}; + +} // namespace util diff --git a/src/webserver/DOSGuard.h b/src/webserver/DOSGuard.h new file mode 100644 index 00000000..502b3fb4 --- /dev/null +++ b/src/webserver/DOSGuard.h @@ -0,0 +1,317 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include + +#include +#include +#include +#include + +#include + +namespace clio { + +class BaseDOSGuard +{ +public: + virtual ~BaseDOSGuard() = default; + + virtual void + clear() noexcept = 0; +}; + +/** + * @brief A simple denial of service guard used for rate limiting. + * + * @tparam SweepHandler Type of the sweep handler + */ +template +class BasicDOSGuard : public BaseDOSGuard +{ + // Accumulated state per IP, state will be reset accordingly + struct ClientState + { + // accumulated transfered byte + std::uint32_t transferedByte = 0; + // accumulated served requests count + std::uint32_t requestsCount = 0; + }; + + mutable std::mutex mtx_; + // accumulated states map + std::unordered_map ipState_; + std::unordered_map ipConnCount_; + std::unordered_set const whitelist_; + + std::uint32_t const maxFetches_; + std::uint32_t const maxConnCount_; + std::uint32_t const maxRequestCount_; + clio::Logger log_{"RPC"}; + +public: + /** + * @brief Constructs a new DOS guard. 
+ * + * @param config Clio config + * @param sweepHandler Sweep handler that implements the sweeping behaviour + */ + BasicDOSGuard(clio::Config const& config, SweepHandler& sweepHandler) + : whitelist_{getWhitelist(config)} + , maxFetches_{config.valueOr("dos_guard.max_fetches", 1000000u)} + , maxConnCount_{config.valueOr("dos_guard.max_connections", 20u)} + , maxRequestCount_{config.valueOr("dos_guard.max_requests", 20u)} + { + sweepHandler.setup(this); + } + + /** + * @brief Check whether an ip address is in the whitelist or not + * + * @param ip The ip address to check + * @return true + * @return false + */ + [[nodiscard]] bool + isWhiteListed(std::string const& ip) const noexcept + { + return whitelist_.contains(ip); + } + + /** + * @brief Check whether an ip address is currently rate limited or not + * + * @param ip The ip address to check + * @return true If not rate limited + * @return false If rate limited and the request should not be processed + */ + [[nodiscard]] bool + isOk(std::string const& ip) const noexcept + { + if (whitelist_.contains(ip)) + return true; + + { + std::scoped_lock lck(mtx_); + if (ipState_.find(ip) != ipState_.end()) + { + auto [transferedByte, requests] = ipState_.at(ip); + if (transferedByte > maxFetches_ || requests > maxRequestCount_) + { + log_.warn() + << "Dosguard:Client surpassed the rate limit. ip = " + << ip << " Transfered Byte:" << transferedByte + << " Requests:" << requests; + return false; + } + } + auto it = ipConnCount_.find(ip); + if (it != ipConnCount_.end()) + { + if (it->second > maxConnCount_) + { + log_.warn() + << "Dosguard:Client surpassed the rate limit. 
ip = " + << ip << " Concurrent connection:" << it->second; + return false; + } + } + } + return true; + } + + /** + * @brief Increment connection count for the given ip address + * + * @param ip + */ + void + increment(std::string const& ip) noexcept + { + if (whitelist_.contains(ip)) + return; + std::scoped_lock lck{mtx_}; + ipConnCount_[ip]++; + } + + /** + * @brief Decrement connection count for the given ip address + * + * @param ip + */ + void + decrement(std::string const& ip) noexcept + { + if (whitelist_.contains(ip)) + return; + std::scoped_lock lck{mtx_}; + assert(ipConnCount_[ip] > 0); + ipConnCount_[ip]--; + if (ipConnCount_[ip] == 0) + ipConnCount_.erase(ip); + } + + /** + * @brief Adds numObjects of usage for the given ip address. + * + * If the total sums up to a value equal or larger than maxFetches_ + * the operation is no longer allowed and false is returned; true is + * returned otherwise. + * + * @param ip + * @param numObjects + * @return true + * @return false + */ + [[maybe_unused]] bool + add(std::string const& ip, uint32_t numObjects) noexcept + { + if (whitelist_.contains(ip)) + return true; + + { + std::scoped_lock lck(mtx_); + ipState_[ip].transferedByte += numObjects; + } + + return isOk(ip); + } + + /** + * @brief Adds one request for the given ip address. + * + * If the total sums up to a value equal or larger than maxRequestCount_ + * the operation is no longer allowed and false is returned; true is + * returned otherwise. 
+ * + * @param ip + * @return true + * @return false + */ + [[maybe_unused]] bool + request(std::string const& ip) noexcept + { + if (whitelist_.contains(ip)) + return true; + + { + std::scoped_lock lck(mtx_); + ipState_[ip].requestsCount++; + } + + return isOk(ip); + } + + /** + * @brief Instantly clears all fetch counters added by @see add(std::string + * const&, uint32_t) + */ + void + clear() noexcept override + { + std::scoped_lock lck(mtx_); + ipState_.clear(); + } + +private: + [[nodiscard]] std::unordered_set const + getWhitelist(clio::Config const& config) const + { + using T = std::unordered_set const; + auto whitelist = config.arrayOr("dos_guard.whitelist", {}); + auto const transform = [](auto const& elem) { + return elem.template value(); + }; + return T{ + boost::transform_iterator(std::begin(whitelist), transform), + boost::transform_iterator(std::end(whitelist), transform)}; + } +}; + +/** + * @brief Sweep handler using a steady_timer and boost::asio::io_context. + */ +class IntervalSweepHandler +{ + std::chrono::milliseconds sweepInterval_; + std::reference_wrapper ctx_; + BaseDOSGuard* dosGuard_ = nullptr; + + boost::asio::steady_timer timer_{ctx_.get()}; + +public: + /** + * @brief Construct a new interval-based sweep handler + * + * @param config Clio config + * @param ctx The boost::asio::io_context + */ + IntervalSweepHandler( + clio::Config const& config, + boost::asio::io_context& ctx) + : sweepInterval_{std::max( + 1u, + static_cast( + config.valueOr("dos_guard.sweep_interval", 1.0) * 1000.0))} + , ctx_{std::ref(ctx)} + { + } + + ~IntervalSweepHandler() + { + timer_.cancel(); + } + + /** + * @brief This setup member function is called by @ref BasicDOSGuard during + * its initialization. 
+ * + * @param guard Pointer to the dos guard + */ + void + setup(BaseDOSGuard* guard) + { + assert(dosGuard_ == nullptr); + dosGuard_ = guard; + assert(dosGuard_ != nullptr); + + createTimer(); + } + +private: + void + createTimer() + { + timer_.expires_after(sweepInterval_); + timer_.async_wait([this](boost::system::error_code const& error) { + if (error == boost::asio::error::operation_aborted) + return; + + dosGuard_->clear(); + createTimer(); + }); + } +}; + +using DOSGuard = BasicDOSGuard; + +} // namespace clio diff --git a/src/webserver/HttpBase.h b/src/webserver/HttpBase.h new file mode 100644 index 00000000..33172ed4 --- /dev/null +++ b/src/webserver/HttpBase.h @@ -0,0 +1,517 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include
+#include +#include +#include +#include +#include +#include +#include + +// TODO: consider removing those - visible to anyone including this header +namespace http = boost::beast::http; +namespace net = boost::asio; +namespace ssl = boost::asio::ssl; +using tcp = boost::asio::ip::tcp; + +static std::string defaultResponse = + "" + " Test page for reporting mode

" + " Test

This page shows xrpl reporting http(s) " + "connectivity is working.

"; + +// From Boost Beast examples http_server_flex.cpp +template +class HttpBase : public util::Taggable +{ + // Access the derived class, this is part of + // the Curiously Recurring Template Pattern idiom. + Derived& + derived() + { + return static_cast(*this); + } + + struct send_lambda + { + HttpBase& self_; + + explicit send_lambda(HttpBase& self) : self_(self) + { + } + + template + void + operator()(http::message&& msg) const + { + if (self_.dead()) + return; + + // The lifetime of the message has to extend + // for the duration of the async operation so + // we use a shared_ptr to manage it. + auto sp = std::make_shared>( + std::move(msg)); + + // Store a type-erased version of the shared + // pointer in the class to keep it alive. + self_.res_ = sp; + + // Write the response + http::async_write( + self_.derived().stream(), + *sp, + boost::beast::bind_front_handler( + &HttpBase::on_write, + self_.derived().shared_from_this(), + sp->need_eof())); + } + }; + + boost::system::error_code ec_; + boost::asio::io_context& ioc_; + http::request req_; + std::shared_ptr res_; + std::shared_ptr backend_; + std::shared_ptr subscriptions_; + std::shared_ptr balancer_; + std::shared_ptr etl_; + util::TagDecoratorFactory const& tagFactory_; + clio::DOSGuard& dosGuard_; + RPC::Counters& counters_; + WorkQueue& workQueue_; + send_lambda lambda_; + +protected: + clio::Logger log_{"WebServer"}; + clio::Logger perfLog_{"Performance"}; + boost::beast::flat_buffer buffer_; + bool upgraded_ = false; + + bool + dead() + { + return ec_ != boost::system::error_code{}; + } + + inline void + httpFail(boost::beast::error_code ec, char const* what) + { + // ssl::error::stream_truncated, also known as an SSL "short read", + // indicates the peer closed the connection without performing the + // required closing handshake (for example, Google does this to + // improve performance). 
Generally this can be a security issue, + // but if your communication protocol is self-terminated (as + // it is with both HTTP and WebSocket) then you may simply + // ignore the lack of close_notify. + // + // https://github.com/boostorg/beast/issues/38 + // + // https://security.stackexchange.com/questions/91435/how-to-handle-a-malicious-ssl-tls-shutdown + // + // When a short read would cut off the end of an HTTP message, + // Beast returns the error boost::beast::http::error::partial_message. + // Therefore, if we see a short read here, it has occurred + // after the message has been completed, so it is safe to ignore it. + + if (ec == net::ssl::error::stream_truncated) + return; + + if (!ec_ && ec != boost::asio::error::operation_aborted) + { + ec_ = ec; + perfLog_.info() << tag() << ": " << what << ": " << ec.message(); + boost::beast::get_lowest_layer(derived().stream()) + .socket() + .close(ec); + } + } + +public: + HttpBase( + boost::asio::io_context& ioc, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer buffer) + : Taggable(tagFactory) + , ioc_(ioc) + , backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , etl_(etl) + , tagFactory_(tagFactory) + , dosGuard_(dosGuard) + , counters_(counters) + , workQueue_(queue) + , lambda_(*this) + , buffer_(std::move(buffer)) + { + perfLog_.debug() << tag() << "http session created"; + } + + virtual ~HttpBase() + { + perfLog_.debug() << tag() << "http session closed"; + } + + clio::DOSGuard& + dosGuard() + { + return dosGuard_; + } + + void + do_read() + { + if (dead()) + return; + // Make the request empty before reading, + // otherwise the operation behavior is undefined. + req_ = {}; + + // Set the timeout. 
+ boost::beast::get_lowest_layer(derived().stream()) + .expires_after(std::chrono::seconds(30)); + + // Read a request + http::async_read( + derived().stream(), + buffer_, + req_, + boost::beast::bind_front_handler( + &HttpBase::on_read, derived().shared_from_this())); + } + + void + on_read(boost::beast::error_code ec, std::size_t bytes_transferred) + { + boost::ignore_unused(bytes_transferred); + + // This means they closed the connection + if (ec == http::error::end_of_stream) + return derived().do_close(); + + if (ec) + return httpFail(ec, "read"); + + auto ip = derived().ip(); + + if (!ip) + { + return; + } + + auto const httpResponse = [&](http::status status, + std::string content_type, + std::string message) { + http::response res{status, req_.version()}; + res.set( + http::field::server, + "clio-server-" + Build::getClioVersionString()); + res.set(http::field::content_type, content_type); + res.keep_alive(req_.keep_alive()); + res.body() = std::string(message); + res.prepare_payload(); + return res; + }; + + if (boost::beast::websocket::is_upgrade(req_)) + { + upgraded_ = true; + // Disable the timeout. + // The websocket::stream uses its own timeout settings. 
+ boost::beast::get_lowest_layer(derived().stream()).expires_never(); + return make_websocket_session( + ioc_, + derived().release_stream(), + derived().ip(), + std::move(req_), + std::move(buffer_), + backend_, + subscriptions_, + balancer_, + etl_, + tagFactory_, + dosGuard_, + counters_, + workQueue_); + } + + // to avoid overwhelm work queue, the request limit check should be + // before posting to queue the web socket creation will be guarded via + // connection limit + if (!dosGuard_.request(ip.value())) + { + return lambda_(httpResponse( + http::status::service_unavailable, + "text/plain", + "Server is overloaded")); + } + + log_.info() << tag() << "Received request from ip = " << *ip + << " - posting to WorkQueue"; + + auto session = derived().shared_from_this(); + + // Requests are handed using coroutines. Here we spawn a coroutine + // which will asynchronously handle a request. + if (!workQueue_.postCoro( + [this, ip, session](boost::asio::yield_context yield) { + handle_request( + yield, + std::move(req_), + lambda_, + backend_, + subscriptions_, + balancer_, + etl_, + tagFactory_, + dosGuard_, + counters_, + *ip, + session, + perfLog_); + }, + dosGuard_.isWhiteListed(*ip))) + { + // Non-whitelist connection rejected due to full connection + // queue + lambda_(httpResponse( + http::status::ok, + "application/json", + boost::json::serialize( + RPC::makeError(RPC::RippledError::rpcTOO_BUSY)))); + } + } + + void + on_write( + bool close, + boost::beast::error_code ec, + std::size_t bytes_transferred) + { + boost::ignore_unused(bytes_transferred); + + if (ec) + return httpFail(ec, "write"); + + if (close) + { + // This means we should close the connection, usually because + // the response indicated the "Connection: close" semantic. + return derived().do_close(); + } + + // We're done with the response so delete it + res_ = nullptr; + + // Read another request + do_read(); + } +}; + +// This function produces an HTTP response for the given +// request. 
The type of the response object depends on the +// contents of the request, so the interface requires the +// caller to pass a generic lambda for receiving the response. +template +void +handle_request( + boost::asio::yield_context& yc, + boost::beast::http:: + request>&& req, + Send&& send, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + std::string const& ip, + std::shared_ptr http, + clio::Logger& perfLog) +{ + auto const httpResponse = [&req]( + http::status status, + std::string content_type, + std::string message) { + http::response res{status, req.version()}; + res.set( + http::field::server, + "clio-server-" + Build::getClioVersionString()); + res.set(http::field::content_type, content_type); + res.keep_alive(req.keep_alive()); + res.body() = std::string(message); + res.prepare_payload(); + return res; + }; + + if (req.method() == http::verb::get && req.body() == "") + { + send(httpResponse(http::status::ok, "text/html", defaultResponse)); + return; + } + + if (req.method() != http::verb::post) + return send(httpResponse( + http::status::bad_request, "text/html", "Expected a POST request")); + + try + { + perfLog.debug() << http->tag() + << "http received request from work queue: " + << req.body(); + + boost::json::object request; + std::string responseStr = ""; + try + { + request = boost::json::parse(req.body()).as_object(); + + if (!request.contains("params")) + request["params"] = boost::json::array({boost::json::object{}}); + } + catch (std::runtime_error const& e) + { + return send(httpResponse( + http::status::ok, + "application/json", + boost::json::serialize( + RPC::makeError(RPC::RippledError::rpcBAD_SYNTAX)))); + } + + auto range = backend->fetchLedgerRange(); + if (!range) + return send(httpResponse( + http::status::ok, + "application/json", + boost::json::serialize( + 
RPC::makeError(RPC::RippledError::rpcNOT_READY)))); + + std::optional context = RPC::make_HttpContext( + yc, + request, + backend, + subscriptions, + balancer, + etl, + tagFactory.with(std::cref(http->tag())), + *range, + counters, + ip); + + if (!context) + return send(httpResponse( + http::status::ok, + "application/json", + boost::json::serialize( + RPC::makeError(RPC::RippledError::rpcBAD_SYNTAX)))); + + boost::json::object response; + auto [v, timeDiff] = + util::timed([&]() { return RPC::buildResponse(*context); }); + + auto us = std::chrono::duration(timeDiff); + RPC::logDuration(*context, us); + + if (auto status = std::get_if(&v)) + { + counters.rpcErrored(context->method); + auto error = RPC::makeError(*status); + error["request"] = request; + response["result"] = error; + + perfLog.debug() + << http->tag() << "Encountered error: " << responseStr; + } + else + { + // This can still technically be an error. Clio counts forwarded + // requests as successful. + + counters.rpcComplete(context->method, us); + + auto result = std::get(v); + if (result.contains("result") && result.at("result").is_object()) + result = result.at("result").as_object(); + + if (!result.contains("error")) + result["status"] = "success"; + + response["result"] = result; + } + + boost::json::array warnings; + warnings.emplace_back(RPC::makeWarning(RPC::warnRPC_CLIO)); + auto lastCloseAge = context->etl->lastCloseAgeSeconds(); + if (lastCloseAge >= 60) + warnings.emplace_back(RPC::makeWarning(RPC::warnRPC_OUTDATED)); + response["warnings"] = warnings; + responseStr = boost::json::serialize(response); + if (!dosGuard.add(ip, responseStr.size())) + { + response["warning"] = "load"; + warnings.emplace_back(RPC::makeWarning(RPC::warnRPC_RATE_LIMIT)); + response["warnings"] = warnings; + // reserialize when we need to include this warning + responseStr = boost::json::serialize(response); + } + return send( + httpResponse(http::status::ok, "application/json", responseStr)); + } + catch 
(std::exception const& e) + { + perfLog.error() << http->tag() << "Caught exception : " << e.what(); + return send(httpResponse( + http::status::internal_server_error, + "application/json", + boost::json::serialize( + RPC::makeError(RPC::RippledError::rpcINTERNAL)))); + } +} diff --git a/src/webserver/HttpSession.h b/src/webserver/HttpSession.h new file mode 100644 index 00000000..7c0eac66 --- /dev/null +++ b/src/webserver/HttpSession.h @@ -0,0 +1,120 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include + +namespace http = boost::beast::http; +namespace net = boost::asio; +namespace ssl = boost::asio::ssl; +using tcp = boost::asio::ip::tcp; + +// Handles an HTTP server connection +class HttpSession : public HttpBase, + public std::enable_shared_from_this +{ + boost::beast::tcp_stream stream_; + std::optional ip_; + +public: + // Take ownership of the socket + explicit HttpSession( + boost::asio::io_context& ioc, + tcp::socket&& socket, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer buffer) + : HttpBase( + ioc, + backend, + subscriptions, + balancer, + etl, + tagFactory, + dosGuard, + counters, + queue, + std::move(buffer)) + , stream_(std::move(socket)) + { + try + { + ip_ = stream_.socket().remote_endpoint().address().to_string(); + } + catch (std::exception const&) + { + } + if (ip_) + HttpBase::dosGuard().increment(*ip_); + } + + ~HttpSession() + { + if (ip_ and not upgraded_) + HttpBase::dosGuard().decrement(*ip_); + } + + boost::beast::tcp_stream& + stream() + { + return stream_; + } + boost::beast::tcp_stream + release_stream() + { + return std::move(stream_); + } + + std::optional + ip() + { + return ip_; + } + + // Start the asynchronous operation + void + run() + { + // We need to be executing within a strand to perform async operations + // on the I/O objects in this HttpSession. Although not strictly + // necessary for single-threaded contexts, this example code is written + // to be thread-safe by default. 
+ net::dispatch( + stream_.get_executor(), + boost::beast::bind_front_handler( + &HttpBase::do_read, shared_from_this())); + } + + void + do_close() + { + // Send a TCP shutdown + boost::beast::error_code ec; + stream_.socket().shutdown(tcp::socket::shutdown_send, ec); + + // At this point the connection is closed gracefully + } +}; diff --git a/src/webserver/Listener.h b/src/webserver/Listener.h new file mode 100644 index 00000000..200ce313 --- /dev/null +++ b/src/webserver/Listener.h @@ -0,0 +1,393 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +class SubscriptionManager; + +template +class Detector + : public std::enable_shared_from_this> +{ + using std::enable_shared_from_this< + Detector>::shared_from_this; + + clio::Logger log_{"WebServer"}; + boost::asio::io_context& ioc_; + boost::beast::tcp_stream stream_; + std::optional> ctx_; + std::shared_ptr backend_; + std::shared_ptr subscriptions_; + std::shared_ptr balancer_; + std::shared_ptr etl_; + util::TagDecoratorFactory const& tagFactory_; + clio::DOSGuard& dosGuard_; + RPC::Counters& counters_; + WorkQueue& queue_; + boost::beast::flat_buffer buffer_; + +public: + Detector( + boost::asio::io_context& ioc, + tcp::socket&& socket, + std::optional> ctx, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue) + : ioc_(ioc) + , stream_(std::move(socket)) + , ctx_(ctx) + , backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , etl_(etl) + , tagFactory_(tagFactory) + , dosGuard_(dosGuard) + , counters_(counters) + , queue_(queue) + { + } + + inline void + fail(boost::system::error_code ec, char const* message) + { + if (ec == net::ssl::error::stream_truncated) + return; + + log_.info() << "Detector failed (" << message << "): " << ec.message(); + } + + // Launch the detector + void + run() + { + // Set the timeout. 
+ boost::beast::get_lowest_layer(stream_).expires_after( + std::chrono::seconds(30)); + // Detect a TLS handshake + async_detect_ssl( + stream_, + buffer_, + boost::beast::bind_front_handler( + &Detector::on_detect, shared_from_this())); + } + + void + on_detect(boost::beast::error_code ec, bool result) + { + if (ec) + return fail(ec, "detect"); + + if (result) + { + if (!ctx_) + return fail(ec, "ssl not supported by this server"); + // Launch SSL session + std::make_shared( + ioc_, + stream_.release_socket(), + *ctx_, + backend_, + subscriptions_, + balancer_, + etl_, + tagFactory_, + dosGuard_, + counters_, + queue_, + std::move(buffer_)) + ->run(); + return; + } + + // Launch plain session + std::make_shared( + ioc_, + stream_.release_socket(), + backend_, + subscriptions_, + balancer_, + etl_, + tagFactory_, + dosGuard_, + counters_, + queue_, + std::move(buffer_)) + ->run(); + } +}; + +void +make_websocket_session( + boost::asio::io_context& ioc, + boost::beast::tcp_stream stream, + std::optional const& ip, + http::request req, + boost::beast::flat_buffer buffer, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue) +{ + std::make_shared( + ioc, + std::move(stream), + ip, + backend, + subscriptions, + balancer, + etl, + tagFactory, + dosGuard, + counters, + queue, + std::move(buffer), + std::move(req)) + ->run(); +} + +void +make_websocket_session( + boost::asio::io_context& ioc, + boost::beast::ssl_stream stream, + std::optional const& ip, + http::request req, + boost::beast::flat_buffer buffer, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue) +{ + std::make_shared( + ioc, + std::move(stream), + ip, + 
backend, + subscriptions, + balancer, + etl, + tagFactory, + dosGuard, + counters, + queue, + std::move(buffer), + std::move(req)) + ->run(); +} + +template +class Listener + : public std::enable_shared_from_this> +{ + using std::enable_shared_from_this< + Listener>::shared_from_this; + + clio::Logger log_{"WebServer"}; + boost::asio::io_context& ioc_; + std::optional> ctx_; + tcp::acceptor acceptor_; + std::shared_ptr backend_; + std::shared_ptr subscriptions_; + std::shared_ptr balancer_; + std::shared_ptr etl_; + util::TagDecoratorFactory tagFactory_; + clio::DOSGuard& dosGuard_; + WorkQueue queue_; + RPC::Counters counters_; + +public: + Listener( + boost::asio::io_context& ioc, + uint32_t numWorkerThreads, + uint32_t maxQueueSize, + std::optional> ctx, + tcp::endpoint endpoint, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory tagFactory, + clio::DOSGuard& dosGuard) + : ioc_(ioc) + , ctx_(ctx) + , acceptor_(net::make_strand(ioc)) + , backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , etl_(etl) + , tagFactory_(std::move(tagFactory)) + , dosGuard_(dosGuard) + , queue_(numWorkerThreads, maxQueueSize) + , counters_(queue_) + { + boost::beast::error_code ec; + + // Open the acceptor + acceptor_.open(endpoint.protocol(), ec); + if (ec) + return; + + // Allow address reuse + acceptor_.set_option(net::socket_base::reuse_address(true), ec); + if (ec) + return; + + // Bind to the server address + acceptor_.bind(endpoint, ec); + if (ec) + { + log_.error() << "Failed to bind to endpoint: " << endpoint + << ". message: " << ec.message(); + throw std::runtime_error("Failed to bind to specified endpoint"); + } + + // Start listening for connections + acceptor_.listen(net::socket_base::max_listen_connections, ec); + if (ec) + { + log_.error() << "Failed to listen at endpoint: " << endpoint + << ". 
message: " << ec.message(); + throw std::runtime_error("Failed to listen at specified endpoint"); + } + } + + // Start accepting incoming connections + void + run() + { + do_accept(); + } + +private: + void + do_accept() + { + // The new connection gets its own strand + acceptor_.async_accept( + net::make_strand(ioc_), + boost::beast::bind_front_handler( + &Listener::on_accept, shared_from_this())); + } + + void + on_accept(boost::beast::error_code ec, tcp::socket socket) + { + if (!ec) + { + auto ctxRef = ctx_ + ? std::optional< + std::reference_wrapper>{ctx_.value()} + : std::nullopt; + // Create the detector session and run it + std::make_shared>( + ioc_, + std::move(socket), + ctxRef, + backend_, + subscriptions_, + balancer_, + etl_, + tagFactory_, + dosGuard_, + counters_, + queue_) + ->run(); + } + + // Accept another connection + do_accept(); + } +}; + +namespace Server { + +using WebsocketServer = Listener; +using HttpServer = Listener; + +static std::shared_ptr +make_HttpServer( + clio::Config const& config, + boost::asio::io_context& ioc, + std::optional> sslCtx, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + clio::DOSGuard& dosGuard) +{ + static clio::Logger log{"WebServer"}; + if (!config.contains("server")) + return nullptr; + + auto const serverConfig = config.section("server"); + auto const address = + boost::asio::ip::make_address(serverConfig.value("ip")); + auto const port = serverConfig.value("port"); + auto const numThreads = config.valueOr( + "workers", std::thread::hardware_concurrency()); + auto const maxQueueSize = + serverConfig.valueOr("max_queue_size", 0); // 0 is no limit + + log.info() << "Number of workers = " << numThreads + << ". 
Max queue size = " << maxQueueSize; + + auto server = std::make_shared( + ioc, + numThreads, + maxQueueSize, + sslCtx, + boost::asio::ip::tcp::endpoint{address, port}, + backend, + subscriptions, + balancer, + etl, + util::TagDecoratorFactory(config), + dosGuard); + + server->run(); + return server; +} +} // namespace Server diff --git a/src/webserver/PlainWsSession.h b/src/webserver/PlainWsSession.h new file mode 100644 index 00000000..c2b299a2 --- /dev/null +++ b/src/webserver/PlainWsSession.h @@ -0,0 +1,226 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace http = boost::beast::http; +namespace net = boost::asio; +namespace ssl = boost::asio::ssl; +namespace websocket = boost::beast::websocket; +using tcp = boost::asio::ip::tcp; + +class ReportingETL; + +// Echoes back all received WebSocket messages +class PlainWsSession : public WsSession +{ + websocket::stream ws_; + +public: + // Take ownership of the socket + explicit PlainWsSession( + boost::asio::io_context& ioc, + boost::asio::ip::tcp::socket&& socket, + std::optional ip, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer&& buffer) + : WsSession( + ioc, + ip, + backend, + subscriptions, + balancer, + etl, + tagFactory, + dosGuard, + counters, + queue, + std::move(buffer)) + , ws_(std::move(socket)) + { + } + + websocket::stream& + ws() + { + return ws_; + } + + std::optional + ip() + { + return ip_; + } + + ~PlainWsSession() = default; +}; + +class WsUpgrader : public std::enable_shared_from_this +{ + boost::asio::io_context& ioc_; + boost::beast::tcp_stream http_; + boost::optional> parser_; + boost::beast::flat_buffer buffer_; + std::shared_ptr backend_; + std::shared_ptr subscriptions_; + std::shared_ptr balancer_; + std::shared_ptr etl_; + util::TagDecoratorFactory const& tagFactory_; + clio::DOSGuard& dosGuard_; + RPC::Counters& counters_; + WorkQueue& queue_; + http::request req_; + std::optional ip_; + +public: + WsUpgrader( + boost::asio::io_context& ioc, + boost::asio::ip::tcp::socket&& socket, + std::optional ip, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + 
util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer&& b) + : ioc_(ioc) + , http_(std::move(socket)) + , buffer_(std::move(b)) + , backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , etl_(etl) + , tagFactory_(tagFactory) + , dosGuard_(dosGuard) + , counters_(counters) + , queue_(queue) + , ip_(ip) + { + } + WsUpgrader( + boost::asio::io_context& ioc, + boost::beast::tcp_stream&& stream, + std::optional ip, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer&& b, + http::request req) + : ioc_(ioc) + , http_(std::move(stream)) + , buffer_(std::move(b)) + , backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , etl_(etl) + , tagFactory_(tagFactory) + , dosGuard_(dosGuard) + , counters_(counters) + , queue_(queue) + , req_(std::move(req)) + , ip_(ip) + { + } + + void + run() + { + // We need to be executing within a strand to perform async operations + // on the I/O objects in this session. Although not strictly necessary + // for single-threaded contexts, this example code is written to be + // thread-safe by default. + + net::dispatch( + http_.get_executor(), + boost::beast::bind_front_handler( + &WsUpgrader::do_upgrade, shared_from_this())); + } + +private: + void + do_upgrade() + { + parser_.emplace(); + + // Apply a reasonable limit to the allowed size + // of the body in bytes to prevent abuse. + parser_->body_limit(10000); + + // Set the timeout. + boost::beast::get_lowest_layer(http_).expires_after( + std::chrono::seconds(30)); + + on_upgrade(); + } + + void + on_upgrade() + { + // See if it is a WebSocket Upgrade + if (!websocket::is_upgrade(req_)) + return; + + // Disable the timeout. 
+ // The websocket::stream uses its own timeout settings. + boost::beast::get_lowest_layer(http_).expires_never(); + + std::make_shared( + ioc_, + http_.release_socket(), + ip_, + backend_, + subscriptions_, + balancer_, + etl_, + tagFactory_, + dosGuard_, + counters_, + queue_, + std::move(buffer_)) + ->run(std::move(req_)); + } +}; diff --git a/src/webserver/README.md b/src/webserver/README.md new file mode 100644 index 00000000..6d3d9d6d --- /dev/null +++ b/src/webserver/README.md @@ -0,0 +1,9 @@ +This folder contains all of the classes for running the webserver. + +The webserver handles JSON-RPC and websocket requests. +The webserver supports SSL if a cert and key file are specified in the config. +The webserver handles all types of requests on a single port. + +Each request is handled asynchronously using boost asio. + +Much of this code was originally copied from boost beast example code. diff --git a/src/webserver/Ssl.h b/src/webserver/Ssl.h new file mode 100644 index 00000000..dd64a1a0 --- /dev/null +++ b/src/webserver/Ssl.h @@ -0,0 +1,63 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include + +#include +#include + +namespace ssl = boost::asio::ssl; + +static std::optional +parse_certs(const char* certFilename, const char* keyFilename) +{ + std::ifstream readCert(certFilename, std::ios::in | std::ios::binary); + if (!readCert) + return {}; + + std::stringstream contents; + contents << readCert.rdbuf(); + readCert.close(); + std::string cert = contents.str(); + + std::ifstream readKey(keyFilename, std::ios::in | std::ios::binary); + if (!readKey) + return {}; + + contents.str(""); + contents << readKey.rdbuf(); + readKey.close(); + std::string key = contents.str(); + + ssl::context ctx{ssl::context::tlsv12}; + + ctx.set_options( + boost::asio::ssl::context::default_workarounds | + boost::asio::ssl::context::no_sslv2); + + ctx.use_certificate_chain(boost::asio::buffer(cert.data(), cert.size())); + + ctx.use_private_key( + boost::asio::buffer(key.data(), key.size()), + boost::asio::ssl::context::file_format::pem); + + return ctx; +} diff --git a/src/webserver/SslHttpSession.h b/src/webserver/SslHttpSession.h new file mode 100644 index 00000000..34d3b76b --- /dev/null +++ b/src/webserver/SslHttpSession.h @@ -0,0 +1,155 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include + +namespace http = boost::beast::http; +namespace net = boost::asio; +namespace ssl = boost::asio::ssl; +using tcp = boost::asio::ip::tcp; + +// Handles an HTTPS server connection +class SslHttpSession : public HttpBase, + public std::enable_shared_from_this +{ + boost::beast::ssl_stream stream_; + std::optional ip_; + +public: + // Take ownership of the socket + explicit SslHttpSession( + boost::asio::io_context& ioc, + tcp::socket&& socket, + ssl::context& ctx, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer buffer) + : HttpBase( + ioc, + backend, + subscriptions, + balancer, + etl, + tagFactory, + dosGuard, + counters, + queue, + std::move(buffer)) + , stream_(std::move(socket), ctx) + { + try + { + ip_ = stream_.next_layer() + .socket() + .remote_endpoint() + .address() + .to_string(); + } + catch (std::exception const&) + { + } + if (ip_) + HttpBase::dosGuard().increment(*ip_); + } + + ~SslHttpSession() + { + if (ip_ and not upgraded_) + HttpBase::dosGuard().decrement(*ip_); + } + + boost::beast::ssl_stream& + stream() + { + return stream_; + } + boost::beast::ssl_stream + release_stream() + { + return std::move(stream_); + } + + std::optional + ip() + { + return ip_; + } + + // Start the asynchronous operation + void + run() + { + auto self = shared_from_this(); + // We need to be executing within a strand to perform 
async operations + // on the I/O objects in this session. + net::dispatch(stream_.get_executor(), [self]() { + // Set the timeout. + boost::beast::get_lowest_layer(self->stream()) + .expires_after(std::chrono::seconds(30)); + + // Perform the SSL handshake + // Note, this is the buffered version of the handshake. + self->stream_.async_handshake( + ssl::stream_base::server, + self->buffer_.data(), + boost::beast::bind_front_handler( + &SslHttpSession::on_handshake, self)); + }); + } + + void + on_handshake(boost::beast::error_code ec, std::size_t bytes_used) + { + if (ec) + return httpFail(ec, "handshake"); + + buffer_.consume(bytes_used); + + do_read(); + } + + void + do_close() + { + // Set the timeout. + boost::beast::get_lowest_layer(stream_).expires_after( + std::chrono::seconds(30)); + + // Perform the SSL shutdown + stream_.async_shutdown(boost::beast::bind_front_handler( + &SslHttpSession::on_shutdown, shared_from_this())); + } + + void + on_shutdown(boost::beast::error_code ec) + { + if (ec) + return httpFail(ec, "shutdown"); + + // At this point the connection is closed gracefully + } +}; diff --git a/src/webserver/SslWsSession.h b/src/webserver/SslWsSession.h new file mode 100644 index 00000000..00a867d7 --- /dev/null +++ b/src/webserver/SslWsSession.h @@ -0,0 +1,238 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include + +namespace http = boost::beast::http; +namespace net = boost::asio; +namespace ssl = boost::asio::ssl; +namespace websocket = boost::beast::websocket; +using tcp = boost::asio::ip::tcp; + +class ReportingETL; + +class SslWsSession : public WsSession +{ + boost::beast::websocket::stream< + boost::beast::ssl_stream> + ws_; + +public: + // Take ownership of the socket + explicit SslWsSession( + boost::asio::io_context& ioc, + boost::beast::ssl_stream&& stream, + std::optional ip, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer&& b) + : WsSession( + ioc, + ip, + backend, + subscriptions, + balancer, + etl, + tagFactory, + dosGuard, + counters, + queue, + std::move(b)) + , ws_(std::move(stream)) + { + } + boost::beast::websocket::stream< + boost::beast::ssl_stream>& + ws() + { + return ws_; + } + + std::optional + ip() + { + return ip_; + } +}; + +class SslWsUpgrader : public std::enable_shared_from_this +{ + boost::asio::io_context& ioc_; + boost::beast::ssl_stream https_; + boost::optional> parser_; + boost::beast::flat_buffer buffer_; + std::optional ip_; + std::shared_ptr backend_; + std::shared_ptr subscriptions_; + std::shared_ptr balancer_; + std::shared_ptr etl_; + util::TagDecoratorFactory const& tagFactory_; + clio::DOSGuard& dosGuard_; + RPC::Counters& 
counters_; + WorkQueue& queue_; + http::request req_; + +public: + SslWsUpgrader( + boost::asio::io_context& ioc, + std::optional ip, + boost::asio::ip::tcp::socket&& socket, + ssl::context& ctx, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer&& b) + : ioc_(ioc) + , https_(std::move(socket), ctx) + , buffer_(std::move(b)) + , ip_(ip) + , backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , etl_(etl) + , tagFactory_(tagFactory) + , dosGuard_(dosGuard) + , counters_(counters) + , queue_(queue) + { + } + SslWsUpgrader( + boost::asio::io_context& ioc, + boost::beast::ssl_stream stream, + std::optional ip, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer&& b, + http::request req) + : ioc_(ioc) + , https_(std::move(stream)) + , buffer_(std::move(b)) + , ip_(ip) + , backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , etl_(etl) + , tagFactory_(tagFactory) + , dosGuard_(dosGuard) + , counters_(counters) + , queue_(queue) + , req_(std::move(req)) + { + } + + ~SslWsUpgrader() = default; + + void + run() + { + // Set the timeout. 
+ boost::beast::get_lowest_layer(https_).expires_after( + std::chrono::seconds(30)); + + net::dispatch( + https_.get_executor(), + boost::beast::bind_front_handler( + &SslWsUpgrader::do_upgrade, shared_from_this())); + } + +private: + void + on_handshake(boost::beast::error_code ec, std::size_t bytes_used) + { + if (ec) + return logError(ec, "handshake"); + + // Consume the portion of the buffer used by the handshake + buffer_.consume(bytes_used); + + do_upgrade(); + } + + void + do_upgrade() + { + parser_.emplace(); + + // Apply a reasonable limit to the allowed size + // of the body in bytes to prevent abuse. + parser_->body_limit(10000); + + // Set the timeout. + boost::beast::get_lowest_layer(https_).expires_after( + std::chrono::seconds(30)); + + on_upgrade(); + } + + void + on_upgrade() + { + // See if it is a WebSocket Upgrade + if (!websocket::is_upgrade(req_)) + { + return; + } + + // Disable the timeout. + // The websocket::stream uses its own timeout settings. + boost::beast::get_lowest_layer(https_).expires_never(); + + std::make_shared( + ioc_, + std::move(https_), + ip_, + backend_, + subscriptions_, + balancer_, + etl_, + tagFactory_, + dosGuard_, + counters_, + queue_, + std::move(buffer_)) + ->run(std::move(req_)); + } +}; diff --git a/src/webserver/WsBase.h b/src/webserver/WsBase.h new file mode 100644 index 00000000..2648d576 --- /dev/null +++ b/src/webserver/WsBase.h @@ -0,0 +1,485 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +// TODO: Consider removing these. Visible to anyone including this header. +namespace http = boost::beast::http; +namespace net = boost::asio; +namespace ssl = boost::asio::ssl; +namespace websocket = boost::beast::websocket; +using tcp = boost::asio::ip::tcp; + +inline void +logError(boost::beast::error_code ec, char const* what) +{ + static clio::Logger log{"WebServer"}; + log.debug() << what << ": " << ec.message() << "\n"; +} + +inline boost::json::object +getDefaultWsResponse(boost::json::value const& id) +{ + boost::json::object defaultResp = {}; + if (!id.is_null()) + defaultResp["id"] = id; + + defaultResp["status"] = "success"; + defaultResp["type"] = "response"; + + return defaultResp; +} + +class WsBase : public util::Taggable +{ +protected: + clio::Logger log_{"WebServer"}; + clio::Logger perfLog_{"Performance"}; + boost::system::error_code ec_; + +public: + explicit WsBase(util::TagDecoratorFactory const& tagFactory) + : Taggable{tagFactory} + { + } + + /** + * @brief Send, that enables SubscriptionManager to publish to clients + * @param msg The message to send + */ + virtual void + send(std::shared_ptr msg) = 0; + + virtual ~WsBase() = default; + + /** + * @brief Indicates whether the 
connection had an error and is considered + * dead + * + * @return true + * @return false + */ + bool + dead() + { + return ec_ != boost::system::error_code{}; + } +}; + +class SubscriptionManager; +class ETLLoadBalancer; + +// Echoes back all received WebSocket messages +template +class WsSession : public WsBase, + public std::enable_shared_from_this> +{ + using std::enable_shared_from_this>::shared_from_this; + + boost::beast::flat_buffer buffer_; + + boost::asio::io_context& ioc_; + std::shared_ptr backend_; + // has to be a weak ptr because SubscriptionManager maintains collections + // of std::shared_ptr objects. If this were shared, there would be + // a cyclical dependency that would block destruction + std::weak_ptr subscriptions_; + std::shared_ptr balancer_; + std::shared_ptr etl_; + util::TagDecoratorFactory const& tagFactory_; + clio::DOSGuard& dosGuard_; + RPC::Counters& counters_; + WorkQueue& queue_; + std::mutex mtx_; + + bool sending_ = false; + std::queue> messages_; + +protected: + std::optional ip_; + + void + wsFail(boost::beast::error_code ec, char const* what) + { + if (!ec_ && ec != boost::asio::error::operation_aborted) + { + ec_ = ec; + perfLog_.info() << tag() << ": " << what << ": " << ec.message(); + boost::beast::get_lowest_layer(derived().ws()).socket().close(ec); + + if (auto manager = subscriptions_.lock(); manager) + manager->cleanup(derived().shared_from_this()); + } + } + +public: + explicit WsSession( + boost::asio::io_context& ioc, + std::optional ip, + std::shared_ptr backend, + std::shared_ptr subscriptions, + std::shared_ptr balancer, + std::shared_ptr etl, + util::TagDecoratorFactory const& tagFactory, + clio::DOSGuard& dosGuard, + RPC::Counters& counters, + WorkQueue& queue, + boost::beast::flat_buffer&& buffer) + : WsBase(tagFactory) + , buffer_(std::move(buffer)) + , ioc_(ioc) + , backend_(backend) + , subscriptions_(subscriptions) + , balancer_(balancer) + , etl_(etl) + , tagFactory_(tagFactory) + , dosGuard_(dosGuard) 
+ , counters_(counters) + , queue_(queue) + , ip_(ip) + { + perfLog_.info() << tag() << "session created"; + } + + virtual ~WsSession() + { + perfLog_.info() << tag() << "session closed"; + if (ip_) + dosGuard_.decrement(*ip_); + } + + // Access the derived class, this is part of + // the Curiously Recurring Template Pattern idiom. + Derived& + derived() + { + return static_cast(*this); + } + + void + do_write() + { + sending_ = true; + derived().ws().async_write( + net::buffer(messages_.front()->data(), messages_.front()->size()), + boost::beast::bind_front_handler( + &WsSession::on_write, derived().shared_from_this())); + } + + void + on_write(boost::system::error_code ec, std::size_t) + { + if (ec) + { + wsFail(ec, "Failed to write"); + } + else + { + messages_.pop(); + sending_ = false; + maybe_send_next(); + } + } + + void + maybe_send_next() + { + if (ec_ || sending_ || messages_.empty()) + return; + + do_write(); + } + + void + send(std::shared_ptr msg) override + { + net::dispatch( + derived().ws().get_executor(), + [this, + self = derived().shared_from_this(), + msg = std::move(msg)]() { + messages_.push(std::move(msg)); + maybe_send_next(); + }); + } + + void + send(std::string&& msg) + { + auto sharedMsg = std::make_shared(std::move(msg)); + send(sharedMsg); + } + + void + run(http::request req) + { + // Set suggested timeout settings for the websocket + derived().ws().set_option(websocket::stream_base::timeout::suggested( + boost::beast::role_type::server)); + + // Set a decorator to change the Server of the handshake + derived().ws().set_option(websocket::stream_base::decorator( + [](websocket::response_type& res) { + res.set( + http::field::server, + std::string(BOOST_BEAST_VERSION_STRING) + + " websocket-server-async"); + })); + + derived().ws().async_accept( + req, + boost::beast::bind_front_handler( + &WsSession::on_accept, this->shared_from_this())); + } + + void + on_accept(boost::beast::error_code ec) + { + if (ec) + return wsFail(ec, "accept"); 
+ + perfLog_.info() << tag() << "accepting new connection"; + + // Read a message + do_read(); + } + + void + do_read() + { + if (dead()) + return; + + std::lock_guard lck{mtx_}; + // Clear the buffer + buffer_.consume(buffer_.size()); + // Read a message into our buffer + derived().ws().async_read( + buffer_, + boost::beast::bind_front_handler( + &WsSession::on_read, this->shared_from_this())); + } + + void + handle_request( + boost::json::object const&& request, + boost::json::value const& id, + boost::asio::yield_context& yield) + { + auto ip = derived().ip(); + if (!ip) + return; + + boost::json::object response = {}; + auto sendError = [this, &request, id](auto error) { + auto e = RPC::makeError(error); + if (!id.is_null()) + e["id"] = id; + e["request"] = request; + this->send(boost::json::serialize(e)); + }; + + try + { + log_.info() << tag() + << "ws received request from work queue : " << request; + + auto range = backend_->fetchLedgerRange(); + if (!range) + return sendError(RPC::RippledError::rpcNOT_READY); + + std::optional context = RPC::make_WsContext( + yield, + request, + backend_, + subscriptions_.lock(), + balancer_, + etl_, + shared_from_this(), + tagFactory_.with(std::cref(tag())), + *range, + counters_, + *ip); + + if (!context) + { + perfLog_.warn() << tag() << "Could not create RPC context"; + return sendError(RPC::RippledError::rpcBAD_SYNTAX); + } + + response = getDefaultWsResponse(id); + + auto [v, timeDiff] = + util::timed([&]() { return RPC::buildResponse(*context); }); + + auto us = std::chrono::duration(timeDiff); + logDuration(*context, us); + + if (auto status = std::get_if(&v)) + { + counters_.rpcErrored(context->method); + + auto error = RPC::makeError(*status); + + if (!id.is_null()) + error["id"] = id; + + error["request"] = request; + response = error; + } + else + { + counters_.rpcComplete(context->method, us); + + auto const& result = std::get(v); + auto const isForwarded = result.contains("forwarded") && + 
result.at("forwarded").is_bool() && + result.at("forwarded").as_bool(); + + // if the result is forwarded - just use it as is + // but keep all default fields in the response too. + if (isForwarded) + for (auto const& [k, v] : result) + response.insert_or_assign(k, v); + else + response["result"] = result; + } + } + catch (std::exception const& e) + { + perfLog_.error() << tag() << "Caught exception : " << e.what(); + + return sendError(RPC::RippledError::rpcINTERNAL); + } + + boost::json::array warnings; + + warnings.emplace_back(RPC::makeWarning(RPC::warnRPC_CLIO)); + + auto lastCloseAge = etl_->lastCloseAgeSeconds(); + if (lastCloseAge >= 60) + warnings.emplace_back(RPC::makeWarning(RPC::warnRPC_OUTDATED)); + response["warnings"] = warnings; + std::string responseStr = boost::json::serialize(response); + if (!dosGuard_.add(*ip, responseStr.size())) + { + response["warning"] = "load"; + warnings.emplace_back(RPC::makeWarning(RPC::warnRPC_RATE_LIMIT)); + response["warnings"] = warnings; + // reserialize if we need to include this warning + responseStr = boost::json::serialize(response); + } + send(std::move(responseStr)); + } + + void + on_read(boost::beast::error_code ec, std::size_t bytes_transferred) + { + boost::ignore_unused(bytes_transferred); + + if (ec) + return wsFail(ec, "read"); + + std::string msg{ + static_cast(buffer_.data().data()), buffer_.size()}; + auto ip = derived().ip(); + + if (!ip) + return; + + perfLog_.info() << tag() << "Received request from ip = " << *ip; + + auto sendError = [this, ip]( + auto error, + boost::json::value const& id, + boost::json::object const& request) { + auto e = RPC::makeError(error); + + if (!id.is_null()) + e["id"] = id; + e["request"] = request; + + auto responseStr = boost::json::serialize(e); + log_.trace() << responseStr; + dosGuard_.add(*ip, responseStr.size()); + send(std::move(responseStr)); + }; + + boost::json::value raw = [](std::string const&& msg) { + try + { + return boost::json::parse(msg); + } + 
catch (std::exception&) + { + return boost::json::value{nullptr}; + } + }(std::move(msg)); + + boost::json::object request; + // dosGuard served request++ and check ip address + // dosGuard should check before any request, even invalid request + if (!dosGuard_.request(*ip)) + { + sendError(RPC::RippledError::rpcSLOW_DOWN, nullptr, request); + } + else if (!raw.is_object()) + { + // handle invalid request and async read again + sendError(RPC::RippledError::rpcINVALID_PARAMS, nullptr, request); + } + else + { + request = raw.as_object(); + + auto id = request.contains("id") ? request.at("id") : nullptr; + perfLog_.debug() << tag() << "Adding to work queue"; + + if (!queue_.postCoro( + [shared_this = shared_from_this(), + r = std::move(request), + id](boost::asio::yield_context yield) { + shared_this->handle_request(std::move(r), id, yield); + }, + dosGuard_.isWhiteListed(*ip))) + sendError(RPC::RippledError::rpcTOO_BUSY, id, request); + } + + do_read(); + } +}; diff --git a/test.py b/test.py new file mode 100755 index 00000000..121fd7fc --- /dev/null +++ b/test.py @@ -0,0 +1,1308 @@ +#!/usr/bin/python3 + +from ast import parse +import websockets +import asyncio +import json +import io +import os +import subprocess +import argparse +import time +import threading + +def checkAccountInfo(aldous, p2p): + return isSubset(aldous["object"], p2p["result"]["account_data"]) + +def isSubset(sub, sup): + for x in sub: + if x == "deserialization_time_microsecond": + continue + if not x in sup: + return False + elif not sub[x] == sup[x]: + return False + return True + + +async def call(ip,port,msg): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(msg) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + except websockets.exceptions.ConnectionClosedError as e: + print(e) + + +def compareAccountInfo(aldous, p2p): + p2p = p2p["result"]["account_data"] + aldous = aldous["object"] + 
if isSubset(p2p,aldous): + print("Responses match!!") + else: + print("Response mismatch") + print(aldous) + print(p2p) +def compareTx(aldous, p2p): + p2p = p2p["result"] + if aldous["transaction"] != p2p["tx"]: + print("transaction mismatch") + print(aldous["transaction"]) + print(p2p["tx"]) + return False + if aldous["metadata"] != p2p["meta"] and not isinstance(p2p["meta"],dict): + print("metadata mismatch") + print("aldous : " + aldous["metadata"]) + print("p2p : " + str(p2p["meta"])) + return False + if aldous["ledger_sequence"] != p2p["ledger_index"]: + print("ledger sequence mismatch") + print(aldous["ledger_sequence"]) + print(p2p["ledger_index"]) + print("responses match!!") + return True + +def compareAccountTx(aldous, p2p): + print(p2p) + if "result" in p2p: + p2p = p2p["result"] + maxLedger = getMinAndMax(aldous)[1] + minLedger = getMinAndMax(p2p)[0] + p2pTxns = [] + p2pMetas = [] + p2pLedgerSequences = [] + for x in p2p["transactions"]: + p2pTxns.append(x["tx_blob"]) + p2pMetas.append(x["meta"]) + p2pLedgerSequences.append(x["ledger_index"]) + aldousTxns = [] + aldousMetas = [] + aldousLedgerSequences = [] + for x in aldous["transactions"]: + aldousTxns.append(x["transaction"]) + aldousMetas.append(x["metadata"]) + aldousLedgerSequences.append(x["ledger_sequence"]) + + p2pTxns.sort() + p2pMetas.sort() + p2pLedgerSequences.sort() + aldousTxns.sort() + aldousMetas.sort() + aldousLedgerSequences.sort() + if p2pTxns == aldousTxns and p2pMetas == aldousMetas and p2pLedgerSequences == aldousLedgerSequences: + print("Responses match!!!") + print(len(aldousTxns)) + print(len(p2pTxns)) + else: + print("Mismatch responses") + print(len(aldousTxns)) + print(len(aldous["transactions"])) + print(len(p2pTxns)) + print(len(p2p["transactions"])) + print(maxLedger) + +def getAccounts(filename): + accounts = [] + with open(filename) as f: + for line in f: + if line[0] == "{": + jv = json.loads(line) + accounts.append(jv["Account"]) + if len(line) == 35: + 
accounts.append(line[0:34]) + if len(line) == 44: + accounts.append(line[3:43]) + if len(line) == 65: + accounts.append(line[0:64]) + if len(line) == 41 or len(line) == 40: + accounts.append(line[0:40]) + elif len(line) == 43: + accounts.append(line[2:42]) + return accounts +def getAccountsAndCursors(filename): + accounts = [] + cursors = [] + with open(filename) as f: + for line in f: + if len(line) == 0: + continue + space = line.find(" ") + cursor = line[space+1:len(line)-1] + if cursor == "None": + cursors.append(None) + else: + cursors.append(json.loads(cursor)) + accounts.append(line[0:space]) + + return (accounts,cursors) +def getBooks(filename): + books = [] + with open(filename) as f: + for line in f: + if len(line) == 68: + books.append(line[3:67]) + return books +def compareLedgerData(aldous, p2p): + aldous[0].sort() + aldous[1].sort() + p2p[0].sort() + p2p[1].sort() + if aldous[0] != p2p[0]: + print("Keys mismatch :(") + print(len(aldous[0])) + print(len(p2p[0])) + return False + if aldous[1] != p2p[1]: + print("Objects mismatch :(") + print(len(aldous[1])) + print(len(p2p[1])) + return False + print("Responses match!!!!") + + + + + + + +async def account_infos(ip, port, accounts, numCalls): + + address = 'ws://' + str(ip) + ':' + str(port) + random.seed() + try: + async with websockets.connect(address,max_size=1000000000) as ws: + print(len(accounts)) + for x in range(0,numCalls): + account = accounts[random.randrange(0,len(accounts))] + start = datetime.datetime.now().timestamp() + await ws.send(json.dumps({"command":"account_info","account":account,"binary":True})) + res = json.loads(await ws.recv()) + end = datetime.datetime.now().timestamp() + if (end - start) > 0.1: + print("request took more than 100ms") + + except websockets.exceptions.connectionclosederror as e: + print(e) + + +async def account_info(ip, port, account, ledger, binary): + address = 'ws://' + str(ip) + ':' + str(port) + print(binary) + try: + async with 
websockets.connect(address) as ws: + if ledger is None: + await ws.send(json.dumps({"command":"account_info","account":account, "binary":bool(binary)})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + else: + await ws.send(json.dumps({"command":"account_info","account":account, "ledger_index":int(ledger), "binary":bool(binary)})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + return res + except websockets.exceptions.ConnectionClosedError as e: + print(e) + +def getMinAndMax(res): + minSeq = None + maxSeq = None + for x in res["transactions"]: + seq = None + if "ledger_sequence" in x: + seq = int(x["ledger_sequence"]) + else: + seq = int(x["ledger_index"]) + if minSeq is None or seq < minSeq: + minSeq = seq + if maxSeq is None or seq > maxSeq: + maxSeq = seq + return (minSeq,maxSeq) + + +async def account_tx(ip, port, account, binary, forward=False, minLedger=None, maxLedger=None): + + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + if minLedger is None or maxLedger is None: + await ws.send(json.dumps({"command":"account_tx","account":account, "binary":bool(binary),"forward":bool(forward),"limit":200})) + else: + await ws.send(json.dumps({"command":"account_tx","account":account, "binary":bool(binary),"forward":bool(forward),"limit":200,"ledger_index_min":minLedger, "ledger_index_max":maxLedger})) + + res = json.loads(await ws.recv()) + #print(json.dumps(res,indent=4,sort_keys=True)) + return res + except websockets.exceptions.ConnectionClosedError as e: + print(e) + +async def account_txs_full(ip, port, accounts, cursors, numCalls, limit): + + address = 'ws://' + str(ip) + ':' + str(port) + random.seed() + try: + async with websockets.connect(address,max_size=1000000000) as ws: + print(len(accounts)) + cursor = None + account = None + time = 0.0 + for x in range(0,numCalls): + + idx = random.randrange(0,len(accounts)) + account = 
accounts[idx] + cursor = cursors[idx] + start = datetime.datetime.now().timestamp() + if cursor is None: + await ws.send(json.dumps({"command":"account_tx","account":account,"binary":True,"limit":limit})) + else: + marker = {} + marker["ledger"] = cursor["ledger_sequence"] + marker["seq"] = cursor["transaction_index"] + await ws.send(json.dumps({"command":"account_tx","account":account,"cursor":cursor,"marker":marker,"binary":True,"limit":limit,"forward":False})) + + res = json.loads(await ws.recv()) + end = datetime.datetime.now().timestamp() + print(end-start) + time += (end - start) + txns = [] + if "result" in res: + txns = res["result"]["transactions"] + else: + txns = res["transactions"] + print(len(txns)) + print(account + " " + json.dumps(cursor)) + if (end - start) > 0.1: + print("request took more than 100ms") + print("Latency = " + str(time / numCalls)) + + except websockets.exceptions.connectionclosederror as e: + print(e) +async def account_txs(ip, port, accounts, numCalls): + + address = 'ws://' + str(ip) + ':' + str(port) + random.seed() + try: + async with websockets.connect(address,max_size=1000000000) as ws: + print(len(accounts)) + cursor = None + account = None + for x in range(0,numCalls): + + if cursor is None: + account = accounts[random.randrange(0,len(accounts))] + start = datetime.datetime.now().timestamp() + await ws.send(json.dumps({"command":"account_tx","account":account,"binary":True,"limit":200})) + else: + await ws.send(json.dumps({"command":"account_tx","account":account,"cursor":cursor,"binary":True,"limit":200})) + + + res = json.loads(await ws.recv()) + if "cursor" in res: + if cursor: + print(account + " " + json.dumps(cursor)) + else: + print(account + " " + "None") + #cursor = res["cursor"] + elif cursor: + print(account + " " + json.dumps(cursor)) + cursor = None + + + end = datetime.datetime.now().timestamp() + if (end - start) > 0.1: + print("request took more than 100ms") + + except 
websockets.exceptions.connectionclosederror as e: + print(e) + +async def account_tx_full(ip, port, account, binary,forward=False,minLedger=None, maxLedger=None): + address = 'ws://' + str(ip) + ':' + str(port) + try: + cursor = None + marker = None + req = {"command":"account_tx","account":account, "binary":bool(binary),"forward":bool(forward),"limit":200} + results = {"transactions":[]} + numCalls = 0 + async with websockets.connect(address) as ws: + while True: + numCalls = numCalls+1 + if not cursor is None: + req["cursor"] = cursor + if not marker is None: + req["marker"] = marker + if minLedger is not None and maxLedger is not None: + req["ledger_index_min"] = minLedger + req["ledger_index_max"] = maxLedger + start = datetime.datetime.now().timestamp() + await ws.send(json.dumps(req)) + res = await ws.recv() + + end = datetime.datetime.now().timestamp() + + print(end - start) + res = json.loads(res) + #print(json.dumps(res,indent=4,sort_keys=True)) + if "result" in res: + print(len(res["result"]["transactions"])) + else: + print(len(res["transactions"])) + if "result" in res: + results["transactions"].extend(res["result"]["transactions"]) + else: + results["transactions"].extend(res["transactions"]) + if "cursor" in res: + cursor = {"ledger_sequence":res["cursor"]["ledger_sequence"],"transaction_index":res["cursor"]["transaction_index"]} + print(cursor) + elif "result" in res and "marker" in res["result"]: + marker={"ledger":res["result"]["marker"]["ledger"],"seq":res["result"]["marker"]["seq"]} + print(marker) + else: + print(res) + break + return results + except websockets.exceptions.ConnectionClosedError as e: + print(e) + +async def tx(ip, port, tx_hash, binary): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(json.dumps({"command":"tx","transaction":tx_hash,"binary":bool(binary)})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + return res + 
except websockets.exceptions.connectionclosederror as e: + print(e) +async def txs(ip, port, hashes, numCalls): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + for x in range(0,numCalls): + h = hashes[random.randrange(0,len(hashes))] + start = datetime.datetime.now().timestamp() + await ws.send(json.dumps({"command":"tx","transaction":h,"binary":True})) + res = json.loads(await ws.recv()) + end = datetime.datetime.now().timestamp() + if (end - start) > 0.1: + print("request took more than 100ms") + except websockets.exceptions.connectionclosederror as e: + print(e) + +async def ledger_entry(ip, port, index, ledger, binary): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(json.dumps({"command":"ledger_entry","index":index,"binary":bool(binary),"ledger_index":int(ledger)})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + if "result" in res: + res = res["result"] + if "object" in res: + return (index,res["object"]) + else: + return (index,res["node_binary"]) + except websockets.exceptions.connectionclosederror as e: + print(e) +async def ledger_entries(ip, port, ledger, keys, numCalls): + address = 'ws://' + str(ip) + ':' + str(port) + random.seed() + try: + async with websockets.connect(address) as ws: + print(len(keys)) + for x in range(0,numCalls): + index = keys[random.randrange(0,len(keys))] + start = datetime.datetime.now().timestamp() + await ws.send(json.dumps({"command":"ledger_entry","index":index,"binary":True,"ledger_index":int(ledger)})) + res = json.loads(await ws.recv()) + end = datetime.datetime.now().timestamp() + if (end - start) > 0.1: + print("request took more than 100ms") + + except websockets.exceptions.connectionclosederror as e: + print(e) + +async def ledger_entries(ip, port,ledger): + address = 'ws://' + str(ip) + ':' + str(port) + entries = await ledger_data(ip, port, ledger, 
200, True) + + try: + async with websockets.connect(address) as ws: + objects = [] + for x,y in zip(entries[0],entries[1]): + await ws.send(json.dumps({"command":"ledger_entry","index":x,"binary":True,"ledger_index":int(ledger)})) + res = json.loads(await ws.recv()) + objects.append((x,res["object"])) + if res["object"] != y: + print("data mismatch") + return None + print("Data matches!") + return objects + + except websockets.exceptions.connectionclosederror as e: + print(e) + +async def ledger_data(ip, port, ledger, limit, binary, cursor): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + if limit is not None: + await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":bool(binary),"limit":int(limit),"cursor":cursor,"marker":cursor})) + else: + await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":bool(binary),"cursor":cursor,"marker":cursor})) + res = json.loads(await ws.recv()) + print(res) + objects = [] + blobs = [] + keys = [] + if "result" in res: + objects = res["result"]["state"] + else: + objects = res["objects"] + if binary: + for x in objects: + blobs.append(x["data"]) + keys.append(x["index"]) + if len(x["index"]) != 64: + print("bad key") + return (keys,blobs) + + except websockets.exceptions.connectionclosederror as e: + print(e) + +def writeLedgerData(state,filename): + print(len(state)) + + with open(filename,'w') as f: + for k,v in state.items(): + f.write(k) + f.write(':') + f.write(v) + f.write('\n') + + +async def ledger_data_full(ip, port, ledger, binary, limit, typ=None, count=-1, marker = None): + address = 'ws://' + str(ip) + ':' + str(port) + try: + state = {} + async with websockets.connect(address,max_size=1000000000) as ws: + if int(limit) < 2048: + limit = 2048 + while True: + res = {} + if marker is None: + await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":binary, 
"limit":int(limit),"out_of_order":True})) + res = json.loads(await ws.recv()) + + else: + + await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"cursor":marker, "marker":marker,"binary":bool(binary), "limit":int(limit),"out_of_order":True})) + res = json.loads(await ws.recv()) + + + if "error" in res: + print(res["error"]) + continue + + objects = [] + if "result" in res: + objects = res["result"]["state"] + else: + objects = res["objects"] + for x in objects: + if binary: + if typ is None or x["data"][2:6] == typ: + #print(json.dumps(x)) + state[x["index"]] = x["data"] + else: + if typ is None or x["LedgerEntryType"] == typ: + state[x["index"]] = x + if count != -1 and len(state) > count: + print("stopping early") + print(len(state)) + print("done") + return state + if "cursor" in res: + marker = res["cursor"] + print(marker) + elif "result" in res and "marker" in res["result"]: + marker = res["result"]["marker"] + print(marker) + else: + print("done") + return state + + + except websockets.exceptions.connectionclosederror as e: + print(e) + +def compare_offer(aldous, p2p): + for k,v in aldous.items(): + if k == "deserialization_time_microseconds": + continue + if p2p[k] != v: + print("mismatch at field") + print(k) + return False + return True + +def compare_book_offers(aldous, p2p): + p2pOffers = {} + for x in p2p: + matched = False + for y in aldous: + if y["index"] == x["index"]: + if not compare_offer(y,x): + print("Mismatched offer") + print(y) + print(x) + return False + else: + matched = True + if not matched: + print("offer not found") + print(x) + return False + print("offers match!") + return True + +async def book_changes(ip, port, ledger): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(json.dumps({ + "command" : "book_changes", + "ledger_index" : ledger + })) + res = json.loads(await ws.recv()) + print(json.dumps(res, indent=4, sort_keys=True)) + except 
websockets.exceptions.connectionclosederror as e: + print(e) + +async def book_offerses(ip, port, ledger, books, numCalls): + address = 'ws://' + str(ip) + ':' + str(port) + random.seed() + try: + async with websockets.connect(address,max_size=1000000000) as ws: + print(len(books)) + for x in range(0,numCalls): + book = books[random.randrange(0,len(books))] + start = datetime.datetime.now().timestamp() + await ws.send(json.dumps({"command":"book_offers","book":book,"binary":True})) + res = json.loads(await ws.recv()) + end = datetime.datetime.now().timestamp() + print(book) + print(len(res["offers"])) + if (end - start) > 0.1: + print("request took more than 100ms") + + except websockets.exceptions.connectionclosederror as e: + print(e) + +async def book_offers(ip, port, ledger, pay_currency, pay_issuer, get_currency, get_issuer, binary, limit): + + address = 'ws://' + str(ip) + ':' + str(port) + try: + offers = [] + cursor = None + async with websockets.connect(address) as ws: + while True: + taker_gets = json.loads("{\"currency\":\"" + get_currency+"\"}") + if get_issuer is not None: + taker_gets["issuer"] = get_issuer + taker_pays = json.loads("{\"currency\":\"" + pay_currency + "\"}") + if pay_issuer is not None: + taker_pays["issuer"] = pay_issuer + req = {"command":"book_offers","ledger_index":int(ledger), "taker_pays":taker_pays, "taker_gets":taker_gets, "binary":bool(binary), "limit":int(limit)} + if cursor is not None: + req["cursor"] = cursor + await ws.send(json.dumps(req)) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + if "result" in res: + res = res["result"] + for x in res["offers"]: + offers.append(x) + if "cursor" in res: + cursor = res["cursor"] + print(cursor) + else: + print(len(offers)) + return offers + + except websockets.exceptions.connectionclosederror as e: + print(e) + +def compareLedger(aldous, p2p): + p2p = p2p["result"]["ledger"] + p2pHeader = p2p["ledger_data"] + aldousHeader = 
aldous["header"]["blob"] + if p2pHeader == aldousHeader: + print("Headers match!!!") + else: + print("Header mismatch") + print(aldousHeader) + print(p2pHeader) + return + + p2pTxns = [] + p2pMetas = [] + for x in p2p["transactions"]: + p2pTxns.append(x["tx_blob"]) + p2pMetas.append(x["meta"]) + aldousTxns = [] + aldousMetas = [] + for x in aldous["transactions"]: + aldousTxns.append(x["transaction"]) + aldousMetas.append(x["metadata"]) + + + + p2pTxns.sort() + p2pMetas.sort() + aldousTxns.sort() + aldousMetas.sort() + if p2pTxns == aldousTxns and p2pMetas == aldousMetas: + print("Responses match!!!") + else: + print("Mismatch responses") + print(aldous) + print(p2p) + +def getHashesFromFile(filename): + hashes = [] + with open(filename) as f: + for line in f: + if len(line) == 65: + hashes.append(line[0:64]) + return hashes + + +def getHashes(res): + if "result" in res: + res = res["result"]["ledger"] + + hashes = [] + for x in res["transactions"]: + if "hash" in x: + hashes.append(x["hash"]) + elif "transaction" in x and "hash" in x["transaction"]: + hashes.append(x["transaction"]["hash"]) + else: + hashes.append(x) + return hashes + +import random +import datetime +import ssl +import pathlib +numCalls = 0 +async def ledgers(ip, port, minLedger, maxLedger, transactions, expand, maxCalls): + global numCalls + address = 'ws://' + str(ip) + ':' + str(port) + random.seed() + ledger = 0 + ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + localhost_pem = pathlib.Path(__file__).with_name("cert.pem") + ssl_context.load_verify_locations(localhost_pem) + ssl_context.check_hostname = False + ssl_context.verify_mode = ssl.CERT_NONE + try: + async with websockets.connect(address,max_size=1000000000) as ws: + global numCalls + for i in range(0, maxCalls): + + + ledger = random.randrange(minLedger,maxLedger) + start = datetime.datetime.now().timestamp() + await ws.send(json.dumps({"command":"ledger","ledger_index":int(ledger),"binary":True, 
"transactions":bool(transactions),"expand":bool(expand)})) + res = json.loads(await ws.recv()) + end = datetime.datetime.now().timestamp() + if (end - start) > 0.1: + print("request took more than 100ms : " + str(end - start)) + numCalls = numCalls + 1 + if "error" in res: + print(res["error"]) + else: + print(res["header"]["blob"]) + + except websockets.exceptions.ConnectionClosedError as e: + print(e) + print(ledger) + +async def getManyHashes(ip, port, minLedger,maxLedger): + + hashes = [] + for x in range(minLedger,maxLedger): + res = await ledger(ip, port, x,True, True, False) + hashes.extend(getHashes(res)) + print(len(hashes)) + return hashes +async def getManyHashes(ip, port, minLedger,maxLedger, numHashes): + + random.seed() + hashes = [] + while len(hashes) < numHashes: + + lgr = random.randrange(minLedger,maxLedger) + res = await ledger(ip, port, lgr,True, True, False) + hashes.extend(getHashes(res)) + print(len(hashes)) + return hashes + + + +async def ledger(ip, port, ledger, binary, transactions, expand): + + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address,max_size=1000000000) as ws: + await ws.send(json.dumps({"command":"ledger","ledger_index":int(ledger),"binary":bool(binary), "transactions":bool(transactions),"expand":bool(expand)})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + return res + + except websockets.exceptions.connectionclosederror as e: + print(e) + +async def ledger_range(ip, port): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(json.dumps({"command":"ledger_range"})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + if "error" in res: + await ws.send(json.dumps({"command":"server_info"})) + res = json.loads(await ws.recv()) + print(res) + rng = res["result"]["info"]["complete_ledgers"] + if rng == "empty": + return (0,0) + idx = rng.find("-") 
+ return (int(rng[0:idx]),int(rng[idx+1:])) + + res = res["result"] + return (res["ledger_index_min"],res["ledger_index_max"]) + except websockets.exceptions.connectionclosederror as e: + print(e) +async def fee(ip, port): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(json.dumps({"command":"fee"})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + except websockets.exceptions.connectionclosederror as e: + print(e) + +async def server_info(ip, port): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(json.dumps({"command":"server_info"})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + except websockets.exceptions.connectionclosederror as e: + print(e) + +async def ledger_diff(ip, port, base, desired, includeBlobs): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(json.dumps({"command":"ledger_diff","base_ledger":int(base),"desired_ledger":int(desired),"include_blobs":bool(includeBlobs)})) + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + except websockets.exceptions.connectionclosederror as e: + print(e) + + +async def perf(ip, port): + res = await ledger_range(ip,port) + time.sleep(10) + res2 = await ledger_range(ip,port) + lps = ((int(res2[1]) - int(res[1])) / 10.0) + print(lps) + + +async def subscribe(ip, port): + address = 'ws://' + str(ip) + ':' + str(port) + try: + async with websockets.connect(address) as ws: + await ws.send(json.dumps({"command":"subscribe","streams":["ledger"]})) + #await ws.send(json.dumps({"command":"subscribe","streams":["book_changes"]})) + #await ws.send(json.dumps({"command":"subscribe","streams":["manifests"]})) + while True: + res = json.loads(await ws.recv()) + print(json.dumps(res,indent=4,sort_keys=True)) + 
except websockets.exceptions.connectionclosederror as e: + print(e) + +async def verifySubscribe(ip,clioPort,ripdPort): + clioAddress = 'ws://' + str(ip) + ':' + str(clioPort) + ripdAddress = 'ws://' + str(ip) + ':' + str(ripdPort) + ripdTxns = {} + clioTxns = {} + ripdBooks = {} + clioBooks = {} + try: + async with websockets.connect(clioAddress) as ws1: + async with websockets.connect(ripdAddress) as ws2: + await ws1.send(json.dumps({"command":"server_info"})) + res = json.loads(await ws1.recv()) + print(res) + start = int(res["result"]["info"]["complete_ledgers"].split("-")[1]) + end = start + 2 + + streams = ["ledger","transactions"] + books = [{"both":True,"taker_pays":{"currency":"XRP"},"taker_gets":{"currency":"USD","issuer":"rhub8VRN55s94qWKDv6jmDy1pUykJzF3wq"}}, + {"taker": "r9cZA1mLK5R5Am25ArfXFmqgNwjZgnfk59", "taker_gets": {"currency": "XRP"},"taker_pays": {"currency": "USD","issuer": "rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B"}}] + accounts = ["rDzTZxa7NwD9vmNf5dvTbW4FQDNSRsfPv6","rrpNnNLKrartuEqfJGpqyDwPj1AFPg9vn1"] + await ws1.send(json.dumps({"command":"subscribe","streams":streams,"books":books,"accounts":accounts})), + await ws2.send(json.dumps({"command":"subscribe","streams":streams,"books":books,"accounts":accounts})) + + res1 = json.loads(await ws1.recv()) + res2 = json.loads(await ws2.recv()) + print(json.dumps(res1,indent=4,sort_keys=True)) + print(json.dumps(res2,indent=4,sort_keys=True)) + res1 = res1["result"] + res2 = res2["result"] + assert("validated_ledgers" in res1 and "validated_ledgers" in res2) + res1["validated_ledgers"] = "" + res2["validated_ledgers"] = "" + assert(res1 == res2) + idx = 0 + def compareObjects(clio,ripd): + print("sorting") + clio.sort(key = lambda x : x["transaction"]["hash"]) + ripd.sort(key = lambda x : x["transaction"]["hash"]) + clioFiltered = [] + ripdFiltered = [] + for x in clio: + if x not in clioFiltered: + clioFiltered.append(x) + clio = clioFiltered + for x in ripd: + if x not in ripdFiltered: + 
ripdFiltered.append(x) + ripd = ripdFiltered + + print("comparing") + if clio == ripd: + return True + else: + print("mismatch") + if len(clio) != len(ripd): + print("length mismatch!") + print(len(ripd)) + print(len(clio)) + for x in clio: + print(x["transaction"]["hash"]) + for x in ripd: + print(x["transaction"]["hash"]) + return False + for ripdElt,clioElt in zip(ripd,clio): + if clioElt != ripdElt: + print("mismatch at " + str(z)) + if type(clioElt) is dict: + for t in ripdElt: + if t not in clioElt: + print("missing from clio " + str(t)) + return False + elif clioElt[t] != ripdElt[t]: + print("mismatch at " + str(t)) + compareObjects(clioElt[t],ripdElt[t]) + return False + for t in clioElt: + if t not in clioElt: + print("extra in clio : " + str(t)) + elif type(clioElt) is list: + if len(clioElt) != len(ripdElt): + print("Mismatched list size") + return False + for x,y in zip(ripdElt,clioElt): + if x != y: + print("Mismatch in list") + print(x) + print(y) + return False + return False + + while True: + res1 = json.loads(await ws1.recv()) + if res1["type"] != "ledgerClosed": + continue + else: + break + while True: + res2 = json.loads(await ws2.recv()) + if res2["type"] != "ledgerClosed": + continue + else: + break + + async def getAllTxns(ws): + txns = [] + while True: + res = json.loads(await ws.recv()) + if res["type"] == "transaction": + txns.append(res) + elif res["type"] == "ledgerClosed": + print(json.dumps(res,indent=4,sort_keys=True)) + return txns + return txns + + + + + while True: + print("getting clio") + clioTxns = await getAllTxns(ws1) + print("getting ripd") + ripdTxns = await getAllTxns(ws2) + if not compareObjects(clioTxns,ripdTxns): + print("failed") + assert(False) + print("matched full ledger") + print("txn count = " + str(len(clioTxns))) + + + except websockets.exceptions.connectionclosederror as e: + print(e) + + + + + + +parser = argparse.ArgumentParser(description='test script for xrpl-reporting') +parser.add_argument('action', 
choices=["account_info", "tx", "txs","account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range","ledger_entry", "ledgers", "ledger_entries","account_txs","account_infos","account_txs_full","book_changes","book_offerses","ledger_diff","perf","fee","server_info", "gaps","subscribe","verify_subscribe","call"]) + +parser.add_argument('--ip', default='127.0.0.1') +parser.add_argument('--port', default='8080') +parser.add_argument('--marker') +parser.add_argument('--hash') +parser.add_argument('--account') +parser.add_argument('--ledger') +parser.add_argument('--limit', default='200') +parser.add_argument('--taker_pays_issuer',default='rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B') +parser.add_argument('--taker_pays_currency',default='USD') +parser.add_argument('--taker_gets_issuer') +parser.add_argument('--taker_gets_currency',default='XRP') +parser.add_argument('--p2pIp', default='127.0.0.1') +parser.add_argument('--p2pPort', default='6006') +parser.add_argument('--verify',default=False) +parser.add_argument('--binary',default=True) +parser.add_argument('--forward',default=False) +parser.add_argument('--expand',default=False) +parser.add_argument('--transactions',default=False) +parser.add_argument('--minLedger',default=-1) +parser.add_argument('--maxLedger',default=-1) +parser.add_argument('--filename',default=None) +parser.add_argument('--ledgerIndex', default=-1) +parser.add_argument('--index') +parser.add_argument('--numPages',default=3) +parser.add_argument('--base') +parser.add_argument('--desired') +parser.add_argument('--includeBlobs',default=False) +parser.add_argument('--type',default=None) +parser.add_argument('--cursor',default='0000000000000000000000000000000000000000000000000000000000000000') +parser.add_argument('--numCalls',default=10000) +parser.add_argument('--numRunners',default=1) +parser.add_argument('--count',default=-1) +parser.add_argument('--streams',default=None) 
+parser.add_argument('--accounts',default=None) +parser.add_argument('--request',default=None) + + + + +args = parser.parse_args() + +def run(args): + asyncio.set_event_loop(asyncio.new_event_loop()) + if args.action == "call": + asyncio.get_event_loop().run_until_complete( + call(args.ip,args.port,args.request)) + return + elif args.action == "server_info": + asyncio.get_event_loop().run_until_complete(server_info(args.ip, args.port)) + return + + rng =asyncio.get_event_loop().run_until_complete(ledger_range(args.ip, args.port)) + if args.ledger is None: + args.ledger = rng[1] + if args.maxLedger == -1: + args.maxLedger = rng[1] + if args.minLedger == -1: + args.minLedger = rng[0] + if args.action == "fee": + asyncio.get_event_loop().run_until_complete(fee(args.ip, args.port)) + elif args.action == "perf": + asyncio.get_event_loop().run_until_complete( + perf(args.ip,args.port)) + elif args.action == "gaps": + missing = [] + for x in range(rng[0],rng[1]): + res = asyncio.get_event_loop().run_until_complete( + ledger(args.ip, args.port, x, True, False, False)) + if "error" in res: + print("missing " + str(x)) + missing.append(x) + print(missing) + elif args.action == "subscribe": + asyncio.get_event_loop().run_until_complete(subscribe(args.ip,args.port)) + elif args.action == "verify_subscribe": + asyncio.get_event_loop().run_until_complete(verifySubscribe(args.ip,args.port,args.p2pPort)) + elif args.action == "account_info": + res1 = asyncio.get_event_loop().run_until_complete( + account_info(args.ip, args.port, args.account, args.ledger, args.binary)) + if args.verify: + res2 = asyncio.get_event_loop().run_until_complete( + account_info(args.p2pIp, args.p2pPort, args.account, args.ledger, args.binary)) + print(compareAccountInfo(res1,res2)) + elif args.action == "txs": + #hashes = asyncio.get_event_loop().run_until_complete(getManyHashes(args.ip,args.port, int(args.minLedger),int(args.maxLedger), int(args.numCalls))) + #for x in hashes: + # print(x) + #return + 
hashes = getHashesFromFile(args.filename) + async def runner(): + + tasks = [] + for x in range(0,int(args.numRunners)): + tasks.append(asyncio.create_task(txs(args.ip, args.port, hashes,int(args.numCalls)))) + for t in tasks: + await t + + start = datetime.datetime.now().timestamp() + asyncio.run(runner()) + end = datetime.datetime.now().timestamp() + num = int(args.numRunners) * int(args.numCalls) + print("Completed " + str(num) + " in " + str(end - start) + " seconds. Throughput = " + str(num / (end - start)) + " calls per second") + elif args.action == "ledgers": + async def runner(): + + tasks = [] + for x in range(0,int(args.numRunners)): + tasks.append(asyncio.create_task(ledgers(args.ip, args.port, int(args.minLedger), int(args.maxLedger), args.transactions, args.expand, int(args.numCalls)))) + for t in tasks: + await t + + start = datetime.datetime.now().timestamp() + asyncio.run(runner()) + end = datetime.datetime.now().timestamp() + num = int(args.numRunners) * int(args.numCalls) + print("Completed " + str(num) + " in " + str(end - start) + " seconds. Throughput = " + str(num / (end - start)) + " calls per second") + print("Latency = " + str((end - start) / int(args.numCalls)) + " seconds") + elif args.action == "ledger_entries": + keys = [] + ledger_index = 0 + with open(args.filename) as f: + i = 0 + for line in f: + if ledger_index == 0: + ledger_index = int(line) + elif len(line) == 65: + keys.append(line[0:64]) + async def runner(): + + tasks = [] + for x in range(0,int(args.numRunners)): + tasks.append(asyncio.create_task(ledger_entries(args.ip, args.port, ledger_index,keys, int(args.numCalls)))) + for t in tasks: + await t + + start = datetime.datetime.now().timestamp() + asyncio.run(runner()) + end = datetime.datetime.now().timestamp() + num = int(args.numRunners) * int(args.numCalls) + print("Completed " + str(num) + " in " + str(end - start) + " seconds. 
Throughput = " + str(num / (end - start)) + " calls per second") + elif args.action == "account_txs": + accounts = getAccounts(args.filename) + async def runner(): + + tasks = [] + for x in range(0,int(args.numRunners)): + tasks.append(asyncio.create_task(account_txs(args.ip, args.port,accounts, int(args.numCalls)))) + for t in tasks: + await t + + start = datetime.datetime.now().timestamp() + asyncio.run(runner()) + end = datetime.datetime.now().timestamp() + num = int(args.numRunners) * int(args.numCalls) + print("Completed " + str(num) + " in " + str(end - start) + " seconds. Throughput = " + str(num / (end - start)) + " calls per second") + elif args.action == "account_txs_full": + accounts,cursors = getAccountsAndCursors(args.filename) + async def runner(): + + tasks = [] + for x in range(0,int(args.numRunners)): + tasks.append(asyncio.create_task(account_txs_full(args.ip, args.port,accounts,cursors,int(args.numCalls), int(args.limit)))) + for t in tasks: + await t + + start = datetime.datetime.now().timestamp() + asyncio.run(runner()) + end = datetime.datetime.now().timestamp() + num = int(args.numRunners) * int(args.numCalls) + print("Completed " + str(num) + " in " + str(end - start) + " seconds. Throughput = " + str(num / (end - start)) + " calls per second") + print("Latency = " + str((end - start) / int(args.numCalls)) + " seconds") + elif args.action == "account_infos": + accounts = getAccounts(args.filename) + async def runner(): + + tasks = [] + for x in range(0,int(args.numRunners)): + tasks.append(asyncio.create_task(account_infos(args.ip, args.port,accounts, int(args.numCalls)))) + for t in tasks: + await t + + start = datetime.datetime.now().timestamp() + asyncio.run(runner()) + end = datetime.datetime.now().timestamp() + num = int(args.numRunners) * int(args.numCalls) + print("Completed " + str(num) + " in " + str(end - start) + " seconds. 
Throughput = " + str(num / (end - start)) + " calls per second") + + elif args.action == "book_changes": + asyncio.get_event_loop().run_until_complete(book_changes(args.ip, args.port, int(args.ledger))) + + elif args.action == "book_offerses": + books = getBooks(args.filename) + async def runner(): + + tasks = [] + for x in range(0, int(args.numRunners)): + tasks.append(asyncio.create_task(book_offerses(args.ip, args.port, int(args.ledger), books, int(args.numCalls)))) + for t in tasks: + await t + + start = datetime.datetime.now().timestamp() + asyncio.run(runner()) + end = datetime.datetime.now().timestamp() + num = int(args.numRunners) * int(args.numCalls) + print("Completed " + str(num) + " in " + str(end - start) + " seconds. Throughput = " + str(num / (end - start)) + " calls per second") + elif args.action == "ledger_entry": + asyncio.get_event_loop().run_until_complete( + ledger_entry(args.ip, args.port, args.index, args.ledger, args.binary)) + elif args.action == "ledger_entries": + res = asyncio.get_event_loop().run_until_complete( + ledger_entries(args.ip, args.port, args.ledger)) + if args.verify: + objects = [] + for x in res: + res2 = asyncio.get_event_loop().run_until_complete( + ledger_entry(args.p2pIp, args.p2pPort,x[0] , args.ledger, True)) + if res2[1] != x[1]: + print("mismatch!") + return + print("Data matches!") + elif args.action == "ledger_diff": + asyncio.get_event_loop().run_until_complete( + ledger_diff(args.ip, args.port, args.base, args.desired, args.includeBlobs)) + elif args.action == "tx": + if args.verify: + args.binary = True + if args.hash is None: + args.hash = getHashes(asyncio.get_event_loop().run_until_complete(ledger(args.ip,args.port,args.ledger,False,True,False)))[0] + res = asyncio.get_event_loop().run_until_complete( + tx(args.ip, args.port, args.hash, args.binary)) + if args.verify: + res2 = asyncio.get_event_loop().run_until_complete( + tx(args.p2pIp, args.p2pPort, args.hash, args.binary)) + print(compareTx(res,res2)) + 
elif args.action == "account_tx": + if args.verify: + args.binary=True + if args.account is None: + args.hash = getHashes(asyncio.get_event_loop().run_until_complete(ledger(args.ip,args.port,args.ledger,False,True,False)))[0] + + res = asyncio.get_event_loop().run_until_complete(tx(args.ip,args.port,args.hash,False)) + args.account = res["transaction"]["Account"] + + res = asyncio.get_event_loop().run_until_complete( + account_tx(args.ip, args.port, args.account, args.binary, args.forward)) + rng = getMinAndMax(res) + + + + if args.verify: + res2 = asyncio.get_event_loop().run_until_complete( + account_tx(args.p2pIp, args.p2pPort, args.account, args.binary,rng[0],rng[1])) + print(compareAccountTx(res,res2)) + elif args.action == "account_tx_full": + if args.verify: + args.binary=True + if args.account is None: + args.hash = getHashes(asyncio.get_event_loop().run_until_complete(ledger(args.ip,args.port,args.ledger,False,True,False)))[0] + + res = asyncio.get_event_loop().run_until_complete(tx(args.ip,args.port,args.hash,False)) + args.account = res["transaction"]["Account"] + print("starting") + res = asyncio.get_event_loop().run_until_complete( + account_tx_full(args.ip, args.port, args.account, args.binary,args.forward,None,None)) + rng = getMinAndMax(res) + print(len(res["transactions"])) + print(args.account) + txs = set() + for x in res["transactions"]: + txs.add((x["transaction"],x["ledger_sequence"])) + print(len(txs)) + + if args.verify: + print("requesting p2p node") + res2 = asyncio.get_event_loop().run_until_complete( + account_tx_full(args.p2pIp, args.p2pPort, args.account, args.binary,args.forward, rng[0],rng[1],int(args.numPages))) + + print(compareAccountTx(res,res2)) + elif args.action == "ledger_data": + res = asyncio.get_event_loop().run_until_complete( + ledger_data(args.ip, args.port, args.ledger, args.limit, args.binary, args.cursor)) + if args.verify: + writeLedgerData(res,args.filename) + elif args.action == "ledger_data_full": + if 
args.verify: + args.limit = 2048 + args.binary = True + if args.filename is None: + args.filename = str(args.port) + "." + str(args.ledger) + + res = asyncio.get_event_loop().run_until_complete( + ledger_data_full(args.ip, args.port, args.ledger, bool(args.binary), args.limit,args.type, int(args.count), args.marker)) + print(len(res)) + if args.verify: + writeLedgerData(res,args.filename) + + elif args.action == "ledger": + + if args.verify: + args.binary = True + args.transactions = True + args.expand = True + res = asyncio.get_event_loop().run_until_complete( + ledger(args.ip, args.port, args.ledger, args.binary, args.transactions, args.expand)) + if args.verify: + res2 = asyncio.get_event_loop().run_until_complete( + ledger(args.p2pIp, args.p2pPort, args.ledger, args.binary, args.transactions, args.expand)) + print(compareLedger(res,res2)) + + elif args.action == "ledger_range": + asyncio.get_event_loop().run_until_complete( + ledger_range(args.ip, args.port)) + elif args.action == "book_offers": + if args.verify: + args.binary=True + res = asyncio.get_event_loop().run_until_complete( + book_offers(args.ip, args.port, args.ledger, args.taker_pays_currency, args.taker_pays_issuer, args.taker_gets_currency, args.taker_gets_issuer, args.binary,args.limit)) + if args.verify: + res2 = asyncio.get_event_loop().run_until_complete( + book_offers(args.p2pIp, args.p2pPort, args.ledger, args.taker_pays_currency, args.taker_pays_issuer, args.taker_gets_currency, args.taker_gets_issuer, args.binary, args.limit)) + print(compare_book_offers(res,res2)) + + else: + print("incorrect arguments") + + + + +run(args) diff --git a/unittests/Backend.cpp b/unittests/Backend.cpp new file mode 100644 index 00000000..69581347 --- /dev/null +++ b/unittests/Backend.cpp @@ -0,0 +1,2449 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +class BackendTest : public NoLoggerFixture +{ +}; + +TEST_F(BackendTest, Basic) +{ + boost::asio::io_context ioc; + std::optional work; + work.emplace(ioc); + std::atomic_bool done = false; + + boost::asio::spawn( + ioc, [&done, &work, &ioc](boost::asio::yield_context yield) { + boost::log::core::get()->set_filter( + clio::log_severity >= clio::Severity::WRN); + std::string keyspace = "clio_test_" + + std::to_string(std::chrono::system_clock::now() + .time_since_epoch() + .count()); + boost::json::object cassandraConfig{ + {"database", + {{"type", "cassandra"}, + {"cassandra", + {{"contact_points", "127.0.0.1"}, + {"port", 9042}, + {"keyspace", keyspace.c_str()}, + {"replication_factor", 1}, + {"table_prefix", ""}, + {"max_requests_outstanding", 1000}, + {"indexer_key_shift", 2}, + {"threads", 8}}}}}}; + std::vector configs = {cassandraConfig}; + for (auto& config : configs) + { + auto backend = Backend::make_Backend(ioc, clio::Config{config}); + + std::string rawHeader = + "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335" + 
"BC54351E" + "DD73" + "3898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" + "315A6DB6" + "FE30" + "CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED3CF5" + "3E2232B3" + "3EF5" + "7CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE282656A58CE5A" + "A29652EF" + "FD80" + "AC59CD91416E4E13DBBE"; + + auto hexStringToBinaryString = [](auto const& hex) { + auto blob = ripple::strUnHex(hex); + std::string strBlob; + for (auto c : *blob) + { + strBlob += c; + } + return strBlob; + }; + auto binaryStringToUint256 = + [](auto const& bin) -> ripple::uint256 { + ripple::uint256 uint; + return uint.fromVoid((void const*)bin.data()); + }; + auto ledgerInfoToBinaryString = [](auto const& info) { + auto blob = RPC::ledgerInfoToBlob(info, true); + std::string strBlob; + for (auto c : blob) + { + strBlob += c; + } + return strBlob; + }; + + std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); + ripple::LedgerInfo lgrInfo = + deserializeHeader(ripple::makeSlice(rawHeaderBlob)); + + backend->startWrites(); + backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), + lgrInfo.seq, + uint256ToString(Backend::lastKey)); + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, rng->maxSequence); + EXPECT_EQ(rng->maxSequence, lgrInfo.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_TRUE(seq.has_value()); + EXPECT_EQ(*seq, lgrInfo.seq); + } + + { + auto retLgr = + backend->fetchLedgerBySequence(lgrInfo.seq, yield); + ASSERT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfo.seq); + EXPECT_EQ( + RPC::ledgerInfoToBlob(lgrInfo), + RPC::ledgerInfoToBlob(*retLgr)); + } + + EXPECT_FALSE( + backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield) + .has_value()); + auto lgrInfoOld = lgrInfo; + + auto lgrInfoNext = lgrInfo; + lgrInfoNext.seq = lgrInfo.seq + 1; + 
lgrInfoNext.parentHash = lgrInfo.hash; + lgrInfoNext.hash++; + lgrInfoNext.accountHash = ~lgrInfo.accountHash; + { + std::string rawHeaderBlob = + ledgerInfoToBinaryString(lgrInfoNext); + + backend->startWrites(); + backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_EQ(seq, lgrInfoNext.seq); + } + { + auto retLgr = + backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoNext)); + EXPECT_NE( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoOld)); + retLgr = backend->fetchLedgerBySequence( + lgrInfoNext.seq - 1, yield); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoOld)); + EXPECT_NE( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoNext)); + retLgr = backend->fetchLedgerBySequence( + lgrInfoNext.seq - 2, yield); + EXPECT_FALSE( + backend + ->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield) + .has_value()); + + auto txns = backend->fetchAllTransactionsInLedger( + lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + + auto hashes = backend->fetchAllTransactionHashesInLedger( + lgrInfoNext.seq, yield); + EXPECT_EQ(hashes.size(), 0); + } + + // the below dummy data is not expected to be consistent. The + // metadata string does represent valid metadata. Don't assume + // though that the transaction or its hash correspond to the + // metadata, or anything like that. 
These tests are purely + // binary tests to make sure the same data that goes in, comes + // back out + std::string metaHex = + "201C0000001AF8E411006F560A3E08122A05AC91DEFA87052B0554E4A2" + "9B46" + "3A27642EBB060B6052196592EEE72200000000240480FDB52503CE1A86" + "3300" + "000000000000003400000000000000005529983CBAED30F54747145292" + "1C3C" + "6B9F9685F292F6291000EED0A44413AF18C250101AC09600F4B502C8F7" + "F830" + "F80B616DCB6F3970CB79AB70975A05ED5B66860B9564400000001FE217" + "CB65" + "D54B640B31521B05000000000000000000000000434E59000000000003" + "60E3" + "E0751BD9A566CD03FA6CAFC78118B82BA081142252F328CF9126341776" + "2570" + "D67220CCB33B1370E1E1E3110064561AC09600F4B502C8F7F830F80B61" + "6DCB" + "6F3970CB79AB70975A05ED33DF783681E8365A05ED33DF783681581AC0" + "9600" + "F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED33DF78368103" + "1100" + "0000000000000000000000434E59000000000004110360E3E0751BD9A5" + "66CD" + "03FA6CAFC78118B82BA0E1E1E4110064561AC09600F4B502C8F7F830F8" + "0B61" + "6DCB6F3970CB79AB70975A05ED5B66860B95E72200000000365A05ED5B" + "6686" + "0B95581AC09600F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05" + "ED5B" + "66860B9501110000000000000000000000000000000000000000021100" + "0000" + "0000000000000000000000000000000000031100000000000000000000" + "0000" + "434E59000000000004110360E3E0751BD9A566CD03FA6CAFC78118B82B" + "A0E1" + "E1E311006F5647B05E66DE9F3DF2689E8F4CE6126D3136B6C5E79587F9" + "D24B" + "D71A952B0852BAE8240480FDB950101AC09600F4B502C8F7F830F80B61" + "6DCB" + "6F3970CB79AB70975A05ED33DF78368164400000033C83A95F65D59D9A" + "6291" + "9C2D18000000000000000000000000434E5900000000000360E3E0751B" + "D9A5" + "66CD03FA6CAFC78118B82BA081142252F328CF91263417762570D67220" + "CCB3" + "3B1370E1E1E511006456AEA3074F10FE15DAC592F8A0405C61FB7D4C98" + "F588" + "C2D55C84718FAFBBD2604AE72200000000310000000000000000320000" + "0000" + "0000000058AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588C2D55C" + "8471" + "8FAFBBD2604A82142252F328CF91263417762570D67220CCB33B1370E1" + "E1E5" + 
"1100612503CE1A8755CE935137F8C6C8DEF26B5CD93BE18105CA83F65E" + "1E90" + "CEC546F562D25957DC0856E0311EB450B6177F969B94DBDDA83E99B7A0" + "576A" + "CD9079573876F16C0C004F06E6240480FDB9624000000005FF0E2BE1E7" + "2200" + "000000240480FDBA2D00000005624000000005FF0E1F81142252F328CF" + "9126" + "3417762570D67220CCB33B1370E1E1F1031000"; + std::string txnHex = + "1200072200000000240480FDB920190480FDB5201B03CE1A8964400000" + "033C" + "83A95F65D59D9A62919C2D18000000000000000000000000434E590000" + "0000" + "000360E3E0751BD9A566CD03FA6CAFC78118B82BA06840000000000000" + "0C73" + "21022D40673B44C82DEE1DDB8B9BB53DCCE4F97B27404DB850F068DD91" + "D685" + "E337EA7446304402202EA6B702B48B39F2197112382838F92D4C02948E" + "9911" + "FE6B2DEBCF9183A426BC022005DAC06CD4517E86C2548A80996019F3AC" + "60A0" + "9EED153BF60C992930D68F09F981142252F328CF91263417762570D672" + "20CC" + "B33B1370"; + std::string hashHex = + "0A81FB3D6324C2DCF73131505C6E4DC67981D7FC39F5E9574CEC4B1F22" + "D28BF7"; + + // this account is not related to the above transaction and + // metadata + std::string accountHex = + "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD" + "018EFFBE" + "17C5" + "C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" + "07811422" + "52F3" + "28CF91263417762570D67220CCB33B1370"; + std::string accountIndexHex = + "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C" + "004F06"; + + // An NFTokenMint tx + std::string nftTxnHex = + "1200192200000008240011CC9B201B001F71D6202A0000000168400000" + "000000000C7321ED475D1452031E8F9641AF1631519A58F7B8681E172E" + "4838AA0E59408ADA1727DD74406960041F34F10E0CBB39444B4D4E577F" + "C0B7E8D843D091C2917E96E7EE0E08B30C91413EC551A2B8A1D405E8BA" + "34FE185D8B10C53B40928611F2DE3B746F0303751868747470733A2F2F" + "677265677765697362726F642E636F6D81146203F49C21D5D6E022CB16" + "DE3538F248662FC73C"; + + std::string nftTxnMeta = + "201C00000001F8E511005025001F71B3556ED9C9459001E4F4A9121F4E" + "07AB6D14898A5BBEF13D85C25D743540DB59F3CF566203F49C21D5D6E0" 
+ "22CB16DE3538F248662FC73CFFFFFFFFFFFFFFFFFFFFFFFFE6FAEC5A00" + "0800006203F49C21D5D6E022CB16DE3538F248662FC73C8962EFA00000" + "0006751868747470733A2F2F677265677765697362726F642E636F6DE1" + "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C93E8B1" + "C200000028751868747470733A2F2F677265677765697362726F642E63" + "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" + "9808B6B90000001D751868747470733A2F2F677265677765697362726F" + "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" + "2FC73C9C28BBAC00000012751868747470733A2F2F6772656777656973" + "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" + "F248662FC73CA048C0A300000007751868747470733A2F2F6772656777" + "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" + "DE3538F248662FC73CAACE82C500000029751868747470733A2F2F6772" + "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" + "22CB16DE3538F248662FC73CAEEE87B80000001E751868747470733A2F" + "2F677265677765697362726F642E636F6DE1EC5A000800006203F49C21" + "D5D6E022CB16DE3538F248662FC73CB30E8CAF00000013751868747470" + "733A2F2F677265677765697362726F642E636F6DE1EC5A000800006203" + "F49C21D5D6E022CB16DE3538F248662FC73CB72E91A200000008751868" + "747470733A2F2F677265677765697362726F642E636F6DE1EC5A000800" + "006203F49C21D5D6E022CB16DE3538F248662FC73CC1B453C40000002A" + "751868747470733A2F2F677265677765697362726F642E636F6DE1EC5A" + "000800006203F49C21D5D6E022CB16DE3538F248662FC73CC5D458BB00" + "00001F751868747470733A2F2F677265677765697362726F642E636F6D" + "E1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CC9F4" + "5DAE00000014751868747470733A2F2F677265677765697362726F642E" + "636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC7" + "3CCE1462A500000009751868747470733A2F2F67726567776569736272" + "6F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248" + "662FC73CD89A24C70000002B751868747470733A2F2F67726567776569" + "7362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE35" + 
"38F248662FC73CDCBA29BA00000020751868747470733A2F2F67726567" + "7765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB" + "16DE3538F248662FC73CE0DA2EB100000015751868747470733A2F2F67" + "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" + "E022CB16DE3538F248662FC73CE4FA33A40000000A751868747470733A" + "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" + "21D5D6E022CB16DE3538F248662FC73CF39FFABD000000217518687474" + "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" + "03F49C21D5D6E022CB16DE3538F248662FC73CF7BFFFB0000000167518" + "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" + "00006203F49C21D5D6E022CB16DE3538F248662FC73CFBE004A7000000" + "0B751868747470733A2F2F677265677765697362726F642E636F6DE1F1" + "E1E72200000000501A6203F49C21D5D6E022CB16DE3538F248662FC73C" + "662FC73C8962EFA000000006FAEC5A000800006203F49C21D5D6E022CB" + "16DE3538F248662FC73C8962EFA000000006751868747470733A2F2F67" + "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" + "E022CB16DE3538F248662FC73C93E8B1C200000028751868747470733A" + "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" + "21D5D6E022CB16DE3538F248662FC73C9808B6B90000001D7518687474" + "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" + "03F49C21D5D6E022CB16DE3538F248662FC73C9C28BBAC000000127518" + "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" + "00006203F49C21D5D6E022CB16DE3538F248662FC73CA048C0A3000000" + "07751868747470733A2F2F677265677765697362726F642E636F6DE1EC" + "5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAACE82C5" + "00000029751868747470733A2F2F677265677765697362726F642E636F" + "6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAE" + "EE87B80000001E751868747470733A2F2F677265677765697362726F64" + "2E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662F" + "C73CB30E8CAF00000013751868747470733A2F2F677265677765697362" + "726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F2" + 
"48662FC73CB72E91A200000008751868747470733A2F2F677265677765" + "697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE" + "3538F248662FC73CC1B453C40000002A751868747470733A2F2F677265" + "677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022" + "CB16DE3538F248662FC73CC5D458BB0000001F751868747470733A2F2F" + "677265677765697362726F642E636F6DE1EC5A000800006203F49C21D5" + "D6E022CB16DE3538F248662FC73CC9F45DAE0000001475186874747073" + "3A2F2F677265677765697362726F642E636F6DE1EC5A000800006203F4" + "9C21D5D6E022CB16DE3538F248662FC73CCE1462A50000000975186874" + "7470733A2F2F677265677765697362726F642E636F6DE1EC5A00080000" + "6203F49C21D5D6E022CB16DE3538F248662FC73CD89A24C70000002B75" + "1868747470733A2F2F677265677765697362726F642E636F6DE1EC5A00" + "0800006203F49C21D5D6E022CB16DE3538F248662FC73CDCBA29BA0000" + "0020751868747470733A2F2F677265677765697362726F642E636F6DE1" + "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CE0DA2E" + "B100000015751868747470733A2F2F677265677765697362726F642E63" + "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" + "E4FA33A40000000A751868747470733A2F2F677265677765697362726F" + "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" + "2FC73CEF7FF5C60000002C751868747470733A2F2F6772656777656973" + "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" + "F248662FC73CF39FFABD00000021751868747470733A2F2F6772656777" + "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" + "DE3538F248662FC73CF7BFFFB000000016751868747470733A2F2F6772" + "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" + "22CB16DE3538F248662FC73CFBE004A70000000B751868747470733A2F" + "2F677265677765697362726F642E636F6DE1F1E1E1E511006125001F71" + "B3556ED9C9459001E4F4A9121F4E07AB6D14898A5BBEF13D85C25D7435" + "40DB59F3CF56BE121B82D5812149D633F605EB07265A80B762A365CE94" + "883089FEEE4B955701E6240011CC9B202B0000002C6240000002540BE3" + "ECE1E72200000000240011CC9C2D0000000A202B0000002D202C000000" + 
"066240000002540BE3E081146203F49C21D5D6E022CB16DE3538F24866" + "2FC73CE1E1F1031000"; + std::string nftTxnHashHex = + "6C7F69A6D25A13AC4A2E9145999F45D4674F939900017A96885FDC2757" + "E9284E"; + ripple::uint256 nftID; + EXPECT_TRUE( + nftID.parseHex("000800006203F49C21D5D6E022CB16DE3538F248662" + "FC73CEF7FF5C60000002C")); + + std::string metaBlob = hexStringToBinaryString(metaHex); + std::string txnBlob = hexStringToBinaryString(txnHex); + std::string hashBlob = hexStringToBinaryString(hashHex); + std::string accountBlob = hexStringToBinaryString(accountHex); + std::string accountIndexBlob = + hexStringToBinaryString(accountIndexHex); + std::vector affectedAccounts; + + std::string nftTxnBlob = hexStringToBinaryString(nftTxnHex); + std::string nftTxnMetaBlob = + hexStringToBinaryString(nftTxnMeta); + + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.txHash = ~lgrInfo.txHash; + lgrInfoNext.accountHash = + lgrInfoNext.accountHash ^ lgrInfoNext.txHash; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + + ripple::uint256 hash256; + EXPECT_TRUE(hash256.parseHex(hashHex)); + ripple::TxMeta txMeta{hash256, lgrInfoNext.seq, metaBlob}; + auto journal = ripple::debugLog(); + auto accountsSet = txMeta.getAffectedAccounts(); + for (auto& a : accountsSet) + { + affectedAccounts.push_back(a); + } + std::vector accountTxData; + accountTxData.emplace_back(txMeta, hash256, journal); + + ripple::uint256 nftHash256; + EXPECT_TRUE(nftHash256.parseHex(nftTxnHashHex)); + ripple::TxMeta nftTxMeta{ + nftHash256, lgrInfoNext.seq, nftTxnMetaBlob}; + ripple::SerialIter it{nftTxnBlob.data(), nftTxnBlob.size()}; + ripple::STTx sttx{it}; + auto const [parsedNFTTxsRef, parsedNFT] = + getNFTDataFromTx(nftTxMeta, sttx); + // need to copy the nft txns so we can std::move later + std::vector parsedNFTTxs; + parsedNFTTxs.insert( + parsedNFTTxs.end(), + parsedNFTTxsRef.begin(), + parsedNFTTxsRef.end()); + EXPECT_EQ(parsedNFTTxs.size(), 1); + 
EXPECT_TRUE(parsedNFT.has_value()); + EXPECT_EQ(parsedNFT->tokenID, nftID); + std::vector nftData; + nftData.push_back(*parsedNFT); + + backend->writeLedger( + lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeTransaction( + std::string{hashBlob}, + lgrInfoNext.seq, + lgrInfoNext.closeTime.time_since_epoch().count(), + std::string{txnBlob}, + std::string{metaBlob}); + backend->writeAccountTransactions(std::move(accountTxData)); + + // NFT writing not yet implemented for pg + if (config == cassandraConfig) + { + backend->writeNFTs(std::move(nftData)); + backend->writeNFTTransactions(std::move(parsedNFTTxs)); + } + else + { + EXPECT_THROW( + { backend->writeNFTs(std::move(nftData)); }, + std::runtime_error); + EXPECT_THROW( + { + backend->writeNFTTransactions( + std::move(parsedNFTTxs)); + }, + std::runtime_error); + } + + backend->writeLedgerObject( + std::string{accountIndexBlob}, + lgrInfoNext.seq, + std::string{accountBlob}); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), + lgrInfoNext.seq, + std::string{accountIndexBlob}); + backend->writeSuccessor( + std::string{accountIndexBlob}, + lgrInfoNext.seq, + uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = + backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger( + lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 1); + EXPECT_STREQ( + (const char*)txns[0].transaction.data(), + (const char*)txnBlob.data()); + EXPECT_STREQ( + (const char*)txns[0].metadata.data(), + (const char*)metaBlob.data()); + auto hashes = backend->fetchAllTransactionHashesInLedger( + lgrInfoNext.seq, yield); + 
EXPECT_EQ(hashes.size(), 1); + EXPECT_EQ(ripple::strHex(hashes[0]), hashHex); + for (auto& a : affectedAccounts) + { + auto [txns, cursor] = backend->fetchAccountTransactions( + a, 100, true, {}, yield); + EXPECT_EQ(txns.size(), 1); + EXPECT_EQ(txns[0], txns[0]); + EXPECT_FALSE(cursor); + } + + // NFT fetching not yet implemented for pg + if (config == cassandraConfig) + { + auto nft = + backend->fetchNFT(nftID, lgrInfoNext.seq, yield); + EXPECT_TRUE(nft.has_value()); + auto [nftTxns, cursor] = backend->fetchNFTTransactions( + nftID, 100, true, {}, yield); + EXPECT_EQ(nftTxns.size(), 1); + EXPECT_EQ(nftTxns[0], nftTxns[0]); + EXPECT_FALSE(cursor); + } + else + { + EXPECT_THROW( + { + backend->fetchNFT( + nftID, lgrInfoNext.seq, yield); + }, + std::runtime_error); + EXPECT_THROW( + { + backend->fetchNFTTransactions( + nftID, 100, true, {}, yield); + }, + std::runtime_error); + } + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + // obtain a time-based seed: + unsigned seed = + std::chrono::system_clock::now().time_since_epoch().count(); + std::string accountBlobOld = accountBlob; + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = + lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = + ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger( + lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + std::shuffle( + accountBlob.begin(), + 
accountBlob.end(), + std::default_random_engine(seed)); + backend->writeLedgerObject( + std::string{accountIndexBlob}, + lgrInfoNext.seq, + std::string{accountBlob}); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = + backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger( + lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq - 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = + lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = + ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger( + lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeLedgerObject( + std::string{accountIndexBlob}, + lgrInfoNext.seq, + std::string{}); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), + lgrInfoNext.seq, + uint256ToString(Backend::lastKey)); + + 
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = + backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger( + lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq + 1, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq - 2, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + + auto generateObjects = [](size_t numObjects, + uint32_t ledgerSequence) { + std::vector> res{ + numObjects}; + ripple::uint256 key; + key = ledgerSequence * 100000; + + for (auto& blob : res) + { + ++key; + std::string keyStr{(const char*)key.data(), key.size()}; + blob.first = keyStr; + blob.second = std::to_string(ledgerSequence) + keyStr; + } + return res; + }; + auto updateObjects = [](uint32_t ledgerSequence, auto objs) { + for (auto& [key, obj] : objs) + { + obj = std::to_string(ledgerSequence) + obj; + } + return objs; + }; + auto generateTxns = [](size_t numTxns, + uint32_t ledgerSequence) { + std::vector< + std::tuple> + res{numTxns}; + ripple::uint256 base; + base = ledgerSequence * 100000; + for (auto& blob : res) + { + ++base; + std::string hashStr{ + (const char*)base.data(), base.size()}; + std::string txnStr = + "tx" + std::to_string(ledgerSequence) + hashStr; + std::string metaStr = + "meta" + 
std::to_string(ledgerSequence) + hashStr; + blob = std::make_tuple(hashStr, txnStr, metaStr); + } + return res; + }; + auto generateAccounts = [](uint32_t ledgerSequence, + uint32_t numAccounts) { + std::vector accounts; + ripple::AccountID base; + base = ledgerSequence * 998765; + for (size_t i = 0; i < numAccounts; ++i) + { + ++base; + accounts.push_back(base); + } + return accounts; + }; + auto generateAccountTx = [&](uint32_t ledgerSequence, + auto txns) { + std::vector ret; + auto accounts = generateAccounts(ledgerSequence, 10); + std::srand(std::time(nullptr)); + uint32_t idx = 0; + for (auto& [hash, txn, meta] : txns) + { + AccountTransactionsData data; + data.ledgerSequence = ledgerSequence; + data.transactionIndex = idx; + data.txHash = hash; + for (size_t i = 0; i < 3; ++i) + { + data.accounts.insert( + accounts[std::rand() % accounts.size()]); + } + ++idx; + ret.push_back(data); + } + return ret; + }; + + auto generateNextLedger = [seed](auto lgrInfo) { + ++lgrInfo.seq; + lgrInfo.parentHash = lgrInfo.hash; + static auto randomEngine = std::default_random_engine(seed); + std::shuffle( + lgrInfo.txHash.begin(), + lgrInfo.txHash.end(), + randomEngine); + std::shuffle( + lgrInfo.accountHash.begin(), + lgrInfo.accountHash.end(), + randomEngine); + std::shuffle( + lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); + return lgrInfo; + }; + auto writeLedger = [&](auto lgrInfo, + auto txns, + auto objs, + auto accountTx, + auto state) { + backend->startWrites(); + + backend->writeLedger( + lgrInfo, ledgerInfoToBinaryString(lgrInfo)); + for (auto [hash, txn, meta] : txns) + { + backend->writeTransaction( + std::move(hash), + lgrInfo.seq, + lgrInfo.closeTime.time_since_epoch().count(), + std::move(txn), + std::move(meta)); + } + for (auto [key, obj] : objs) + { + backend->writeLedgerObject( + std::string{key}, lgrInfo.seq, std::string{obj}); + } + if (state.count(lgrInfo.seq - 1) == 0 || + std::find_if( + state[lgrInfo.seq - 1].begin(), + state[lgrInfo.seq - 
1].end(), + [&](auto obj) { + return obj.first == objs[0].first; + }) == state[lgrInfo.seq - 1].end()) + { + for (size_t i = 0; i < objs.size(); ++i) + { + if (i + 1 < objs.size()) + backend->writeSuccessor( + std::string{objs[i].first}, + lgrInfo.seq, + std::string{objs[i + 1].first}); + else + backend->writeSuccessor( + std::string{objs[i].first}, + lgrInfo.seq, + uint256ToString(Backend::lastKey)); + } + if (state.count(lgrInfo.seq - 1)) + backend->writeSuccessor( + std::string{ + state[lgrInfo.seq - 1].back().first}, + lgrInfo.seq, + std::string{objs[0].first}); + else + backend->writeSuccessor( + uint256ToString(Backend::firstKey), + lgrInfo.seq, + std::string{objs[0].first}); + } + + backend->writeAccountTransactions(std::move(accountTx)); + + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + }; + + auto checkLedger = [&](auto lgrInfo, + auto txns, + auto objs, + auto accountTx) { + auto rng = backend->fetchLedgerRange(); + auto seq = lgrInfo.seq; + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_GE(rng->maxSequence, seq); + auto retLgr = backend->fetchLedgerBySequence(seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfo)); + // retLgr = backend->fetchLedgerByHash(lgrInfo.hash); + // EXPECT_TRUE(retLgr); + // EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), + // RPC::ledgerInfoToBlob(lgrInfo)); + auto retTxns = + backend->fetchAllTransactionsInLedger(seq, yield); + for (auto [hash, txn, meta] : txns) + { + bool found = false; + for (auto [retTxn, retMeta, retSeq, retDate] : retTxns) + { + if (std::strncmp( + (const char*)retTxn.data(), + (const char*)txn.data(), + txn.size()) == 0 && + std::strncmp( + (const char*)retMeta.data(), + (const char*)meta.data(), + meta.size()) == 0) + found = true; + } + ASSERT_TRUE(found); + } + for (auto [account, data] : accountTx) + { + std::vector retData; + std::optional cursor; + do + { + uint32_t limit = 10; + auto [txns, retCursor] = + 
backend->fetchAccountTransactions( + account, limit, false, cursor, yield); + if (retCursor) + EXPECT_EQ(txns.size(), limit); + retData.insert( + retData.end(), txns.begin(), txns.end()); + cursor = retCursor; + } while (cursor); + EXPECT_EQ(retData.size(), data.size()); + for (size_t i = 0; i < retData.size(); ++i) + { + auto [txn, meta, seq, date] = retData[i]; + auto [hash, expTxn, expMeta] = data[i]; + EXPECT_STREQ( + (const char*)txn.data(), + (const char*)expTxn.data()); + EXPECT_STREQ( + (const char*)meta.data(), + (const char*)expMeta.data()); + } + } + std::vector keys; + for (auto [key, obj] : objs) + { + auto retObj = backend->fetchLedgerObject( + binaryStringToUint256(key), seq, yield); + if (obj.size()) + { + ASSERT_TRUE(retObj.has_value()); + EXPECT_STREQ( + (const char*)obj.data(), + (const char*)retObj->data()); + } + else + { + ASSERT_FALSE(retObj.has_value()); + } + keys.push_back(binaryStringToUint256(key)); + } + + { + auto retObjs = + backend->fetchLedgerObjects(keys, seq, yield); + ASSERT_EQ(retObjs.size(), objs.size()); + + for (size_t i = 0; i < keys.size(); ++i) + { + auto [key, obj] = objs[i]; + auto retObj = retObjs[i]; + if (obj.size()) + { + ASSERT_TRUE(retObj.size()); + EXPECT_STREQ( + (const char*)obj.data(), + (const char*)retObj.data()); + } + else + { + ASSERT_FALSE(retObj.size()); + } + } + } + + Backend::LedgerPage page; + std::vector retObjs; + size_t numLoops = 0; + do + { + uint32_t limit = 10; + page = backend->fetchLedgerPage( + page.cursor, seq, limit, false, yield); + // if (page.cursor) + // EXPECT_EQ(page.objects.size(), limit); + retObjs.insert( + retObjs.end(), + page.objects.begin(), + page.objects.end()); + ++numLoops; + } while (page.cursor); + + for (auto obj : objs) + { + bool found = false; + for (auto retObj : retObjs) + { + if (ripple::strHex(obj.first) == + ripple::strHex(retObj.key)) + { + found = true; + ASSERT_EQ( + ripple::strHex(obj.second), + ripple::strHex(retObj.blob)); + } + } + if (found != 
(obj.second.size() != 0)) + ASSERT_EQ(found, obj.second.size() != 0); + } + }; + + std::map< + uint32_t, + std::vector>> + state; + std::map< + uint32_t, + std::vector< + std::tuple>> + allTxns; + std::unordered_map< + std::string, + std::pair> + allTxnsMap; + std::map< + uint32_t, + std::map>> + allAccountTx; + std::map lgrInfos; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + auto objs = generateObjects(25, lgrInfoNext.seq); + auto txns = generateTxns(10, lgrInfoNext.seq); + auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); + for (auto rec : accountTx) + { + for (auto account : rec.accounts) + { + allAccountTx[lgrInfoNext.seq][account].push_back( + std::string{ + (const char*)rec.txHash.data(), + rec.txHash.size()}); + } + } + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + EXPECT_EQ(txns.size(), 10); + EXPECT_NE(txns[0], txns[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, txns, objs, accountTx, state); + allTxns[lgrInfoNext.seq] = txns; + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + for (auto& [hash, txn, meta] : txns) + { + allTxnsMap[hash] = std::make_pair(txn, meta); + } + } + + std::vector> objs; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + if (!objs.size()) + objs = generateObjects(25, lgrInfoNext.seq); + else + objs = updateObjects(lgrInfoNext.seq, objs); + auto txns = generateTxns(10, lgrInfoNext.seq); + auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); + for (auto rec : accountTx) + { + for (auto account : rec.accounts) + { + allAccountTx[lgrInfoNext.seq][account].push_back( + std::string{ + (const char*)rec.txHash.data(), + rec.txHash.size()}); + } + } + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + EXPECT_EQ(txns.size(), 10); + EXPECT_NE(txns[0], txns[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, txns, objs, 
accountTx, state); + allTxns[lgrInfoNext.seq] = txns; + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + for (auto& [hash, txn, meta] : txns) + { + allTxnsMap[hash] = std::make_pair(txn, meta); + } + } + + auto flatten = [&](uint32_t max) { + std::vector> flat; + std::map objs; + for (auto [seq, diff] : state) + { + for (auto [k, v] : diff) + { + if (seq > max) + { + if (objs.count(k) == 0) + objs[k] = ""; + } + else + { + objs[k] = v; + } + } + } + for (auto [key, value] : objs) + { + flat.push_back(std::make_pair(key, value)); + } + return flat; + }; + + auto flattenAccountTx = [&](uint32_t max) { + std::unordered_map< + ripple::AccountID, + std::vector< + std::tuple>> + accountTx; + for (auto [seq, map] : allAccountTx) + { + if (seq > max) + break; + for (auto& [account, hashes] : map) + { + for (auto& hash : hashes) + { + auto& [txn, meta] = allTxnsMap[hash]; + accountTx[account].push_back( + std::make_tuple(hash, txn, meta)); + } + } + } + for (auto& [account, data] : accountTx) + std::reverse(data.begin(), data.end()); + return accountTx; + }; + + for (auto [seq, diff] : state) + { + auto flat = flatten(seq); + checkLedger( + lgrInfos[seq], + allTxns[seq], + flat, + flattenAccountTx(seq)); + } + } + + done = true; + work.reset(); + }); + + ioc.run(); + EXPECT_EQ(done, true); +} + +TEST_F(BackendTest, cache) +{ + using namespace Backend; + boost::log::core::get()->set_filter( + clio::log_severity >= clio::Severity::WRN); + SimpleCache cache; + ASSERT_FALSE(cache.isFull()); + cache.setFull(); + + // Nothing in cache + { + ASSERT_TRUE(cache.isFull()); + ASSERT_EQ(cache.size(), 0); + ASSERT_FALSE(cache.get(ripple::uint256{12}, 0)); + ASSERT_FALSE(cache.getSuccessor(firstKey, 0)); + ASSERT_FALSE(cache.getPredecessor(lastKey, 0)); + } + + // insert + uint32_t curSeq = 1; + std::vector objs; + objs.push_back({}); + objs[0] = {ripple::uint256{42}, {0xCC}}; + cache.update(objs, curSeq); + { + auto& obj = objs[0]; + ASSERT_TRUE(cache.isFull()); + ASSERT_EQ(cache.size(), 1); 
+ auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, curSeq + 1)); + ASSERT_FALSE(cache.get(obj.key, curSeq - 1)); + ASSERT_FALSE(cache.getSuccessor(obj.key, curSeq)); + ASSERT_FALSE(cache.getPredecessor(obj.key, curSeq)); + auto succ = cache.getSuccessor(firstKey, curSeq); + ASSERT_TRUE(succ); + ASSERT_EQ(*succ, obj); + auto pred = cache.getPredecessor(lastKey, curSeq); + ASSERT_TRUE(pred); + ASSERT_EQ(pred, obj); + } + // update + curSeq++; + objs[0].blob = {0x01}; + cache.update(objs, curSeq); + { + auto& obj = objs[0]; + ASSERT_EQ(cache.size(), 1); + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, curSeq + 1)); + ASSERT_FALSE(cache.get(obj.key, curSeq - 1)); + ASSERT_TRUE(cache.isFull()); + ASSERT_FALSE(cache.getSuccessor(obj.key, curSeq)); + ASSERT_FALSE(cache.getPredecessor(obj.key, curSeq)); + auto succ = cache.getSuccessor(firstKey, curSeq); + ASSERT_TRUE(succ); + ASSERT_EQ(*succ, obj); + auto pred = cache.getPredecessor(lastKey, curSeq); + ASSERT_TRUE(pred); + ASSERT_EQ(*pred, obj); + } + // empty update + curSeq++; + cache.update({}, curSeq); + { + auto& obj = objs[0]; + ASSERT_EQ(cache.size(), 1); + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_TRUE(cache.get(obj.key, curSeq - 1)); + ASSERT_FALSE(cache.get(obj.key, curSeq - 2)); + ASSERT_EQ(*cache.get(obj.key, curSeq - 1), obj.blob); + ASSERT_FALSE(cache.getSuccessor(obj.key, curSeq)); + ASSERT_FALSE(cache.getPredecessor(obj.key, curSeq)); + auto succ = cache.getSuccessor(firstKey, curSeq); + ASSERT_TRUE(succ); + ASSERT_EQ(*succ, obj); + auto pred = cache.getPredecessor(lastKey, curSeq); + ASSERT_TRUE(pred); + ASSERT_EQ(*pred, obj); + } + // delete + curSeq++; + objs[0].blob = {}; + cache.update(objs, curSeq); + { + auto& obj = objs[0]; + ASSERT_EQ(cache.size(), 0); + 
auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_FALSE(cacheObj); + ASSERT_FALSE(cache.get(obj.key, curSeq + 1)); + ASSERT_FALSE(cache.get(obj.key, curSeq - 1)); + ASSERT_TRUE(cache.isFull()); + ASSERT_FALSE(cache.getSuccessor(obj.key, curSeq)); + ASSERT_FALSE(cache.getPredecessor(obj.key, curSeq)); + ASSERT_FALSE(cache.getSuccessor(firstKey, curSeq)); + ASSERT_FALSE(cache.getPredecessor(lastKey, curSeq)); + } + // random non-existent object + { + ASSERT_FALSE(cache.get(ripple::uint256{23}, curSeq)); + } + + // insert several objects + curSeq++; + objs.resize(10); + for (size_t i = 0; i < objs.size(); ++i) + { + objs[i] = { + ripple::uint256{i * 100 + 1}, + {(unsigned char)i, (unsigned char)i * 2, (unsigned char)i + 1}}; + } + cache.update(objs, curSeq); + { + ASSERT_EQ(cache.size(), 10); + for (auto& obj : objs) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, curSeq - 1)); + ASSERT_FALSE(cache.get(obj.key, curSeq + 1)); + } + + std::optional succ = {{firstKey, {}}}; + size_t idx = 0; + while ((succ = cache.getSuccessor(succ->key, curSeq))) + { + ASSERT_EQ(*succ, objs[idx++]); + } + ASSERT_EQ(idx, objs.size()); + } + + // insert several more objects + curSeq++; + auto objs2 = objs; + for (size_t i = 0; i < objs.size(); ++i) + { + objs2[i] = { + ripple::uint256{i * 100 + 50}, + {(unsigned char)i, (unsigned char)i * 3, (unsigned char)i + 5}}; + } + cache.update(objs2, curSeq); + { + ASSERT_EQ(cache.size(), 20); + for (auto& obj : objs) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, curSeq - 1); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, curSeq - 2)); + ASSERT_FALSE(cache.get(obj.key, curSeq + 1)); + } + for (auto& obj : objs2) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, 
obj.blob); + ASSERT_FALSE(cache.get(obj.key, curSeq - 1)); + ASSERT_FALSE(cache.get(obj.key, curSeq + 1)); + } + std::optional succ = {{firstKey, {}}}; + size_t idx = 0; + while ((succ = cache.getSuccessor(succ->key, curSeq))) + { + if (idx % 2 == 0) + ASSERT_EQ(*succ, objs[(idx++) / 2]); + else + ASSERT_EQ(*succ, objs2[(idx++) / 2]); + } + ASSERT_EQ(idx, objs.size() + objs2.size()); + } + + // mix of inserts, updates and deletes + curSeq++; + for (size_t i = 0; i < objs.size(); ++i) + { + if (i % 2 == 0) + objs[i].blob = {}; + else if (i % 2 == 1) + std::reverse(objs[i].blob.begin(), objs[i].blob.end()); + } + cache.update(objs, curSeq); + { + ASSERT_EQ(cache.size(), 15); + for (size_t i = 0; i < objs.size(); ++i) + { + auto& obj = objs[i]; + auto cacheObj = cache.get(obj.key, curSeq); + if (i % 2 == 0) + { + ASSERT_FALSE(cacheObj); + ASSERT_FALSE(cache.get(obj.key, curSeq - 1)); + ASSERT_FALSE(cache.get(obj.key, curSeq - 2)); + } + else + { + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, curSeq - 1)); + ASSERT_FALSE(cache.get(obj.key, curSeq - 2)); + } + } + for (auto& obj : objs2) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, curSeq - 1); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, curSeq - 2)); + } + + auto allObjs = objs; + allObjs.clear(); + std::copy_if( + objs.begin(), + objs.end(), + std::back_inserter(allObjs), + [](auto obj) { return obj.blob.size() > 0; }); + std::copy(objs2.begin(), objs2.end(), std::back_inserter(allObjs)); + std::sort(allObjs.begin(), allObjs.end(), [](auto a, auto b) { + return a.key < b.key; + }); + std::optional succ = {{firstKey, {}}}; + size_t idx = 0; + while ((succ = cache.getSuccessor(succ->key, curSeq))) + { + ASSERT_EQ(*succ, allObjs[idx++]); + } + ASSERT_EQ(idx, allObjs.size()); + } +} + +TEST_F(BackendTest, cacheBackground) +{ + using 
namespace Backend; + boost::log::core::get()->set_filter( + clio::log_severity >= clio::Severity::WRN); + SimpleCache cache; + ASSERT_FALSE(cache.isFull()); + ASSERT_EQ(cache.size(), 0); + + uint32_t startSeq = 10; + uint32_t curSeq = startSeq; + + std::vector bObjs; + bObjs.resize(100); + for (size_t i = 0; i < bObjs.size(); ++i) + { + bObjs[i].key = ripple::uint256{i * 3 + 1}; + bObjs[i].blob = {(unsigned char)i + 1}; + } + { + auto objs = bObjs; + objs.clear(); + std::copy(bObjs.begin(), bObjs.begin() + 10, std::back_inserter(objs)); + cache.update(objs, startSeq); + ASSERT_EQ(cache.size(), 10); + ASSERT_FALSE(cache.isFull()); + for (auto& obj : objs) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + } + } + // some updates + curSeq++; + std::vector objs1; + for (size_t i = 0; i < bObjs.size(); ++i) + { + if (i % 5 == 0) + objs1.push_back(bObjs[i]); + } + for (auto& obj : objs1) + { + std::reverse(obj.blob.begin(), obj.blob.end()); + } + cache.update(objs1, curSeq); + + { + for (auto& obj : objs1) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (size_t i = 0; i < 10; i++) + { + auto& obj = bObjs[i]; + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { + return o.key == obj.key; + }); + if (newObj == objs1.end()) + { + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, startSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + } + else + { + ASSERT_EQ(*cacheObj, newObj->blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + } + } + + { + auto objs = bObjs; + objs.clear(); + std::copy( + bObjs.begin() + 10, bObjs.begin() + 20, std::back_inserter(objs)); + cache.update(objs, startSeq, true); + } + { + for (auto& obj : objs1) + { + auto cacheObj = cache.get(obj.key, curSeq); 
+ ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (size_t i = 0; i < 20; i++) + { + auto& obj = bObjs[i]; + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { + return o.key == obj.key; + }); + if (newObj == objs1.end()) + { + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, startSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + } + else + { + ASSERT_EQ(*cacheObj, newObj->blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + } + } + + // some inserts + curSeq++; + auto objs2 = objs1; + objs2.clear(); + for (size_t i = 0; i < bObjs.size(); ++i) + { + if (i % 7 == 0) + { + auto obj = bObjs[i]; + obj.key = ripple::uint256{(i + 1) * 1000}; + obj.blob = {(unsigned char)(i + 1) * 100}; + objs2.push_back(obj); + } + } + cache.update(objs2, curSeq); + { + for (auto& obj : objs1) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (auto& obj : objs2) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (size_t i = 0; i < 20; i++) + { + auto& obj = bObjs[i]; + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { + return o.key == obj.key; + }); + if (newObj == objs1.end()) + { + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, startSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + } + else + { + ASSERT_EQ(*cacheObj, newObj->blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + } + } + + { + auto objs = bObjs; + objs.clear(); + std::copy( + bObjs.begin() + 20, bObjs.begin() + 30, std::back_inserter(objs)); + cache.update(objs, startSeq, true); + 
} + { + for (auto& obj : objs1) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (auto& obj : objs2) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (size_t i = 0; i < 30; i++) + { + auto& obj = bObjs[i]; + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { + return o.key == obj.key; + }); + if (newObj == objs1.end()) + { + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, startSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + } + else + { + ASSERT_EQ(*cacheObj, newObj->blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + } + } + + // some deletes + curSeq++; + auto objs3 = objs1; + objs3.clear(); + for (size_t i = 0; i < bObjs.size(); ++i) + { + if (i % 6 == 0) + { + auto obj = bObjs[i]; + obj.blob = {}; + objs3.push_back(obj); + } + } + cache.update(objs3, curSeq); + { + for (auto& obj : objs1) + { + auto cacheObj = cache.get(obj.key, curSeq); + if (std::find_if(objs3.begin(), objs3.end(), [&](auto o) { + return o.key == obj.key; + }) == objs3.end()) + { + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + else + { + ASSERT_FALSE(cacheObj); + } + } + for (auto& obj : objs2) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (auto& obj : objs3) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_FALSE(cacheObj); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (size_t i = 0; i < 30; i++) + { + auto& obj = bObjs[i]; + auto cacheObj = cache.get(obj.key, curSeq); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { 
+ return o.key == obj.key; + }); + auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { + return o.key == obj.key; + }); + if (delObj != objs3.end()) + { + ASSERT_FALSE(cacheObj); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + else if (newObj == objs1.end()) + { + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, startSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + } + else + { + ASSERT_EQ(*cacheObj, newObj->blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + } + } + { + auto objs = bObjs; + objs.clear(); + std::copy(bObjs.begin() + 30, bObjs.end(), std::back_inserter(objs)); + cache.update(objs, startSeq, true); + } + { + for (auto& obj : objs1) + { + auto cacheObj = cache.get(obj.key, curSeq); + if (std::find_if(objs3.begin(), objs3.end(), [&](auto o) { + return o.key == obj.key; + }) == objs3.end()) + { + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + else + { + ASSERT_FALSE(cacheObj); + } + } + for (auto& obj : objs2) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (auto& obj : objs3) + { + auto cacheObj = cache.get(obj.key, curSeq); + ASSERT_FALSE(cacheObj); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + for (size_t i = 0; i < bObjs.size(); i++) + { + auto& obj = bObjs[i]; + auto cacheObj = cache.get(obj.key, curSeq); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { + return o.key == obj.key; + }); + auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { + return o.key == obj.key; + }); + if (delObj != objs3.end()) + { + ASSERT_FALSE(cacheObj); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + else if (newObj == objs1.end()) + { + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, startSeq); + ASSERT_TRUE(cacheObj); + 
ASSERT_EQ(*cacheObj, obj.blob); + } + else + { + ASSERT_EQ(*cacheObj, newObj->blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + } + } + cache.setFull(); + auto allObjs = bObjs; + allObjs.clear(); + for (size_t i = 0; i < bObjs.size(); i++) + { + auto& obj = bObjs[i]; + auto cacheObj = cache.get(obj.key, curSeq); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { + return o.key == obj.key; + }); + auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { + return o.key == obj.key; + }); + if (delObj != objs3.end()) + { + ASSERT_FALSE(cacheObj); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + else if (newObj == objs1.end()) + { + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + cacheObj = cache.get(obj.key, startSeq); + ASSERT_TRUE(cacheObj); + ASSERT_EQ(*cacheObj, obj.blob); + allObjs.push_back(obj); + } + else + { + allObjs.push_back(*newObj); + ASSERT_EQ(*cacheObj, newObj->blob); + ASSERT_FALSE(cache.get(obj.key, startSeq)); + } + } + for (auto& obj : objs2) + { + allObjs.push_back(obj); + } + std::sort(allObjs.begin(), allObjs.end(), [](auto a, auto b) { + return a.key < b.key; + }); + std::optional succ = {{firstKey, {}}}; + size_t idx = 0; + while ((succ = cache.getSuccessor(succ->key, curSeq))) + { + ASSERT_EQ(*succ, allObjs[idx++]); + } + ASSERT_EQ(idx, allObjs.size()); +} + +TEST_F(BackendTest, cacheIntegration) +{ + boost::asio::io_context ioc; + std::optional work; + work.emplace(ioc); + std::atomic_bool done = false; + + boost::asio::spawn( + ioc, [&ioc, &done, &work](boost::asio::yield_context yield) { + boost::log::core::get()->set_filter( + clio::log_severity >= clio::Severity::WRN); + std::string keyspace = "clio_test_" + + std::to_string(std::chrono::system_clock::now() + .time_since_epoch() + .count()); + boost::json::object cassandraConfig{ + {"database", + {{"type", "cassandra"}, + {"cassandra", + {{"contact_points", "127.0.0.1"}, + {"port", 9042}, + {"keyspace", keyspace.c_str()}, + 
{"replication_factor", 1}, + {"table_prefix", ""}, + {"max_requests_outstanding", 1000}, + {"indexer_key_shift", 2}, + {"threads", 8}}}}}}; + std::vector configs = {cassandraConfig}; + for (auto& config : configs) + { + auto backend = Backend::make_Backend(ioc, clio::Config{config}); + backend->cache().setFull(); + + std::string rawHeader = + "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335" + "BC54351E" + "DD73" + "3898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" + "315A6DB6" + "FE30" + "CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED3CF5" + "3E2232B3" + "3EF5" + "7CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE282656A58CE5A" + "A29652EF" + "FD80" + "AC59CD91416E4E13DBBE"; + // this account is not related to the above transaction and + // metadata + std::string accountHex = + "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD" + "018EFFBE" + "17C5" + "C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" + "07811422" + "52F3" + "28CF91263417762570D67220CCB33B1370"; + std::string accountIndexHex = + "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C" + "004F06"; + + auto hexStringToBinaryString = [](auto const& hex) { + auto blob = ripple::strUnHex(hex); + std::string strBlob; + for (auto c : *blob) + { + strBlob += c; + } + return strBlob; + }; + auto binaryStringToUint256 = + [](auto const& bin) -> ripple::uint256 { + ripple::uint256 uint; + return uint.fromVoid((void const*)bin.data()); + }; + auto ledgerInfoToBinaryString = [](auto const& info) { + auto blob = RPC::ledgerInfoToBlob(info, true); + std::string strBlob; + for (auto c : blob) + { + strBlob += c; + } + return strBlob; + }; + + std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); + std::string accountBlob = hexStringToBinaryString(accountHex); + std::string accountIndexBlob = + hexStringToBinaryString(accountIndexHex); + ripple::LedgerInfo lgrInfo = + deserializeHeader(ripple::makeSlice(rawHeaderBlob)); + + backend->startWrites(); + 
backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), + lgrInfo.seq, + uint256ToString(Backend::lastKey)); + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, rng->maxSequence); + EXPECT_EQ(rng->maxSequence, lgrInfo.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_TRUE(seq.has_value()); + EXPECT_EQ(*seq, lgrInfo.seq); + } + + { + auto retLgr = + backend->fetchLedgerBySequence(lgrInfo.seq, yield); + ASSERT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfo.seq); + EXPECT_EQ( + RPC::ledgerInfoToBlob(lgrInfo), + RPC::ledgerInfoToBlob(*retLgr)); + } + + EXPECT_FALSE( + backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield) + .has_value()); + auto lgrInfoOld = lgrInfo; + + auto lgrInfoNext = lgrInfo; + lgrInfoNext.seq = lgrInfo.seq + 1; + lgrInfoNext.parentHash = lgrInfo.hash; + lgrInfoNext.hash++; + lgrInfoNext.accountHash = ~lgrInfo.accountHash; + { + std::string rawHeaderBlob = + ledgerInfoToBinaryString(lgrInfoNext); + + backend->startWrites(); + backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_EQ(seq, lgrInfoNext.seq); + } + { + auto retLgr = + backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoNext)); + EXPECT_NE( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoOld)); + retLgr = backend->fetchLedgerBySequence( + lgrInfoNext.seq - 1, yield); + EXPECT_EQ( + 
RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoOld)); + + EXPECT_NE( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoNext)); + retLgr = backend->fetchLedgerBySequence( + lgrInfoNext.seq - 2, yield); + EXPECT_FALSE( + backend + ->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield) + .has_value()); + + auto txns = backend->fetchAllTransactionsInLedger( + lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + auto hashes = backend->fetchAllTransactionHashesInLedger( + lgrInfoNext.seq, yield); + EXPECT_EQ(hashes.size(), 0); + } + + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.txHash = ~lgrInfo.txHash; + lgrInfoNext.accountHash = + lgrInfoNext.accountHash ^ lgrInfoNext.txHash; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + + backend->writeLedger( + lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeLedgerObject( + std::string{accountIndexBlob}, + lgrInfoNext.seq, + std::string{accountBlob}); + auto key = + ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update( + {{*key, {accountBlob.begin(), accountBlob.end()}}}, + lgrInfoNext.seq); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), + lgrInfoNext.seq, + std::string{accountIndexBlob}); + backend->writeSuccessor( + std::string{accountIndexBlob}, + lgrInfoNext.seq, + uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = + backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfoNext)); + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + 
EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + // obtain a time-based seed: + unsigned seed = + std::chrono::system_clock::now().time_since_epoch().count(); + std::string accountBlobOld = accountBlob; + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = + lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = + ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger( + lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + std::shuffle( + accountBlob.begin(), + accountBlob.end(), + std::default_random_engine(seed)); + auto key = + ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update( + {{*key, {accountBlob.begin(), accountBlob.end()}}}, + lgrInfoNext.seq); + backend->writeLedgerObject( + std::string{accountIndexBlob}, + lgrInfoNext.seq, + std::string{accountBlob}); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = + backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlob.data()); 
+ obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq - 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = + lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = + ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger( + lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + auto key = + ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update({{*key, {}}}, lgrInfoNext.seq); + backend->writeLedgerObject( + std::string{accountIndexBlob}, + lgrInfoNext.seq, + std::string{}); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), + lgrInfoNext.seq, + uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = + backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq + 1, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject( + key256, lgrInfoNext.seq - 2, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ( + (const char*)obj->data(), + (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject( + key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + + auto generateObjects = [](size_t numObjects, + uint32_t ledgerSequence) { + std::vector> res{ + numObjects}; + ripple::uint256 key; + 
key = ledgerSequence * 100000; + + for (auto& blob : res) + { + ++key; + std::string keyStr{(const char*)key.data(), key.size()}; + blob.first = keyStr; + blob.second = std::to_string(ledgerSequence) + keyStr; + } + return res; + }; + auto updateObjects = [](uint32_t ledgerSequence, auto objs) { + for (auto& [key, obj] : objs) + { + obj = std::to_string(ledgerSequence) + obj; + } + return objs; + }; + + auto generateNextLedger = [seed](auto lgrInfo) { + ++lgrInfo.seq; + lgrInfo.parentHash = lgrInfo.hash; + static auto randomEngine = std::default_random_engine(seed); + std::shuffle( + lgrInfo.txHash.begin(), + lgrInfo.txHash.end(), + randomEngine); + std::shuffle( + lgrInfo.accountHash.begin(), + lgrInfo.accountHash.end(), + randomEngine); + std::shuffle( + lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); + return lgrInfo; + }; + auto writeLedger = [&](auto lgrInfo, auto objs, auto state) { + backend->startWrites(); + + backend->writeLedger( + lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo))); + std::vector cacheUpdates; + for (auto [key, obj] : objs) + { + backend->writeLedgerObject( + std::string{key}, lgrInfo.seq, std::string{obj}); + auto key256 = ripple::uint256::fromVoidChecked(key); + cacheUpdates.push_back( + {*key256, {obj.begin(), obj.end()}}); + } + backend->cache().update(cacheUpdates, lgrInfo.seq); + if (state.count(lgrInfo.seq - 1) == 0 || + std::find_if( + state[lgrInfo.seq - 1].begin(), + state[lgrInfo.seq - 1].end(), + [&](auto obj) { + return obj.first == objs[0].first; + }) == state[lgrInfo.seq - 1].end()) + { + for (size_t i = 0; i < objs.size(); ++i) + { + if (i + 1 < objs.size()) + backend->writeSuccessor( + std::string{objs[i].first}, + lgrInfo.seq, + std::string{objs[i + 1].first}); + else + backend->writeSuccessor( + std::string{objs[i].first}, + lgrInfo.seq, + uint256ToString(Backend::lastKey)); + } + if (state.count(lgrInfo.seq - 1)) + backend->writeSuccessor( + std::string{ + state[lgrInfo.seq - 1].back().first}, + 
lgrInfo.seq, + std::string{objs[0].first}); + else + backend->writeSuccessor( + uint256ToString(Backend::firstKey), + lgrInfo.seq, + std::string{objs[0].first}); + } + + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + }; + + auto checkLedger = [&](auto lgrInfo, auto objs) { + auto rng = backend->fetchLedgerRange(); + auto seq = lgrInfo.seq; + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_GE(rng->maxSequence, seq); + auto retLgr = backend->fetchLedgerBySequence(seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfo)); + retLgr = backend->fetchLedgerByHash(lgrInfo.hash, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ( + RPC::ledgerInfoToBlob(*retLgr), + RPC::ledgerInfoToBlob(lgrInfo)) + << "retLgr seq:" << retLgr->seq + << "; lgrInfo seq:" << lgrInfo.seq + << "; retLgr hash:" << retLgr->hash + << "; lgrInfo hash:" << lgrInfo.hash + << "; retLgr parentHash:" << retLgr->parentHash + << "; lgr Info parentHash:" << lgrInfo.parentHash; + + std::vector keys; + for (auto [key, obj] : objs) + { + auto retObj = backend->fetchLedgerObject( + binaryStringToUint256(key), seq, yield); + if (obj.size()) + { + ASSERT_TRUE(retObj.has_value()); + EXPECT_STREQ( + (const char*)obj.data(), + (const char*)retObj->data()); + } + else + { + ASSERT_FALSE(retObj.has_value()); + } + keys.push_back(binaryStringToUint256(key)); + } + + { + auto retObjs = + backend->fetchLedgerObjects(keys, seq, yield); + ASSERT_EQ(retObjs.size(), objs.size()); + + for (size_t i = 0; i < keys.size(); ++i) + { + auto [key, obj] = objs[i]; + auto retObj = retObjs[i]; + if (obj.size()) + { + ASSERT_TRUE(retObj.size()); + EXPECT_STREQ( + (const char*)obj.data(), + (const char*)retObj.data()); + } + else + { + ASSERT_FALSE(retObj.size()); + } + } + } + Backend::LedgerPage page; + std::vector retObjs; + size_t numLoops = 0; + do + { + uint32_t limit = 10; + page = backend->fetchLedgerPage( + page.cursor, seq, limit, false, yield); + 
// if (page.cursor) + // EXPECT_EQ(page.objects.size(), limit); + retObjs.insert( + retObjs.end(), + page.objects.begin(), + page.objects.end()); + ++numLoops; + } while (page.cursor); + for (auto obj : objs) + { + bool found = false; + for (auto retObj : retObjs) + { + if (ripple::strHex(obj.first) == + ripple::strHex(retObj.key)) + { + found = true; + ASSERT_EQ( + ripple::strHex(obj.second), + ripple::strHex(retObj.blob)); + } + } + if (found != (obj.second.size() != 0)) + ASSERT_EQ(found, obj.second.size() != 0); + } + }; + + std::map< + uint32_t, + std::vector>> + state; + std::map lgrInfos; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + auto objs = generateObjects(25, lgrInfoNext.seq); + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, objs, state); + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + } + + std::vector> objs; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + if (!objs.size()) + objs = generateObjects(25, lgrInfoNext.seq); + else + objs = updateObjects(lgrInfoNext.seq, objs); + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, objs, state); + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + } + + auto flatten = [&](uint32_t max) { + std::vector> flat; + std::map objs; + for (auto [seq, diff] : state) + { + for (auto [k, v] : diff) + { + if (seq > max) + { + if (objs.count(k) == 0) + objs[k] = ""; + } + else + { + objs[k] = v; + } + } + } + for (auto [key, value] : objs) + { + flat.push_back(std::make_pair(key, value)); + } + return flat; + }; + + for (auto [seq, diff] : state) + { + auto flat = flatten(seq); + checkLedger(lgrInfos[seq], flat); + } + } + + done = true; + work.reset(); + }); + + ioc.run(); +} diff --git a/unittests/Config.cpp b/unittests/Config.cpp 
new file mode 100644 index 00000000..0c5804f2 --- /dev/null +++ b/unittests/Config.cpp @@ -0,0 +1,251 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include +#include + +#include +#include +#include +#include + +using namespace clio; +using namespace boost::log; +using namespace std; +namespace json = boost::json; + +constexpr static auto JSONData = R"JSON( + { + "arr": [ + { "first": 1234 }, + { "second": true }, + { "inner_section": [{ "inner": "works" }] }, + ["127.0.0.1", "192.168.0.255"] + ], + "section": { + "test": { + "str": "hello", + "int": 9042, + "bool": true + } + }, + "top": 420 + } +)JSON"; + +class ConfigTest : public NoLoggerFixture +{ +protected: + Config cfg{json::parse(JSONData)}; +}; + +TEST_F(ConfigTest, SanityCheck) +{ + // throw on wrong key format etc.: + ASSERT_ANY_THROW((void)cfg.value("")); + ASSERT_ANY_THROW((void)cfg.value("a.")); + ASSERT_ANY_THROW((void)cfg.value(".a")); + ASSERT_ANY_THROW((void)cfg.valueOr("", false)); + ASSERT_ANY_THROW((void)cfg.valueOr("a.", false)); + ASSERT_ANY_THROW((void)cfg.valueOr(".a", false)); + ASSERT_ANY_THROW((void)cfg.maybeValue("")); + ASSERT_ANY_THROW((void)cfg.maybeValue("a.")); + ASSERT_ANY_THROW((void)cfg.maybeValue(".a")); + ASSERT_ANY_THROW((void)cfg.valueOrThrow("", "custom")); + ASSERT_ANY_THROW((void)cfg.valueOrThrow("a.", "custom")); + ASSERT_ANY_THROW((void)cfg.valueOrThrow(".a", "custom")); + ASSERT_ANY_THROW((void)cfg.contains("")); + ASSERT_ANY_THROW((void)cfg.contains("a.")); + ASSERT_ANY_THROW((void)cfg.contains(".a")); + ASSERT_ANY_THROW((void)cfg.section("")); + ASSERT_ANY_THROW((void)cfg.section("a.")); + ASSERT_ANY_THROW((void)cfg.section(".a")); + + // valid path, value does not exists -> optional functions should not throw + ASSERT_ANY_THROW((void)cfg.value("b")); + ASSERT_EQ(cfg.valueOr("b", false), false); + ASSERT_EQ(cfg.maybeValue("b"), std::nullopt); + ASSERT_ANY_THROW((void)cfg.valueOrThrow("b", "custom")); +} + +TEST_F(ConfigTest, Access) +{ + ASSERT_EQ(cfg.value("top"), 420); + 
ASSERT_EQ(cfg.value("section.test.str"), "hello"); + ASSERT_EQ(cfg.value("section.test.int"), 9042); + ASSERT_EQ(cfg.value("section.test.bool"), true); + + ASSERT_ANY_THROW((void)cfg.value( + "section.test.bool")); // wrong type requested + ASSERT_ANY_THROW((void)cfg.value("section.doesnotexist")); + + ASSERT_EQ(cfg.valueOr("section.test.str", "fallback"), "hello"); + ASSERT_EQ( + cfg.valueOr("section.test.nonexistent", "fallback"), + "fallback"); + ASSERT_EQ(cfg.valueOr("section.test.bool", false), true); + + ASSERT_ANY_THROW( + (void)cfg.valueOr("section.test.bool", 1234)); // wrong type requested +} + +TEST_F(ConfigTest, ErrorHandling) +{ + try + { + (void)cfg.valueOrThrow("section.test.int", "msg"); + ASSERT_FALSE(true); // should not get here + } + catch (std::runtime_error const& e) + { + ASSERT_STREQ(e.what(), "msg"); + } + + ASSERT_EQ(cfg.valueOrThrow("section.test.bool", ""), true); + + auto arr = cfg.array("arr"); + try + { + (void)arr[3].array()[1].valueOrThrow("msg"); // wrong type + ASSERT_FALSE(true); // should not get here + } + catch (std::runtime_error const& e) + { + ASSERT_STREQ(e.what(), "msg"); + } + + ASSERT_EQ(arr[3].array()[1].valueOrThrow(""), "192.168.0.255"); + + try + { + (void)cfg.arrayOrThrow("nonexisting.key", "msg"); + ASSERT_FALSE(true); // should not get here + } + catch (std::runtime_error const& e) + { + ASSERT_STREQ(e.what(), "msg"); + } + + ASSERT_EQ(cfg.arrayOrThrow("arr", "")[0].value("first"), 1234); +} + +TEST_F(ConfigTest, Section) +{ + auto sub = cfg.section("section.test"); + + ASSERT_EQ(sub.value("str"), "hello"); + ASSERT_EQ(sub.value("int"), 9042); + ASSERT_EQ(sub.value("bool"), true); +} + +TEST_F(ConfigTest, Array) +{ + auto arr = cfg.array("arr"); + + ASSERT_EQ(arr.size(), 4); + ASSERT_EQ(arr[0].value("first"), 1234); + + // check twice to verify that previous array(key) access did not destroy the + // store by using move + ASSERT_EQ(arr[2].array("inner_section")[0].value("inner"), "works"); + 
ASSERT_EQ(arr[2].array("inner_section")[0].value("inner"), "works"); + + ASSERT_EQ(arr[3].array()[1].value(), "192.168.0.255"); + + vector exp{"192.168.0.255", "127.0.0.1"}; + for (auto inner = arr[3].array(); auto const& el : inner) + { + ASSERT_EQ(el.value(), exp.back()); + exp.pop_back(); + } + + ASSERT_TRUE(exp.empty()); +} + +/** + * @brief Simple custom data type with json parsing support + */ +struct Custom +{ + string a; + int b; + bool c; + + friend Custom + tag_invoke(json::value_to_tag, json::value const& value) + { + assert(value.is_object()); + auto const& obj = value.as_object(); + return { + obj.at("str").as_string().c_str(), + obj.at("int").as_int64(), + obj.at("bool").as_bool()}; + } +}; + +TEST_F(ConfigTest, Extend) +{ + auto custom = cfg.value("section.test"); + + ASSERT_EQ(custom.a, "hello"); + ASSERT_EQ(custom.b, 9042); + ASSERT_EQ(custom.c, true); +} + +/** + * @brief Simple temporary file util + */ +class TmpFile +{ +public: + TmpFile(std::string const& data) + : tmpPath_{boost::filesystem::unique_path().string()} + { + std::ofstream of; + of.open(tmpPath_); + of << data; + of.close(); + } + ~TmpFile() + { + std::remove(tmpPath_.c_str()); + } + std::string + path() const + { + return tmpPath_; + } + +private: + std::string tmpPath_; +}; + +TEST_F(ConfigTest, File) +{ + auto tmp = TmpFile(JSONData); + auto conf = ConfigReader::open(tmp.path()); + + ASSERT_EQ(conf.value("top"), 420); + + auto doesntexist = ConfigReader::open("nope"); + ASSERT_EQ(doesntexist.valueOr("found", false), false); +} diff --git a/unittests/DOSGuard.cpp b/unittests/DOSGuard.cpp new file mode 100644 index 00000000..3a1a626c --- /dev/null +++ b/unittests/DOSGuard.cpp @@ -0,0 +1,177 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include + +#include +#include + +using namespace testing; +using namespace clio; +using namespace std; +namespace json = boost::json; + +namespace { +constexpr static auto JSONData = R"JSON( + { + "dos_guard": { + "max_fetches": 100, + "sweep_interval": 1, + "max_connections": 2, + "max_requests": 3, + "whitelist": ["127.0.0.1"] + } + } +)JSON"; + +constexpr static auto JSONData2 = R"JSON( + { + "dos_guard": { + "max_fetches": 100, + "sweep_interval": 0.1, + "max_connections": 2, + "whitelist": ["127.0.0.1"] + } + } +)JSON"; + +constexpr static auto IP = "127.0.0.2"; + +class FakeSweepHandler +{ +private: + using guard_type = BasicDOSGuard; + guard_type* dosGuard_; + +public: + void + setup(guard_type* guard) + { + dosGuard_ = guard; + } + + void + sweep() + { + dosGuard_->clear(); + } +}; +}; // namespace + +class DOSGuardTest : public NoLoggerFixture +{ +protected: + Config cfg{json::parse(JSONData)}; + FakeSweepHandler sweepHandler; + BasicDOSGuard guard{cfg, sweepHandler}; +}; + +TEST_F(DOSGuardTest, Whitelisting) +{ + EXPECT_TRUE(guard.isWhiteListed("127.0.0.1")); + EXPECT_FALSE(guard.isWhiteListed(IP)); +} + +TEST_F(DOSGuardTest, ConnectionCount) 
+{ + EXPECT_TRUE(guard.isOk(IP)); + guard.increment(IP); // one connection + EXPECT_TRUE(guard.isOk(IP)); + guard.increment(IP); // two connections + EXPECT_TRUE(guard.isOk(IP)); + guard.increment(IP); // > two connections, can't connect more + EXPECT_FALSE(guard.isOk(IP)); + + guard.decrement(IP); + EXPECT_TRUE(guard.isOk(IP)); // can connect again +} + +TEST_F(DOSGuardTest, FetchCount) +{ + EXPECT_TRUE(guard.add(IP, 50)); // half of allowence + EXPECT_TRUE(guard.add(IP, 50)); // now fully charged + EXPECT_FALSE(guard.add(IP, 1)); // can't add even 1 anymore + EXPECT_FALSE(guard.isOk(IP)); + + guard.clear(); // force clear the above fetch count + EXPECT_TRUE(guard.isOk(IP)); // can fetch again +} + +TEST_F(DOSGuardTest, ClearFetchCountOnTimer) +{ + EXPECT_TRUE(guard.add(IP, 50)); // half of allowence + EXPECT_TRUE(guard.add(IP, 50)); // now fully charged + EXPECT_FALSE(guard.add(IP, 1)); // can't add even 1 anymore + EXPECT_FALSE(guard.isOk(IP)); + + sweepHandler.sweep(); // pretend sweep called from timer + EXPECT_TRUE(guard.isOk(IP)); // can fetch again +} + +TEST_F(DOSGuardTest, RequestLimit) +{ + EXPECT_TRUE(guard.request(IP)); + EXPECT_TRUE(guard.request(IP)); + EXPECT_TRUE(guard.request(IP)); + EXPECT_TRUE(guard.isOk(IP)); + EXPECT_FALSE(guard.request(IP)); + EXPECT_FALSE(guard.isOk(IP)); + guard.clear(); + EXPECT_TRUE(guard.isOk(IP)); // can request again +} + +TEST_F(DOSGuardTest, RequestLimitOnTimer) +{ + EXPECT_TRUE(guard.request(IP)); + EXPECT_TRUE(guard.request(IP)); + EXPECT_TRUE(guard.request(IP)); + EXPECT_TRUE(guard.isOk(IP)); + EXPECT_FALSE(guard.request(IP)); + EXPECT_FALSE(guard.isOk(IP)); + sweepHandler.sweep(); + EXPECT_TRUE(guard.isOk(IP)); // can request again +} + +template +struct BasicDOSGuardMock : public BaseDOSGuard +{ + BasicDOSGuardMock(SweepHandler& handler) + { + handler.setup(this); + } + + MOCK_METHOD(void, clear, (), (noexcept, override)); +}; + +class DOSGuardIntervalSweepHandlerTest : public SyncAsioContextTest +{ +protected: 
+ Config cfg{json::parse(JSONData2)}; + IntervalSweepHandler sweepHandler{cfg, ctx}; + BasicDOSGuardMock guard{sweepHandler}; +}; + +TEST_F(DOSGuardIntervalSweepHandlerTest, SweepAfterInterval) +{ + EXPECT_CALL(guard, clear()).Times(AtLeast(2)); + ctx.run_for(std::chrono::milliseconds(300)); +} diff --git a/unittests/Logger.cpp b/unittests/Logger.cpp new file mode 100644 index 00000000..e976551d --- /dev/null +++ b/unittests/Logger.cpp @@ -0,0 +1,68 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +using namespace clio; + +// Used as a fixture for tests with enabled logging +class LoggerTest : public LoggerFixture +{ +}; + +// Used as a fixture for tests with disabled logging +class NoLoggerTest : public NoLoggerFixture +{ +}; + +TEST_F(LoggerTest, Basic) +{ + Logger log{"General"}; + log.info() << "Info line logged"; + checkEqual("General:NFO Info line logged"); + + LogService::debug() << "Debug line with numbers " << 12345; + checkEqual("General:DBG Debug line with numbers 12345"); + + LogService::warn() << "Warning is logged"; + checkEqual("General:WRN Warning is logged"); +} + +TEST_F(LoggerTest, Filtering) +{ + Logger log{"General"}; + log.trace() << "Should not be logged"; + checkEmpty(); + + log.warn() << "Warning is logged"; + checkEqual("General:WRN Warning is logged"); + + Logger tlog{"Trace"}; + tlog.trace() << "Trace line logged for 'Trace' component"; + checkEqual("Trace:TRC Trace line logged for 'Trace' component"); +} + +TEST_F(NoLoggerTest, Basic) +{ + Logger log{"Trace"}; + log.trace() << "Nothing"; + checkEmpty(); + + LogService::fatal() << "Still nothing"; + checkEmpty(); +} diff --git a/unittests/Playground.cpp b/unittests/Playground.cpp new file mode 100644 index 00000000..99d5be89 --- /dev/null +++ b/unittests/Playground.cpp @@ -0,0 +1,31 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +using namespace clio; + +/* + * Use this file for temporary tests and implementations. + * Note: Please don't push your temporary work to the repo. + */ + +// TEST(PlaygroundTest, MyTest) +// { +// } diff --git a/unittests/ProfilerTest.cpp b/unittests/ProfilerTest.cpp new file mode 100644 index 00000000..b9cefd30 --- /dev/null +++ b/unittests/ProfilerTest.cpp @@ -0,0 +1,108 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +using namespace util; +TEST(TimedTest, HasReturnValue) +{ + auto [ret, time] = timed([]() { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return 8; + }); + + ASSERT_EQ(ret, 8); + ASSERT_NE(time, 0); +} + +TEST(TimedTest, ReturnVoid) +{ + auto time = timed( + []() { std::this_thread::sleep_for(std::chrono::milliseconds(5)); }); + + ASSERT_NE(time, 0); +} + +struct FunctorTest +{ + void + operator()() const + { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + } +}; + +TEST(TimedTest, Functor) +{ + auto time = timed(FunctorTest()); + + ASSERT_NE(time, 0); +} + +TEST(TimedTest, MovedLambda) +{ + auto f = []() { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return 8; + }; + auto [ret, time] = timed(std::move(f)); + + ASSERT_EQ(ret, 8); + ASSERT_NE(time, 0); +} + +TEST(TimedTest, ChangeToNs) +{ + auto f = []() { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return 8; + }; + auto [ret, time] = timed(std::move(f)); + ASSERT_EQ(ret, 8); + ASSERT_GE(time, 5 * 1000000); +} + +TEST(TimedTest, NestedLambda) +{ + double timeNested; + auto f = [&]() { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + timeNested = timed([]() { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + }); + return 8; + }; + auto [ret, time] = timed(std::move(f)); + ASSERT_EQ(ret, 8); + ASSERT_GE(timeNested, 5); + ASSERT_GE(time, 10 * 1000000); +} + +TEST(TimedTest, FloatSec) +{ + auto f = []() { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return 8; + }; + auto [ret, time] = timed>(std::move(f)); + ASSERT_EQ(ret, 8); + ASSERT_GE(time, 0); +} diff --git a/unittests/README.md b/unittests/README.md new file mode 100644 index 00000000..e67495dc --- /dev/null +++ b/unittests/README.md @@ -0,0 +1,25 @@ +# Unit Testing +The correctness of new implementations can be verified via running 
unit tests. Below are the information on how to run unit tests. +## Requirements +### 1. Cassandra cluster +Have access to a **local (127.0.0.1)** Cassandra cluster, opened at port **9042**. Please ensure that the cluster is successfully running before running Unit Tests. +## Running +To run the unit tests, first build Clio as normal, then execute `./clio_tests` to run the unit tests. + +## Tests +Below is a list of currently available unit tests. Please keep in mind that this list should be constantly updated with new unit tests as new features are added to the project. + +- BackendTest.basic +- Backend.cache +- Backend.cacheBackground +- Backend.cacheIntegration + +# Adding Unit Tests +To add unit tests, append a new test block in the unittests/main.cpp file with the following format: + +```cpp +TEST(module_name, test_name) +{ + // Test code goes here +} +``` diff --git a/unittests/SubscriptionManagerTest.cpp b/unittests/SubscriptionManagerTest.cpp new file mode 100644 index 00000000..b65c24a1 --- /dev/null +++ b/unittests/SubscriptionManagerTest.cpp @@ -0,0 +1,880 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +using namespace std::chrono_literals; +namespace json = boost::json; +using namespace Backend; +using ::testing::Return; + +// common const +constexpr static auto CURRENCY = "0158415500000000C1F76FF6ECB0BAC600000000"; +constexpr static auto ISSUER = "rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD"; +constexpr static auto ACCOUNT1 = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; +constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; +constexpr static auto LEDGERHASH = + "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto LEDGERHASH2 = + "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto TXNID = + "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; + +/* + * test subscription factory method and report function + */ +TEST(SubscriptionManagerTest, InitAndReport) +{ + constexpr static auto ReportReturn = R"({ + "ledger":0, + "transactions":0, + "transactions_proposed":0, + "manifests":0, + "validations":0, + "account":0, + "accounts_proposed":0, + "books":0, + "book_changes":0 + })"; + clio::Config cfg; + auto backend = std::make_shared(cfg); + auto subManager = + SubscriptionManager::make_SubscriptionManager(cfg, backend); + EXPECT_EQ(subManager->report(), json::parse(ReportReturn)); +} + +void +CheckSubscriberMessage( + std::string out, + std::shared_ptr session, + int retry = 10) +{ + auto sessionPtr = static_cast(session.get()); + while (retry-- != 0) + { + std::this_thread::sleep_for(20ms); + if ((!sessionPtr->message.empty()) && + json::parse(sessionPtr->message) == json::parse(out)) + { + return; + } + } + EXPECT_TRUE(false) << "Could not wait the subscriber message, expect:" + << out << " Get:" << sessionPtr->message; +} + +// Fixture contains test target and mock backend +class 
SubscriptionManagerSimpleBackendTest : public MockBackendTest +{ +protected: + clio::Config cfg; + std::shared_ptr subManagerPtr; + util::TagDecoratorFactory tagDecoratorFactory{cfg}; + std::shared_ptr session; + void + SetUp() override + { + MockBackendTest::SetUp(); + subManagerPtr = + SubscriptionManager::make_SubscriptionManager(cfg, mockBackendPtr); + session = std::make_shared(tagDecoratorFactory); + } + void + TearDown() override + { + MockBackendTest::TearDown(); + subManagerPtr.reset(); + } +}; + +/* + * test report function and unsub functions + */ +TEST_F(SubscriptionManagerSimpleBackendTest, ReportCurrentSubscriber) +{ + constexpr static auto ReportReturn = R"({ + "ledger":0, + "transactions":2, + "transactions_proposed":2, + "manifests":2, + "validations":2, + "account":2, + "accounts_proposed":2, + "books":2, + "book_changes":2 + })"; + std::shared_ptr session1 = + std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = + std::make_shared(tagDecoratorFactory); + subManagerPtr->subBookChanges(session1); + subManagerPtr->subBookChanges(session2); + subManagerPtr->subManifest(session1); + subManagerPtr->subManifest(session2); + subManagerPtr->subProposedTransactions(session1); + subManagerPtr->subProposedTransactions(session2); + subManagerPtr->subTransactions(session1); + subManagerPtr->subTransactions(session2); + subManagerPtr->subValidation(session1); + subManagerPtr->subValidation(session2); + auto account = GetAccountIDWithString(ACCOUNT1); + subManagerPtr->subAccount(account, session1); + subManagerPtr->subAccount(account, session2); + subManagerPtr->subProposedAccount(account, session1); + subManagerPtr->subProposedAccount(account, session2); + auto issue1 = GetIssue(CURRENCY, ISSUER); + ripple::Book book{ripple::xrpIssue(), issue1}; + subManagerPtr->subBook(book, session1); + subManagerPtr->subBook(book, session2); + std::this_thread::sleep_for(20ms); + EXPECT_EQ(subManagerPtr->report(), json::parse(ReportReturn)); + 
subManagerPtr->unsubBookChanges(session1); + subManagerPtr->unsubManifest(session1); + subManagerPtr->unsubProposedTransactions(session1); + subManagerPtr->unsubTransactions(session1); + subManagerPtr->unsubValidation(session1); + subManagerPtr->unsubAccount(account, session1); + subManagerPtr->unsubProposedAccount(account, session1); + subManagerPtr->unsubBook(book, session1); + std::this_thread::sleep_for(20ms); + auto checkResult = [](json::object reportReturn, int result) { + EXPECT_EQ(reportReturn["book_changes"], result); + EXPECT_EQ(reportReturn["validations"], result); + EXPECT_EQ(reportReturn["transactions_proposed"], result); + EXPECT_EQ(reportReturn["transactions"], result); + EXPECT_EQ(reportReturn["manifests"], result); + EXPECT_EQ(reportReturn["accounts_proposed"], result); + EXPECT_EQ(reportReturn["account"], result); + EXPECT_EQ(reportReturn["books"], result); + }; + checkResult(subManagerPtr->report(), 1); + subManagerPtr->cleanup(session2); + subManagerPtr->cleanup(session2); // clean a removed session + std::this_thread::sleep_for(20ms); + checkResult(subManagerPtr->report(), 0); +} + +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerLedgerUnSub) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + boost::asio::io_context ctx; + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + // mock fetchLedgerBySequence return this ledger + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // mock doFetchLedgerObject return fee setting ledger object + auto feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(feeBlob)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + subManagerPtr->subLedger(yield, 
session); + }); + ctx.run(); + std::this_thread::sleep_for(20ms); + auto report = subManagerPtr->report(); + EXPECT_EQ(report["ledger"], 1); + subManagerPtr->cleanup(session); + subManagerPtr->unsubLedger(session); + std::this_thread::sleep_for(20ms); + report = subManagerPtr->report(); + EXPECT_EQ(report["ledger"], 0); +} + +/* + * test Manifest + * Subscription Manager forward the manifest message to subscribers + */ +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerManifestTest) +{ + subManagerPtr->subManifest(session); + constexpr static auto dummyManifest = R"({"manifest":"test"})"; + subManagerPtr->forwardManifest(json::parse(dummyManifest).get_object()); + CheckSubscriberMessage(dummyManifest, session); +} + +/* + * test Validation + * Subscription Manager forward the validation message to subscribers + */ +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerValidation) +{ + subManagerPtr->subValidation(session); + constexpr static auto dummyValidation = R"({"validation":"test"})"; + subManagerPtr->forwardValidation(json::parse(dummyValidation).get_object()); + CheckSubscriberMessage(dummyValidation, session); +} + +/* + * test ProposedTransaction + * We don't need the valid transaction in this test, subscription manager just + * forward the message to subscriber + */ +TEST_F( + SubscriptionManagerSimpleBackendTest, + SubscriptionManagerProposedTransaction) +{ + subManagerPtr->subProposedTransactions(session); + constexpr static auto dummyTransaction = R"({ + "transaction": + { + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn" + } + })"; + subManagerPtr->forwardProposedTransaction( + json::parse(dummyTransaction).get_object()); + CheckSubscriberMessage(dummyTransaction, session); +} + +/* + * test ProposedTransaction for one account + * we need to construct a valid account in the transaction + * this test subscribe the proposed transaction for two accounts + * but only forward a transaction with one of them + * check the correct 
session is called + */ +TEST_F( + SubscriptionManagerSimpleBackendTest, + SubscriptionManagerAccountProposedTransaction) +{ + auto account = GetAccountIDWithString(ACCOUNT1); + subManagerPtr->subProposedAccount(account, session); + + std::shared_ptr sessionIdle = + std::make_shared(tagDecoratorFactory); + auto accountIdle = GetAccountIDWithString(ACCOUNT2); + subManagerPtr->subProposedAccount(accountIdle, sessionIdle); + + constexpr static auto dummyTransaction = R"({ + "transaction": + { + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn" + } + })"; + subManagerPtr->forwardProposedTransaction( + json::parse(dummyTransaction).get_object()); + CheckSubscriberMessage(dummyTransaction, session); + auto rawIdle = (MockSession*)(sessionIdle.get()); + EXPECT_EQ("", rawIdle->message); +} + +/* + * test ledger stream + * check 1 subscribe response, 2 publish message + * mock backend to return fee ledger object + */ +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerLedger) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + boost::asio::io_context ctx; + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + // mock fetchLedgerBySequence return this ledger + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // mock doFetchLedgerObject return fee setting ledger object + auto feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(feeBlob)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + // check the function response + // Information about the ledgers on hand and current fee schedule. 
This + // includes the same fields as a ledger stream message, except that it omits + // the type and txn_count fields + constexpr static auto LedgerResponse = R"({ + "validated_ledgers":"10-30", + "ledger_index":30, + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652", + "ledger_time":0, + "fee_ref":4, + "fee_base":1, + "reserve_base":3, + "reserve_inc":2 + })"; + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto res = subManagerPtr->subLedger(yield, session); + // check the response + EXPECT_EQ(res, json::parse(LedgerResponse)); + }); + ctx.run(); + // test publish + auto ledgerinfo2 = CreateLedgerInfo(LEDGERHASH, 31); + auto fee2 = ripple::Fees(); + fee2.reserve = 10; + subManagerPtr->pubLedger(ledgerinfo2, fee2, "10-31", 8); + constexpr static auto LedgerPub = R"({ + "type":"ledgerClosed", + "ledger_index":31, + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652", + "ledger_time":0, + "fee_ref":0, + "fee_base":0, + "reserve_base":10, + "reserve_inc":0, + "validated_ledgers":"10-31", + "txn_count":8 + })"; + CheckSubscriberMessage(LedgerPub, session); +} + +/* + * test book change + * create a book change meta data for + * XRP vs A token + * the transaction is just placeholder + * Book change computing only needs meta data + */ +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerBookChange) +{ + subManagerPtr->subBookChanges(session); + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 32); + auto transactions = std::vector{}; + auto trans1 = TransactionAndMetadata(); + ripple::STObject obj = + CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); + trans1.transaction = obj.getSerializer().peekData(); + trans1.ledgerSequence = 32; + ripple::STObject metaObj = + CreateMetaDataForBookChange(CURRENCY, ISSUER, 22, 1, 3, 3, 1); + trans1.metadata = metaObj.getSerializer().peekData(); + transactions.push_back(trans1); + subManagerPtr->pubBookChanges(ledgerinfo, 
transactions); + constexpr static auto BookChangePublish = R"({ + "type":"bookChanges", + "ledger_index":32, + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652", + "ledger_time":0, + "changes":[ + { + "currency_a":"XRP_drops", + "currency_b":"rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD/0158415500000000C1F76FF6ECB0BAC600000000", + "volume_a":"2", + "volume_b":"2", + "high":"-1", + "low":"-1", + "open":"-1", + "close":"-1" + } + ] + })"; + CheckSubscriberMessage(BookChangePublish, session, 20); +} + +/* + * test transaction stream + */ +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerTransaction) +{ + subManagerPtr->subTransactions(session); + + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); + + auto trans1 = TransactionAndMetadata(); + ripple::STObject obj = + CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); + trans1.transaction = obj.getSerializer().peekData(); + trans1.ledgerSequence = 32; + // create an empty meta object + ripple::STArray metaArray{0}; + ripple::STObject metaObj(ripple::sfTransactionMetaData); + metaObj.setFieldArray(ripple::sfAffectedNodes, metaArray); + metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS); + metaObj.setFieldU32(ripple::sfTransactionIndex, 22); + trans1.metadata = metaObj.getSerializer().peekData(); + subManagerPtr->pubTransaction(trans1, ledgerinfo); + constexpr static auto TransactionPublish = R"({ + "transaction":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Amount":"1", + "Destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "Fee":"1", + "Sequence":32, + "SigningPubKey":"74657374", + "TransactionType":"Payment", + "hash":"51D2AAA6B8E4E16EF22F6424854283D8391B56875858A711B8CE4D5B9A422CC2", + "date":0 + }, + "meta":{ + "AffectedNodes":[], + "TransactionIndex":22, + "TransactionResult":"tesSUCCESS", + "delivered_amount":"unavailable" + }, + "type":"transaction", + "validated":true, + "status":"closed", + "ledger_index":33, + 
"ledger_hash":"1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", + "engine_result_code":0, + "engine_result":"tesSUCCESS", + "engine_result_message":"The transaction was applied. Only final in a validated ledger." + })"; + CheckSubscriberMessage(TransactionPublish, session); +} + +/* + * test transaction for offer creation + * check owner_funds + * mock backend return a trustline + */ +TEST_F( + SubscriptionManagerSimpleBackendTest, + SubscriptionManagerTransactionOfferCreation) +{ + subManagerPtr->subTransactions(session); + + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); + auto trans1 = TransactionAndMetadata(); + ripple::STObject obj = CreateCreateOfferTransactionObject( + ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); + trans1.transaction = obj.getSerializer().peekData(); + trans1.ledgerSequence = 32; + ripple::STArray metaArray{0}; + ripple::STObject metaObj(ripple::sfTransactionMetaData); + metaObj.setFieldArray(ripple::sfAffectedNodes, metaArray); + metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS); + metaObj.setFieldU32(ripple::sfTransactionIndex, 22); + trans1.metadata = metaObj.getSerializer().peekData(); + + ripple::STObject line(ripple::sfIndexes); + line.setFieldU16(ripple::sfLedgerEntryType, ripple::ltRIPPLE_STATE); + line.setFieldAmount(ripple::sfLowLimit, ripple::STAmount(10, false)); + line.setFieldAmount(ripple::sfHighLimit, ripple::STAmount(100, false)); + line.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{TXNID}); + line.setFieldU32(ripple::sfPreviousTxnLgrSeq, 3); + line.setFieldU32(ripple::sfFlags, 0); + auto issue2 = GetIssue(CURRENCY, ISSUER); + line.setFieldAmount(ripple::sfBalance, ripple::STAmount(issue2, 100)); + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); + ON_CALL(*rawBackendPtr, doFetchLedgerObject) + .WillByDefault(Return(line.getSerializer().peekData())); + subManagerPtr->pubTransaction(trans1, ledgerinfo); 
+ constexpr static auto TransactionForOwnerFund = R"({ + "transaction":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Fee":"1", + "Sequence":32, + "SigningPubKey":"74657374", + "TakerGets":{ + "currency":"0158415500000000C1F76FF6ECB0BAC600000000", + "issuer":"rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD", + "value":"1" + }, + "TakerPays":"3", + "TransactionType":"OfferCreate", + "hash":"EE8775B43A67F4803DECEC5E918E0EA9C56D8ED93E512EBE9F2891846509AAAB", + "date":0, + "owner_funds":"100" + }, + "meta":{ + "AffectedNodes":[], + "TransactionIndex":22, + "TransactionResult":"tesSUCCESS" + }, + "type":"transaction", + "validated":true, + "status":"closed", + "ledger_index":33, + "ledger_hash":"1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", + "engine_result_code":0, + "engine_result":"tesSUCCESS", + "engine_result_message":"The transaction was applied. Only final in a validated ledger." + })"; + CheckSubscriberMessage(TransactionForOwnerFund, session); +} + +constexpr static auto TransactionForOwnerFundFrozen = R"({ + "transaction":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Fee":"1", + "Sequence":32, + "SigningPubKey":"74657374", + "TakerGets":{ + "currency":"0158415500000000C1F76FF6ECB0BAC600000000", + "issuer":"rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD", + "value":"1" + }, + "TakerPays":"3", + "TransactionType":"OfferCreate", + "hash":"EE8775B43A67F4803DECEC5E918E0EA9C56D8ED93E512EBE9F2891846509AAAB", + "date":0, + "owner_funds":"0" + }, + "meta":{ + "AffectedNodes":[], + "TransactionIndex":22, + "TransactionResult":"tesSUCCESS" + }, + "type":"transaction", + "validated":true, + "status":"closed", + "ledger_index":33, + "ledger_hash":"1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", + "engine_result_code":0, + "engine_result":"tesSUCCESS", + "engine_result_message":"The transaction was applied. Only final in a validated ledger." 
+})"; + +/* + * test transaction for offer creation + * check owner_funds when line is frozen + * mock backend return a trustline + */ +TEST_F( + SubscriptionManagerSimpleBackendTest, + SubscriptionManagerTransactionOfferCreationFrozenLine) +{ + subManagerPtr->subTransactions(session); + + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); + auto trans1 = TransactionAndMetadata(); + ripple::STObject obj = CreateCreateOfferTransactionObject( + ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); + trans1.transaction = obj.getSerializer().peekData(); + trans1.ledgerSequence = 32; + ripple::STArray metaArray{0}; + ripple::STObject metaObj(ripple::sfTransactionMetaData); + metaObj.setFieldArray(ripple::sfAffectedNodes, metaArray); + metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS); + metaObj.setFieldU32(ripple::sfTransactionIndex, 22); + trans1.metadata = metaObj.getSerializer().peekData(); + + ripple::STObject line(ripple::sfIndexes); + line.setFieldU16(ripple::sfLedgerEntryType, ripple::ltRIPPLE_STATE); + line.setFieldAmount(ripple::sfLowLimit, ripple::STAmount(10, false)); + line.setFieldAmount(ripple::sfHighLimit, ripple::STAmount(100, false)); + line.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{TXNID}); + line.setFieldU32(ripple::sfPreviousTxnLgrSeq, 3); + line.setFieldU32(ripple::sfFlags, ripple::lsfHighFreeze); + line.setFieldAmount( + ripple::sfBalance, ripple::STAmount(GetIssue(CURRENCY, ISSUER), 100)); + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); + ON_CALL(*rawBackendPtr, doFetchLedgerObject) + .WillByDefault(Return(line.getSerializer().peekData())); + subManagerPtr->pubTransaction(trans1, ledgerinfo); + CheckSubscriberMessage(TransactionForOwnerFundFrozen, session); +} + +/* + * test transaction for offer creation + * check owner_funds when issue global frozen + * mock backend return a frozen account setting + */ +TEST_F( + 
SubscriptionManagerSimpleBackendTest, + SubscriptionManagerTransactionOfferCreationGlobalFrozen) +{ + subManagerPtr->subTransactions(session); + + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); + auto trans1 = TransactionAndMetadata(); + ripple::STObject obj = CreateCreateOfferTransactionObject( + ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); + trans1.transaction = obj.getSerializer().peekData(); + trans1.ledgerSequence = 32; + ripple::STArray metaArray{0}; + ripple::STObject metaObj(ripple::sfTransactionMetaData); + metaObj.setFieldArray(ripple::sfAffectedNodes, metaArray); + metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS); + metaObj.setFieldU32(ripple::sfTransactionIndex, 22); + trans1.metadata = metaObj.getSerializer().peekData(); + + ripple::STObject line(ripple::sfIndexes); + line.setFieldU16(ripple::sfLedgerEntryType, ripple::ltRIPPLE_STATE); + line.setFieldAmount(ripple::sfLowLimit, ripple::STAmount(10, false)); + line.setFieldAmount(ripple::sfHighLimit, ripple::STAmount(100, false)); + line.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{TXNID}); + line.setFieldU32(ripple::sfPreviousTxnLgrSeq, 3); + line.setFieldU32(ripple::sfFlags, ripple::lsfHighFreeze); + auto issueAccount = GetAccountIDWithString(ISSUER); + line.setFieldAmount( + ripple::sfBalance, ripple::STAmount(GetIssue(CURRENCY, ISSUER), 100)); + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + auto kk = ripple::keylet::account(issueAccount).key; + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(testing::_, testing::_, testing::_)) + .WillByDefault(Return(line.getSerializer().peekData())); + ripple::STObject accountRoot = CreateAccountRootObject( + ISSUER, ripple::lsfGlobalFreeze, 1, 10, 2, TXNID, 3); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(kk, testing::_, testing::_)) + .WillByDefault(Return(accountRoot.getSerializer().peekData())); + subManagerPtr->pubTransaction(trans1, ledgerinfo); + 
CheckSubscriberMessage(TransactionForOwnerFundFrozen, session); +} + +/* + * test subscribe account + */ +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerAccount) +{ + auto account = GetAccountIDWithString(ACCOUNT1); + subManagerPtr->subAccount(account, session); + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); + + ripple::STObject obj = + CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); + auto trans1 = TransactionAndMetadata(); + trans1.transaction = obj.getSerializer().peekData(); + trans1.ledgerSequence = 32; + ripple::STArray metaArray{1}; + ripple::STObject node(ripple::sfModifiedNode); + // emplace account into meta, trigger publish + ripple::STObject finalFields(ripple::sfFinalFields); + finalFields.setAccountID(ripple::sfAccount, account); + node.emplace_back(finalFields); + node.setFieldU16(ripple::sfLedgerEntryType, ripple::ltACCOUNT_ROOT); + metaArray.push_back(node); + ripple::STObject metaObj(ripple::sfTransactionMetaData); + metaObj.setFieldArray(ripple::sfAffectedNodes, metaArray); + metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS); + metaObj.setFieldU32(ripple::sfTransactionIndex, 22); + trans1.metadata = metaObj.getSerializer().peekData(); + + subManagerPtr->pubTransaction(trans1, ledgerinfo); + constexpr static auto AccountPublish = R"({ + "transaction":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Amount":"1", + "Destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "Fee":"1", + "Sequence":32, + "SigningPubKey":"74657374", + "TransactionType":"Payment", + "hash":"51D2AAA6B8E4E16EF22F6424854283D8391B56875858A711B8CE4D5B9A422CC2", + "date":0 + }, + "meta":{ + "AffectedNodes":[ + { + "ModifiedNode":{ + "FinalFields":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn" + }, + "LedgerEntryType":"AccountRoot" + } + } + ], + "TransactionIndex":22, + "TransactionResult":"tesSUCCESS", + "delivered_amount":"unavailable" + }, + "type":"transaction", + "validated":true, + "status":"closed", + 
"ledger_index":33, + "ledger_hash":"1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", + "engine_result_code":0, + "engine_result":"tesSUCCESS", + "engine_result_message":"The transaction was applied. Only final in a validated ledger." + })"; + CheckSubscriberMessage(AccountPublish, session); +} + +/* + * test subscribe order book + * Create/Delete/Update offer node will trigger publish + */ +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerOrderBook) +{ + auto issue1 = GetIssue(CURRENCY, ISSUER); + ripple::Book book{ripple::xrpIssue(), issue1}; + subManagerPtr->subBook(book, session); + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); + + auto trans1 = TransactionAndMetadata(); + auto obj = CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); + trans1.transaction = obj.getSerializer().peekData(); + trans1.ledgerSequence = 32; + + auto metaObj = + CreateMetaDataForBookChange(CURRENCY, ISSUER, 22, 3, 1, 1, 3); + trans1.metadata = metaObj.getSerializer().peekData(); + subManagerPtr->pubTransaction(trans1, ledgerinfo); + + constexpr static auto OrderbookPublish = R"({ + "transaction":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Amount":"1", + "Destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "Fee":"1", + "Sequence":32, + "SigningPubKey":"74657374", + "TransactionType":"Payment", + "hash":"51D2AAA6B8E4E16EF22F6424854283D8391B56875858A711B8CE4D5B9A422CC2", + "date":0 + }, + "meta":{ + "AffectedNodes":[ + { + "ModifiedNode":{ + "FinalFields":{ + "TakerGets":"3", + "TakerPays":{ + "currency":"0158415500000000C1F76FF6ECB0BAC600000000", + "issuer":"rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD", + "value":"1" + } + }, + "LedgerEntryType":"Offer", + "PreviousFields":{ + "TakerGets":"1", + "TakerPays":{ + "currency":"0158415500000000C1F76FF6ECB0BAC600000000", + "issuer":"rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD", + "value":"3" + } + } + } + } + ], + "TransactionIndex":22, + "TransactionResult":"tesSUCCESS", + 
"delivered_amount":"unavailable" + }, + "type":"transaction", + "validated":true, + "status":"closed", + "ledger_index":33, + "ledger_hash":"1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", + "engine_result_code":0, + "engine_result":"tesSUCCESS", + "engine_result_message":"The transaction was applied. Only final in a validated ledger." + })"; + CheckSubscriberMessage(OrderbookPublish, session); + + // trigger by offer cancel meta data + std::shared_ptr session1 = + std::make_shared(tagDecoratorFactory); + subManagerPtr->subBook(book, session1); + metaObj = CreateMetaDataForCancelOffer(CURRENCY, ISSUER, 22, 3, 1); + trans1.metadata = metaObj.getSerializer().peekData(); + subManagerPtr->pubTransaction(trans1, ledgerinfo); + constexpr static auto OrderbookCancelPublish = R"({ + "transaction":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Amount":"1", + "Destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "Fee":"1", + "Sequence":32, + "SigningPubKey":"74657374", + "TransactionType":"Payment", + "hash":"51D2AAA6B8E4E16EF22F6424854283D8391B56875858A711B8CE4D5B9A422CC2", + "date":0 + }, + "meta":{ + "AffectedNodes":[ + { + "DeletedNode":{ + "FinalFields":{ + "TakerGets":"3", + "TakerPays":{ + "currency":"0158415500000000C1F76FF6ECB0BAC600000000", + "issuer":"rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD", + "value":"1" + } + }, + "LedgerEntryType":"Offer" + } + } + ], + "TransactionIndex":22, + "TransactionResult":"tesSUCCESS", + "delivered_amount":"unavailable" + }, + "type":"transaction", + "validated":true, + "status":"closed", + "ledger_index":33, + "ledger_hash":"1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", + "engine_result_code":0, + "engine_result":"tesSUCCESS", + "engine_result_message":"The transaction was applied. Only final in a validated ledger." 
+ })"; + CheckSubscriberMessage(OrderbookCancelPublish, session1); + // trigger by offer create meta data + constexpr static auto OrderbookCreatePublish = R"({ + "transaction":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Amount":"1", + "Destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "Fee":"1", + "Sequence":32, + "SigningPubKey":"74657374", + "TransactionType":"Payment", + "hash":"51D2AAA6B8E4E16EF22F6424854283D8391B56875858A711B8CE4D5B9A422CC2", + "date":0 + }, + "meta":{ + "AffectedNodes":[ + { + "CreatedNode":{ + "NewFields":{ + "TakerGets":"3", + "TakerPays":{ + "currency":"0158415500000000C1F76FF6ECB0BAC600000000", + "issuer":"rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD", + "value":"1" + } + }, + "LedgerEntryType":"Offer" + } + } + ], + "TransactionIndex":22, + "TransactionResult":"tesSUCCESS", + "delivered_amount":"unavailable" + }, + "type":"transaction", + "validated":true, + "status":"closed", + "ledger_index":33, + "ledger_hash":"1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", + "engine_result_code":0, + "engine_result":"tesSUCCESS", + "engine_result_message":"The transaction was applied. Only final in a validated ledger." + })"; + std::shared_ptr session2 = + std::make_shared(tagDecoratorFactory); + subManagerPtr->subBook(book, session2); + metaObj = CreateMetaDataForCreateOffer(CURRENCY, ISSUER, 22, 3, 1); + trans1.metadata = metaObj.getSerializer().peekData(); + subManagerPtr->pubTransaction(trans1, ledgerinfo); + CheckSubscriberMessage(OrderbookCreatePublish, session2); +} diff --git a/unittests/SubscriptionTest.cpp b/unittests/SubscriptionTest.cpp new file mode 100644 index 00000000..5f3f3a1f --- /dev/null +++ b/unittests/SubscriptionTest.cpp @@ -0,0 +1,218 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +#include +#include + +namespace json = boost::json; + +TEST(MessageTest, Message) +{ + auto m = Message{"test"}; + EXPECT_STREQ(m.data(), "test"); + EXPECT_EQ(m.size(), 4); +} + +// io_context +class SubscriptionTest : public SyncAsioContextTest +{ +protected: + clio::Config cfg; + util::TagDecoratorFactory tagDecoratorFactory{cfg}; +}; + +class SubscriptionMapTest : public SubscriptionTest +{ +}; + +// subscribe/unsubscribe the same session would not change the count +TEST_F(SubscriptionTest, SubscriptionCount) +{ + Subscription sub(ctx); + std::shared_ptr session1 = + std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = + std::make_shared(tagDecoratorFactory); + sub.subscribe(session1); + sub.subscribe(session2); + ctx.run(); + EXPECT_EQ(sub.count(), 2); + sub.subscribe(session1); + ctx.restart(); + ctx.run(); + EXPECT_EQ(sub.count(), 2); + EXPECT_FALSE(sub.empty()); + sub.unsubscribe(session1); + ctx.restart(); + ctx.run(); + EXPECT_EQ(sub.count(), 1); + sub.unsubscribe(session1); + ctx.restart(); + ctx.run(); + EXPECT_EQ(sub.count(), 1); + sub.unsubscribe(session2); + ctx.restart(); + ctx.run(); + EXPECT_EQ(sub.count(), 0); 
+ EXPECT_TRUE(sub.empty()); +} + +// send interface will be called when publish called +TEST_F(SubscriptionTest, SubscriptionPublish) +{ + Subscription sub(ctx); + std::shared_ptr session1 = + std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = + std::make_shared(tagDecoratorFactory); + sub.subscribe(session1); + sub.subscribe(session2); + ctx.run(); + EXPECT_EQ(sub.count(), 2); + sub.publish(std::make_shared("message")); + ctx.restart(); + ctx.run(); + MockSession* p1 = (MockSession*)(session1.get()); + EXPECT_EQ(p1->message, "message"); + MockSession* p2 = (MockSession*)(session2.get()); + EXPECT_EQ(p2->message, "message"); + sub.unsubscribe(session1); + ctx.restart(); + ctx.run(); + sub.publish(std::make_shared("message2")); + ctx.restart(); + ctx.run(); + EXPECT_EQ(p1->message, "message"); + EXPECT_EQ(p2->message, "messagemessage2"); +} + +// when error happen during send(), the subsciber will be removed after +TEST_F(SubscriptionTest, SubscriptionDeadRemoveSubscriber) +{ + Subscription sub(ctx); + std::shared_ptr session1(new MockDeadSession(tagDecoratorFactory)); + sub.subscribe(session1); + ctx.run(); + EXPECT_EQ(sub.count(), 1); + // trigger dead + sub.publish(std::make_shared("message")); + ctx.restart(); + ctx.run(); + EXPECT_EQ(session1->dead(), true); + sub.publish(std::make_shared("message")); + ctx.restart(); + ctx.run(); + EXPECT_EQ(sub.count(), 0); +} + +TEST_F(SubscriptionMapTest, SubscriptionMapCount) +{ + std::shared_ptr session1 = + std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = + std::make_shared(tagDecoratorFactory); + std::shared_ptr session3 = + std::make_shared(tagDecoratorFactory); + SubscriptionMap subMap(ctx); + subMap.subscribe(session1, "topic1"); + subMap.subscribe(session2, "topic1"); + subMap.subscribe(session3, "topic2"); + ctx.run(); + EXPECT_EQ(subMap.count(), 3); + subMap.subscribe(session1, "topic1"); + subMap.subscribe(session2, "topic1"); + ctx.restart(); + ctx.run(); + 
EXPECT_EQ(subMap.count(), 3); + subMap.unsubscribe(session1, "topic1"); + ctx.restart(); + ctx.run(); + subMap.unsubscribe(session1, "topic1"); + subMap.unsubscribe(session2, "topic1"); + subMap.unsubscribe(session3, "topic2"); + ctx.restart(); + ctx.run(); + EXPECT_EQ(subMap.count(), 0); + subMap.unsubscribe(session3, "topic2"); + subMap.unsubscribe(session3, "no exist"); + ctx.restart(); + ctx.run(); + EXPECT_EQ(subMap.count(), 0); +} + +TEST_F(SubscriptionMapTest, SubscriptionMapPublish) +{ + std::shared_ptr session1 = + std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = + std::make_shared(tagDecoratorFactory); + SubscriptionMap subMap(ctx); + const std::string topic1 = "topic1"; + const std::string topic2 = "topic2"; + const std::string topic1Message = "topic1Message"; + const std::string topic2Message = "topic2Message"; + subMap.subscribe(session1, topic1); + subMap.subscribe(session2, topic2); + ctx.run(); + EXPECT_EQ(subMap.count(), 2); + auto message1 = std::make_shared(topic1Message.data()); + subMap.publish(message1, topic1); // lvalue + subMap.publish( + std::make_shared(topic2Message.data()), topic2); // rvalue + ctx.restart(); + ctx.run(); + MockSession* p1 = (MockSession*)(session1.get()); + EXPECT_EQ(p1->message, topic1Message); + MockSession* p2 = (MockSession*)(session2.get()); + EXPECT_EQ(p2->message, topic2Message); +} + +TEST_F(SubscriptionMapTest, SubscriptionMapDeadRemoveSubscriber) +{ + std::shared_ptr session1(new MockDeadSession(tagDecoratorFactory)); + std::shared_ptr session2 = + std::make_shared(tagDecoratorFactory); + SubscriptionMap subMap(ctx); + const std::string topic1 = "topic1"; + const std::string topic2 = "topic2"; + const std::string topic1Message = "topic1Message"; + const std::string topic2Message = "topic2Message"; + subMap.subscribe(session1, topic1); + subMap.subscribe(session2, topic2); + ctx.run(); + EXPECT_EQ(subMap.count(), 2); + auto message1 = std::make_shared(topic1Message.data()); + 
subMap.publish(message1, topic1); // lvalue + subMap.publish( + std::make_shared(topic2Message.data()), topic2); // rvalue + ctx.restart(); + ctx.run(); + MockDeadSession* p1 = (MockDeadSession*)(session1.get()); + EXPECT_EQ(p1->dead(), true); + MockSession* p2 = (MockSession*)(session2.get()); + EXPECT_EQ(p2->message, topic2Message); + subMap.publish(message1, topic1); + ctx.restart(); + ctx.run(); + EXPECT_EQ(subMap.count(), 1); +} diff --git a/unittests/rpc/BaseTests.cpp b/unittests/rpc/BaseTests.cpp new file mode 100644 index 00000000..c5d320d7 --- /dev/null +++ b/unittests/rpc/BaseTests.cpp @@ -0,0 +1,415 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include + +using namespace clio; +using namespace std; + +using namespace RPCng; +using namespace RPCng::validation; + +namespace json = boost::json; + +class RPCBaseTest : public NoLoggerFixture +{ +}; + +TEST_F(RPCBaseTest, CheckType) +{ + auto const jstr = json::value("a string"); + ASSERT_TRUE(checkType(jstr)); + ASSERT_FALSE(checkType(jstr)); + + auto const juint = json::value(123u); + ASSERT_TRUE(checkType(juint)); + ASSERT_TRUE(checkType(juint)); + ASSERT_FALSE(checkType(juint)); + + auto const jint = json::value(123); + ASSERT_TRUE(checkType(jint)); + ASSERT_TRUE(checkType(jint)); + ASSERT_FALSE(checkType(jint)); + + auto const jbool = json::value(true); + ASSERT_TRUE(checkType(jbool)); + ASSERT_FALSE(checkType(jbool)); + + auto const jdouble = json::value(0.123); + ASSERT_TRUE(checkType(jdouble)); + ASSERT_TRUE(checkType(jdouble)); + ASSERT_FALSE(checkType(jdouble)); + + auto const jarr = json::value({1, 2, 3}); + ASSERT_TRUE(checkType(jarr)); + ASSERT_FALSE(checkType(jarr)); +} + +TEST_F(RPCBaseTest, TypeValidator) +{ + auto spec = RpcSpec{ + {"uint", Type{}}, + {"int", Type{}}, + {"str", Type{}}, + {"double", Type{}}, + {"bool", Type{}}, + {"arr", Type{}}, + }; + + auto passingInput = json::parse(R"({ + "uint": 123, + "int": 321, + "str": "a string", + "double": 1.0, + "bool": true, + "arr": [] + })"); + ASSERT_TRUE(spec.validate(passingInput)); + + { + auto failingInput = json::parse(R"({ "uint": "a string" })"); + ASSERT_FALSE(spec.validate(failingInput)); + } + { + auto failingInput = json::parse(R"({ "int": "a string" })"); + ASSERT_FALSE(spec.validate(failingInput)); + } + { + auto failingInput = json::parse(R"({ "str": 1234 })"); + ASSERT_FALSE(spec.validate(failingInput)); + } + { + auto failingInput = json::parse(R"({ "double": "a string" })"); + 
ASSERT_FALSE(spec.validate(failingInput)); + } + { + auto failingInput = json::parse(R"({ "bool": "a string" })"); + ASSERT_FALSE(spec.validate(failingInput)); + } + { + auto failingInput = json::parse(R"({ "arr": "a string" })"); + ASSERT_FALSE(spec.validate(failingInput)); + } +} + +TEST_F(RPCBaseTest, TypeValidatorMultipleTypes) +{ + auto spec = RpcSpec{ + // either int or string + {"test", Type{}}, + }; + + auto passingInput = json::parse(R"({ "test": "1234" })"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto passingInput2 = json::parse(R"({ "test": 1234 })"); + ASSERT_TRUE(spec.validate(passingInput2)); + + auto failingInput = json::parse(R"({ "test": true })"); + ASSERT_FALSE(spec.validate(failingInput)); +} + +TEST_F(RPCBaseTest, RequiredValidator) +{ + auto spec = RpcSpec{ + {"required", Required{}}, + }; + + auto passingInput = json::parse(R"({ "required": "present" })"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto passingInput2 = json::parse(R"({ "required": true })"); + ASSERT_TRUE(spec.validate(passingInput2)); + + auto failingInput = json::parse(R"({})"); + ASSERT_FALSE(spec.validate(failingInput)); +} + +TEST_F(RPCBaseTest, BetweenValidator) +{ + auto spec = RpcSpec{ + {"amount", Between{10u, 20u}}, + }; + + auto passingInput = json::parse(R"({ "amount": 15 })"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto passingInput2 = json::parse(R"({ "amount": 10 })"); + ASSERT_TRUE(spec.validate(passingInput2)); + + auto passingInput3 = json::parse(R"({ "amount": 20 })"); + ASSERT_TRUE(spec.validate(passingInput3)); + + auto failingInput = json::parse(R"({ "amount": 9 })"); + ASSERT_FALSE(spec.validate(failingInput)); + + auto failingInput2 = json::parse(R"({ "amount": 21 })"); + ASSERT_FALSE(spec.validate(failingInput2)); +} + +TEST_F(RPCBaseTest, OneOfValidator) +{ + auto spec = RpcSpec{ + {"currency", OneOf{"XRP", "USD"}}, + }; + + auto passingInput = json::parse(R"({ "currency": "XRP" })"); + ASSERT_TRUE(spec.validate(passingInput)); 
+ + auto passingInput2 = json::parse(R"({ "currency": "USD" })"); + ASSERT_TRUE(spec.validate(passingInput2)); + + auto failingInput = json::parse(R"({ "currency": "PRX" })"); + ASSERT_FALSE(spec.validate(failingInput)); +} + +TEST_F(RPCBaseTest, EqualToValidator) +{ + auto spec = RpcSpec{ + {"exact", EqualTo{"CaseSensitive"}}, + }; + + auto passingInput = json::parse(R"({ "exact": "CaseSensitive" })"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto failingInput = json::parse(R"({ "exact": "Different" })"); + ASSERT_FALSE(spec.validate(failingInput)); +} + +TEST_F(RPCBaseTest, ArrayAtValidator) +{ + // clang-format off + auto spec = RpcSpec{ + {"arr", Required{}, Type{}, ValidateArrayAt{0, { + {"limit", Required{}, Type{}, Between{0, 100}}, + }}}, + }; + // clang-format on + + auto passingInput = json::parse(R"({ "arr": [{"limit": 42}] })"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto failingInput = json::parse(R"({ "arr": [{"limit": "not int"}] })"); + ASSERT_FALSE(spec.validate(failingInput)); +} + +TEST_F(RPCBaseTest, IfTypeValidator) +{ + // clang-format off + auto spec = RpcSpec{ + {"mix", Required{}, + Type{}, + IfType{ + Section{{ "limit", Required{}, Type{}, Between{0, 100}}}, + Section{{ "limit2", Required{}, Type{}, Between{0, 100}}} + }, + IfType{Uint256HexStringValidator,} + }}; + // clang-format on + // if json object pass + auto passingInput = + json::parse(R"({ "mix": {"limit": 42, "limit2": 22} })"); + ASSERT_TRUE(spec.validate(passingInput)); + // if string pass + passingInput = json::parse( + R"({ "mix": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC" })"); + ASSERT_TRUE(spec.validate(passingInput)); + + // if json object fail at first requirement + auto failingInput = json::parse(R"({ "mix": {"limit": "not int"} })"); + ASSERT_FALSE(spec.validate(failingInput)); + // if json object fail at second requirement + failingInput = json::parse(R"({ "mix": {"limit": 22, "limit2": "y"} })"); + 
ASSERT_FALSE(spec.validate(failingInput)); + + // if string fail + failingInput = json::parse(R"({ "mix": "not hash" })"); + ASSERT_FALSE(spec.validate(failingInput)); + + // type check fail + failingInput = json::parse(R"({ "mix": 1213 })"); + ASSERT_FALSE(spec.validate(failingInput)); +} + +TEST_F(RPCBaseTest, WithCustomError) +{ + auto const spec = RpcSpec{ + {"transaction", + WithCustomError{ + Uint256HexStringValidator, + RPC::Status{ripple::rpcBAD_FEATURE, "MyCustomError"}}}, + {"other", + WithCustomError{ + Type{}, + RPC::Status{ripple::rpcALREADY_MULTISIG, "MyCustomError2"}}}}; + + auto const passingInput = json::parse( + R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", "other": "1"})"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto failingInput = json::parse( + R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515B"})"); + auto err = spec.validate(failingInput); + ASSERT_FALSE(err); + ASSERT_EQ(err.error().message, "MyCustomError"); + ASSERT_EQ(err.error(), ripple::rpcBAD_FEATURE); + + failingInput = json::parse(R"({ "other": 1})"); + err = spec.validate(failingInput); + ASSERT_FALSE(err); + ASSERT_EQ(err.error().message, "MyCustomError2"); + ASSERT_EQ(err.error(), ripple::rpcALREADY_MULTISIG); +} + +TEST_F(RPCBaseTest, CustomValidator) +{ + // clang-format off + auto customFormatCheck = CustomValidator{ + [](json::value const& value, std::string_view key) -> MaybeError { + return value.as_string().size() == 34 ? 
+ MaybeError{} : Error{RPC::Status{"Uh oh"}}; + } + }; + // clang-format on + + auto spec = RpcSpec{ + {"taker", customFormatCheck}, + }; + + auto passingInput = + json::parse(R"({ "taker": "r9cZA1mLK5R5Am25ArfXFmqgNwjZgnfk59" })"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto failingInput = json::parse(R"({ "taker": "wrongformat" })"); + ASSERT_FALSE(spec.validate(failingInput)); +} + +TEST_F(RPCBaseTest, LedgerIndexValidator) +{ + auto spec = RpcSpec{ + {"ledgerIndex", LedgerIndexValidator}, + }; + auto passingInput = json::parse(R"({ "ledgerIndex": "validated" })"); + ASSERT_TRUE(spec.validate(passingInput)); + + passingInput = json::parse(R"({ "ledgerIndex": "256" })"); + ASSERT_TRUE(spec.validate(passingInput)); + + passingInput = json::parse(R"({ "ledgerIndex": 256 })"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto failingInput = json::parse(R"({ "ledgerIndex": "wrongformat" })"); + auto err = spec.validate(failingInput); + ASSERT_FALSE(err); + ASSERT_EQ(err.error().message, "ledgerIndexMalformed"); +} + +TEST_F(RPCBaseTest, AccountValidator) +{ + auto spec = RpcSpec{ + {"account", AccountValidator}, + }; + auto failingInput = json::parse(R"({ "account": 256 })"); + ASSERT_FALSE(spec.validate(failingInput)); + + failingInput = + json::parse(R"({ "account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jp" })"); + ASSERT_FALSE(spec.validate(failingInput)); + + failingInput = json::parse( + R"({ "account": "02000000000000000000000000000000000000000000000000000000000000000" })"); + ASSERT_FALSE(spec.validate(failingInput)); + + auto passingInput = + json::parse(R"({ "account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn" })"); + ASSERT_TRUE(spec.validate(passingInput)); + + passingInput = json::parse( + R"({ "account": "020000000000000000000000000000000000000000000000000000000000000000" })"); + ASSERT_TRUE(spec.validate(passingInput)); +} + +TEST_F(RPCBaseTest, MarkerValidator) +{ + auto spec = RpcSpec{ + {"marker", MarkerValidator}, + }; + auto failingInput = 
json::parse(R"({ "marker": 256 })"); + ASSERT_FALSE(spec.validate(failingInput)); + + failingInput = json::parse(R"({ "marker": "testtest" })"); + ASSERT_FALSE(spec.validate(failingInput)); + + failingInput = json::parse(R"({ "marker": "ABAB1234:1H" })"); + ASSERT_FALSE(spec.validate(failingInput)); + + auto passingInput = json::parse(R"({ "account": "ABAB1234:123" })"); + ASSERT_TRUE(spec.validate(passingInput)); +} + +TEST_F(RPCBaseTest, Uint256HexStringValidator) +{ + auto const spec = RpcSpec{{"transaction", Uint256HexStringValidator}}; + auto const passingInput = json::parse( + R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"})"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto failingInput = json::parse(R"({ "transaction": 256})"); + auto err = spec.validate(failingInput); + ASSERT_FALSE(err); + ASSERT_EQ(err.error().message, "transactionNotString"); + + failingInput = json::parse( + R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC"})"); + err = spec.validate(failingInput); + ASSERT_FALSE(err); + ASSERT_EQ(err.error().message, "transactionMalformed"); +} + +TEST_F(RPCBaseTest, CurrencyValidator) +{ + auto const spec = RpcSpec{{"currency", CurrencyValidator}}; + auto passingInput = json::parse(R"({ "currency": "GBP"})"); + ASSERT_TRUE(spec.validate(passingInput)); + + passingInput = json::parse( + R"({ "currency": "0158415500000000C1F76FF6ECB0BAC600000000"})"); + ASSERT_TRUE(spec.validate(passingInput)); + + auto failingInput = json::parse(R"({ "currency": 256})"); + auto err = spec.validate(failingInput); + ASSERT_FALSE(err); + ASSERT_EQ(err.error().message, "currencyNotString"); + + failingInput = json::parse(R"({ "currency": "12314"})"); + err = spec.validate(failingInput); + ASSERT_FALSE(err); + ASSERT_EQ(err.error().message, "malformedCurrency"); +} diff --git a/unittests/rpc/ErrorTests.cpp b/unittests/rpc/ErrorTests.cpp new file mode 100644 index 00000000..d7b7ba81 --- /dev/null +++ 
b/unittests/rpc/ErrorTests.cpp @@ -0,0 +1,152 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include + +using namespace RPC; +using namespace std; + +namespace { +void +check( + boost::json::object const& j, + std::string_view error, + uint32_t errorCode, + std::string_view errorMessage) +{ + EXPECT_TRUE(j.contains("error")); + EXPECT_TRUE(j.contains("error_code")); + EXPECT_TRUE(j.contains("error_message")); + EXPECT_TRUE(j.contains("status")); + EXPECT_TRUE(j.contains("type")); + + EXPECT_TRUE(j.at("error").is_string()); + EXPECT_TRUE(j.at("error_code").is_uint64()); + EXPECT_TRUE(j.at("error_message").is_string()); + EXPECT_TRUE(j.at("status").is_string()); + EXPECT_TRUE(j.at("type").is_string()); + + EXPECT_STREQ(j.at("status").as_string().c_str(), "error"); + EXPECT_STREQ(j.at("type").as_string().c_str(), "response"); + + EXPECT_STREQ(j.at("error").as_string().c_str(), error.data()); + EXPECT_EQ(j.at("error_code").as_uint64(), errorCode); + EXPECT_STREQ( + j.at("error_message").as_string().c_str(), 
errorMessage.data()); +} +} // namespace + +TEST(RPCErrorsTest, StatusAsBool) +{ + // Only rpcSUCCESS status should return false + EXPECT_FALSE(Status{RippledError::rpcSUCCESS}); + + // true should be returned for any error state, we just test a few + CombinedError const errors[]{ + RippledError::rpcINVALID_PARAMS, + RippledError::rpcUNKNOWN_COMMAND, + RippledError::rpcTOO_BUSY, + RippledError::rpcNO_NETWORK, + RippledError::rpcACT_MALFORMED, + RippledError::rpcBAD_MARKET, + ClioError::rpcMALFORMED_CURRENCY, + }; + + for (auto const& ec : errors) + EXPECT_TRUE(Status{ec}); +} + +TEST(RPCErrorsTest, SuccessToJSON) +{ + auto const status = Status{RippledError::rpcSUCCESS}; + check(makeError(status), "unknown", 0, "An unknown error code."); +} + +TEST(RPCErrorsTest, RippledErrorToJSON) +{ + auto const status = Status{RippledError::rpcINVALID_PARAMS}; + check(makeError(status), "invalidParams", 31, "Invalid parameters."); +} + +TEST(RPCErrorsTest, RippledErrorFromStringToJSON) +{ + auto const j = makeError(Status{"veryCustomError"}); + EXPECT_STREQ(j.at("error").as_string().c_str(), "veryCustomError"); +} + +TEST(RPCErrorsTest, RippledErrorToJSONCustomMessage) +{ + auto const status = Status{RippledError::rpcINVALID_PARAMS, "custom"}; + check(makeError(status), "invalidParams", 31, "custom"); +} + +TEST(RPCErrorsTest, RippledErrorToJSONCustomStrCodeAndMessage) +{ + auto const status = + Status{RippledError::rpcINVALID_PARAMS, "customCode", "customMessage"}; + check(makeError(status), "customCode", 31, "customMessage"); +} + +TEST(RPCErrorsTest, ClioErrorToJSON) +{ + auto const status = Status{ClioError::rpcMALFORMED_CURRENCY}; + check(makeError(status), "malformedCurrency", 5000, "Malformed currency."); +} + +TEST(RPCErrorsTest, ClioErrorToJSONCustomMessage) +{ + auto const status = Status{ClioError::rpcMALFORMED_CURRENCY, "custom"}; + check(makeError(status), "malformedCurrency", 5000, "custom"); +} + +TEST(RPCErrorsTest, ClioErrorToJSONCustomStrCodeAndMessage) +{ + 
auto const status = + Status{ClioError::rpcMALFORMED_CURRENCY, "customCode", "customMessage"}; + check(makeError(status), "customCode", 5000, "customMessage"); +} + +TEST(RPCErrorsTest, InvalidClioErrorToJSON) +{ + EXPECT_ANY_THROW((void)makeError(static_cast(999999))); +} + +TEST(RPCErrorsTest, WarningToJSON) +{ + auto j = makeWarning(WarningCode::warnRPC_OUTDATED); + EXPECT_TRUE(j.contains("id")); + EXPECT_TRUE(j.contains("message")); + + EXPECT_TRUE(j.at("id").is_int64()); + EXPECT_TRUE(j.at("message").is_string()); + + EXPECT_EQ( + j.at("id").as_int64(), + static_cast(WarningCode::warnRPC_OUTDATED)); + EXPECT_STREQ( + j.at("message").as_string().c_str(), "This server may be out of date"); +} + +TEST(RPCErrorsTest, InvalidWarningToJSON) +{ + EXPECT_ANY_THROW((void)makeWarning(static_cast(999999))); +} diff --git a/unittests/rpc/RPCHelpersTest.cpp b/unittests/rpc/RPCHelpersTest.cpp new file mode 100644 index 00000000..f71e06e6 --- /dev/null +++ b/unittests/rpc/RPCHelpersTest.cpp @@ -0,0 +1,417 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +#include + +#include + +using namespace RPC; +using namespace testing; + +constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; +constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; +constexpr static auto INDEX1 = + "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto INDEX2 = + "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; +constexpr static auto TXNID = + "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; + +class RPCHelpersTest : public MockBackendTest, public SyncAsioContextTest +{ + void + SetUp() override + { + MockBackendTest::SetUp(); + SyncAsioContextTest::SetUp(); + } + void + TearDown() override + { + MockBackendTest::TearDown(); + SyncAsioContextTest::TearDown(); + } +}; + +TEST_F(RPCHelpersTest, TraverseOwnedNodesNotAccount) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + // fetch account object return emtpy + ON_CALL(*rawBackendPtr, doFetchLedgerObject) + .WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto account = GetAccountIDWithString(ACCOUNT); + auto ret = traverseOwnedNodes( + *mockBackendPtr, account, 9, 10, "", yield, [](auto) { + + }); + auto status = std::get_if(&ret); + EXPECT_TRUE(status != nullptr); + EXPECT_EQ(*status, RippledError::rpcACT_NOT_FOUND); + }); + ctx.run(); +} + +TEST_F(RPCHelpersTest, TraverseOwnedNodesMarkerInvalidIndexNotHex) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + + boost::asio::spawn(ctx, 
[this](boost::asio::yield_context yield) { + auto account = GetAccountIDWithString(ACCOUNT); + auto ret = traverseOwnedNodes( + *mockBackendPtr, account, 9, 10, "nothex,10", yield, [](auto) { + + }); + auto status = std::get_if(&ret); + EXPECT_TRUE(status != nullptr); + EXPECT_EQ(*status, ripple::rpcINVALID_PARAMS); + EXPECT_EQ(status->message, "Malformed cursor"); + }); + ctx.run(); +} + +TEST_F(RPCHelpersTest, TraverseOwnedNodesMarkerInvalidPageNotInt) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto account = GetAccountIDWithString(ACCOUNT); + auto ret = traverseOwnedNodes( + *mockBackendPtr, account, 9, 10, "nothex,abc", yield, [](auto) { + + }); + auto status = std::get_if(&ret); + EXPECT_TRUE(status != nullptr); + EXPECT_EQ(*status, ripple::rpcINVALID_PARAMS); + EXPECT_EQ(status->message, "Malformed cursor"); + }); + ctx.run(); +} + +// limit = 10, return 2 objects +TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarker) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto owneDirKk = ripple::keylet::ownerDir(account).key; + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + // return owner index + ripple::STObject ownerDir = CreateOwnerDirLedgerObject( + {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + 
.WillByDefault(Return(ownerDir.getSerializer().peekData())); + + // return two payment channel objects + std::vector bbs; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + bbs.push_back(channel1.getSerializer().peekData()); + bbs.push_back(channel1.getSerializer().peekData()); + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + boost::asio::spawn(ctx, [this, &account](boost::asio::yield_context yield) { + auto ret = traverseOwnedNodes( + *mockBackendPtr, account, 9, 10, {}, yield, [](auto) { + + }); + auto cursor = std::get_if(&ret); + EXPECT_TRUE(cursor != nullptr); + EXPECT_EQ( + cursor->toString(), + "0000000000000000000000000000000000000000000000000000000000000000," + "0"); + }); + ctx.run(); +} + +// limit = 10, return 10 objects and marker +TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnSamePageMarker) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto owneDirKk = ripple::keylet::ownerDir(account).key; + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + std::vector bbs; + + int objectsCount = 11; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + std::vector indexes; + while (objectsCount != 0) + { + // return owner index + indexes.push_back(ripple::uint256{INDEX1}); + bbs.push_back(channel1.getSerializer().peekData()); + objectsCount--; + } + + ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); + ownerDir.setFieldU64(ripple::sfIndexNext, 99); + ON_CALL( + *rawBackendPtr, 
doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + boost::asio::spawn(ctx, [this, &account](boost::asio::yield_context yield) { + auto count = 0; + auto ret = traverseOwnedNodes( + *mockBackendPtr, account, 9, 10, {}, yield, [&](auto) { count++; }); + auto cursor = std::get_if(&ret); + EXPECT_TRUE(cursor != nullptr); + EXPECT_EQ(count, 10); + EXPECT_EQ(cursor->toString(), fmt::format("{},0", INDEX1)); + }); + ctx.run(); +} + +// 10 objects per page, limit is 15, return the second page as marker +TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnOtherPageMarker) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto ownerDirKk = ripple::keylet::ownerDir(account).key; + constexpr static auto nextPage = 99; + constexpr static auto limit = 15; + auto ownerDir2Kk = + ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; + + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); + + std::vector bbs; + + int objectsCount = 10; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + std::vector indexes; + while (objectsCount != 0) + { + // return owner index + indexes.push_back(ripple::uint256{INDEX1}); + objectsCount--; + } + objectsCount = 15; + while (objectsCount != 0) + { + bbs.push_back(channel1.getSerializer().peekData()); + objectsCount--; + } + + ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); + 
ownerDir.setFieldU64(ripple::sfIndexNext, nextPage); + // first page 's next page is 99 + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + ripple::STObject ownerDir2 = CreateOwnerDirLedgerObject(indexes, INDEX1); + // second page's next page is 0 + ownerDir2.setFieldU64(ripple::sfIndexNext, 0); + ON_CALL( + *rawBackendPtr, + doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir2.getSerializer().peekData())); + + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto count = 0; + auto ret = traverseOwnedNodes( + *mockBackendPtr, account, 9, limit, {}, yield, [&](auto) { + count++; + }); + auto cursor = std::get_if(&ret); + EXPECT_TRUE(cursor != nullptr); + EXPECT_EQ(count, limit); + EXPECT_EQ(cursor->toString(), fmt::format("{},{}", INDEX1, nextPage)); + }); + ctx.run(); +} + +// Send a valid marker +TEST_F(RPCHelpersTest, TraverseOwnedNodesWithMarkerReturnSamePageMarker) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto ownerDir2Kk = + ripple::keylet::page(ripple::keylet::ownerDir(account), 99).key; + constexpr static auto limit = 8; + constexpr static auto pageNum = 99; + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); + + std::vector bbs; + + int objectsCount = 10; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + std::vector indexes; + while (objectsCount != 0) + { + 
// return owner index + indexes.push_back(ripple::uint256{INDEX1}); + objectsCount--; + } + objectsCount = 10; + while (objectsCount != 0) + { + bbs.push_back(channel1.getSerializer().peekData()); + objectsCount--; + } + + ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); + ownerDir.setFieldU64(ripple::sfIndexNext, 0); + // return ownerdir when search by marker + ON_CALL( + *rawBackendPtr, + doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto count = 0; + auto ret = traverseOwnedNodes( + *mockBackendPtr, + account, + 9, + limit, + fmt::format("{},{}", INDEX1, pageNum), + yield, + [&](auto) { count++; }); + auto cursor = std::get_if(&ret); + EXPECT_TRUE(cursor != nullptr); + EXPECT_EQ(count, limit); + EXPECT_EQ(cursor->toString(), fmt::format("{},{}", INDEX1, pageNum)); + }); + ctx.run(); +} + +// Send a valid marker, but marker contain an unexisting index +// return empty +TEST_F(RPCHelpersTest, TraverseOwnedNodesWithUnexistingIndexMarker) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto ownerDir2Kk = + ripple::keylet::page(ripple::keylet::ownerDir(account), 99).key; + constexpr static auto limit = 8; + constexpr static auto pageNum = 99; + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + int objectsCount = 10; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, 
TXNID, 28); + std::vector indexes; + while (objectsCount != 0) + { + // return owner index + indexes.push_back(ripple::uint256{INDEX1}); + objectsCount--; + } + ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); + ownerDir.setFieldU64(ripple::sfIndexNext, 0); + // return ownerdir when search by marker + ON_CALL( + *rawBackendPtr, + doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto count = 0; + auto ret = traverseOwnedNodes( + *mockBackendPtr, + account, + 9, + limit, + fmt::format("{},{}", INDEX2, pageNum), + yield, + [&](auto) { count++; }); + auto cursor = std::get_if(&ret); + EXPECT_TRUE(cursor != nullptr); + EXPECT_EQ(count, 0); + EXPECT_EQ( + cursor->toString(), + "00000000000000000000000000000000000000000000000000000000000000" + "00,0"); + }); + ctx.run(); +} diff --git a/unittests/rpc/handlers/AccountChannelsTest.cpp b/unittests/rpc/handlers/AccountChannelsTest.cpp new file mode 100644 index 00000000..7b3b4689 --- /dev/null +++ b/unittests/rpc/handlers/AccountChannelsTest.cpp @@ -0,0 +1,881 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +#include + +using namespace RPCng; +namespace json = boost::json; +using namespace testing; + +constexpr static auto LEDGERHASH = + "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; +constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; +constexpr static auto ACCOUNT3 = "rB9BMzh27F3Q6a5FtGPDayQoCCEdiRdqcK"; +constexpr static auto INDEX1 = + "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto INDEX2 = + "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; +constexpr static auto TXNID = + "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; + +class RPCAccountHandlerTest : public HandlerBaseTest +{ +}; + +TEST_F(RPCAccountHandlerTest, NonHexLedgerHash) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit": 10, + "ledger_hash": "xxx" + }})", + ACCOUNT)); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "ledger_hashMalformed"); + }); + ctx.run(); +} + +TEST_F(RPCAccountHandlerTest, NonStringLedgerHash) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const 
handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit": 10, + "ledger_hash": 123 + }})", + ACCOUNT)); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "ledger_hashNotString"); + }); + ctx.run(); +} + +TEST_F(RPCAccountHandlerTest, InvalidLedgerIndexString) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit": 10, + "ledger_index": "notvalidated" + }})", + ACCOUNT)); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "ledgerIndexMalformed"); + }); + ctx.run(); +} + +TEST_F(RPCAccountHandlerTest, MarkerNotString) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "marker":9 + }})", + ACCOUNT)); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "markerNotString"); + }); + ctx.run(); +} + +// error case : invalid marker +// marker format is composed of a comma separated index and start hint. The +// former will be read as hex, and the latter using boost lexical cast. 
+TEST_F(RPCAccountHandlerTest, InvalidMarker) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "marker": "123invalid" + }})", + ACCOUNT)); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "Malformed cursor"); + }); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "marker":401 + }})", + ACCOUNT)); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + }); + ctx.run(); +} + +// the limit is between 10 400 +TEST_F(RPCAccountHandlerTest, IncorrectLimit) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit":9 + }})", + ACCOUNT)); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + }); + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit":401 + }})", + ACCOUNT)); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), 
"invalidParams"); + }); +} + +// error case: account invalid format, length is incorrect +TEST_F(RPCAccountHandlerTest, AccountInvalidFormat) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(R"({ + "account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jp" + })"); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "accountMalformed"); + }); + ctx.run(); +} + +// error case: account invalid format +TEST_F(RPCAccountHandlerTest, AccountNotString) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const input = json::parse(R"({ + "account": 12 + })"); + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "accountNotString"); + }); + ctx.run(); +} + +// error case ledger non exist via hash +TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerHash) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + // mock fetchLedgerByHash return empty + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) + .WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); + + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "ledger_hash": "{}" + }})", + ACCOUNT, + LEDGERHASH)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + + 
auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "lgrNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "ledgerNotFound"); + }); + ctx.run(); +} + +// error case ledger non exist via index +TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerIndex) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + // mock fetchLedgerBySequence return empty + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "ledger_index": "4" + }})", + ACCOUNT)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "lgrNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "ledgerNotFound"); + }); + ctx.run(); +} + +// error case ledger > max seq via hash +// idk why this case will happen in reality +TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerHash2) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + // mock fetchLedgerByHash return ledger but seq is 31 > 30 + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 31); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "ledger_hash": "{}" + }})", + ACCOUNT, + LEDGERHASH)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto 
const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "lgrNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "ledgerNotFound"); + }); + ctx.run(); +} + +// error case ledger > max seq via index +TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerIndex2) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + // no need to check from db,call fetchLedgerBySequence 0 time + // differ from previous logic + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(0); + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "ledger_index": "31" + }})", + ACCOUNT)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "lgrNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "ledgerNotFound"); + }); + ctx.run(); +} + +// error case account not exist +TEST_F(RPCAccountHandlerTest, NonExistAccount) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); + // fetch account object return emtpy + ON_CALL(*rawBackendPtr, doFetchLedgerObject) + .WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + auto const input = 
json::parse(fmt::format( + R"({{ + "account": "{}", + "ledger_hash": "{}" + }})", + ACCOUNT, + LEDGERHASH)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{AccountChannelsHandler{mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "actNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "accountNotFound"); + }); + ctx.run(); +} + +// normal case when only provide account +TEST_F(RPCAccountHandlerTest, DefaultParameterTest) +{ + constexpr static auto correntOutput = R"({ + "account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652", + "ledger_index":30, + "validated":true, + "limit":50, + "channels":[ + { + "channel_id":"E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321", + "account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "account_destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "amount":"100", + "balance":"10", + "settle_delay":32, + "public_key":"aBMxWrnPUnvwZPfsmTyVizxEGsGheAu3Tsn6oPRgyjgvd2NggFxz", + "public_key_hex":"020000000000000000000000000000000000000000000000000000000000000000" + }, + { + "channel_id":"E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322", + "account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "account_destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "amount":"100", + "balance":"10", + "settle_delay":32, + "public_key":"aBMxWrnPUnvwZPfsmTyVizxEGsGheAu3Tsn6oPRgyjgvd2NggFxz", + "public_key_hex":"020000000000000000000000000000000000000000000000000000000000000000" + } + ] + })"; + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + 
.WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // fetch account object return something + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto owneDirKk = ripple::keylet::ownerDir(account).key; + auto fake = Blob{'f', 'a', 'k', 'e'}; + // return a non empty account + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + + // return owner index containing 2 indexes + ripple::STObject ownerDir = CreateOwnerDirLedgerObject( + {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + // return two payment channel objects + std::vector bbs; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + bbs.push_back(channel1.getSerializer().peekData()); + bbs.push_back(channel1.getSerializer().peekData()); + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}" + }})", + ACCOUNT)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto handler = AnyHandler{AccountChannelsHandler{this->mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + EXPECT_EQ(json::parse(correntOutput), *output); + }); + ctx.run(); +} + +// normal case : limit is used +TEST_F(RPCAccountHandlerTest, UseLimit) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + ON_CALL(*rawBackendPtr, 
fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // fetch account object return something + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto owneDirKk = ripple::keylet::ownerDir(account).key; + auto fake = Blob{'f', 'a', 'k', 'e'}; + // return a non empty account + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + + // return owner index + std::vector indexes; + std::vector bbs; + + auto repetitions = 50; + while (repetitions--) + { + indexes.push_back(ripple::uint256{INDEX1}); + ripple::STObject channel = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + bbs.push_back(channel.getSerializer().peekData()); + } + ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); + // it should not appear in return marker,marker is the current page + ownerDir.setFieldU64(ripple::sfIndexNext, 99); + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit": 20 + }})", + ACCOUNT)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto handler = AnyHandler{AccountChannelsHandler{this->mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + + EXPECT_EQ((*output).as_object().at("channels").as_array().size(), 20); + EXPECT_THAT( + (*output).as_object().at("marker").as_string().c_str(), + EndsWith(",0")); + }); + ctx.run(); +} + +// normal case : destination is used +TEST_F(RPCAccountHandlerTest, 
UseDestination) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // fetch account object return something + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto owneDirKk = ripple::keylet::ownerDir(account).key; + auto fake = Blob{'f', 'a', 'k', 'e'}; + // return a non empty account + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + + // return owner index + std::vector indexes; + std::vector bbs; + + // 10 pay channel to ACCOUNT2 + auto repetitions = 10; + while (repetitions--) + { + indexes.push_back(ripple::uint256{INDEX1}); + ripple::STObject channel = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + bbs.push_back(channel.getSerializer().peekData()); + } + + // 20 pay channel to ACCOUNT3 + repetitions = 20; + while (repetitions--) + { + indexes.push_back(ripple::uint256{INDEX1}); + ripple::STObject channel = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT3, 100, 10, 32, TXNID, 28); + bbs.push_back(channel.getSerializer().peekData()); + } + + ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit": 30, + "destination_account":"{}" + }})", + ACCOUNT, + 
ACCOUNT3)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto handler = AnyHandler{AccountChannelsHandler{this->mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + EXPECT_EQ((*output).as_object().at("channels").as_array().size(), 20); + }); + ctx.run(); +} + +// normal case : but the lines is emtpy +TEST_F(RPCAccountHandlerTest, EmptyChannel) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // fetch account object return something + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto owneDirKk = ripple::keylet::ownerDir(account).key; + auto fake = Blob{'f', 'a', 'k', 'e'}; + // return a non empty account + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + + // return owner index + ripple::STObject ownerDir = CreateOwnerDirLedgerObject({}, INDEX1); + + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}" + }})", + ACCOUNT)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto handler = AnyHandler{AccountChannelsHandler{this->mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + EXPECT_EQ((*output).as_object().at("channels").as_array().size(), 0); + }); + ctx.run(); +} + +// Return expiration cancel_offer source_tag destination_tag when available 
+TEST_F(RPCAccountHandlerTest, OptionalResponseField) +{ + constexpr static auto correntOutput = R"({ + "account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652", + "ledger_index":30, + "validated":true, + "limit":50, + "channels":[ + { + "channel_id":"E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321", + "account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "account_destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "amount":"100", + "balance":"10", + "settle_delay":32, + "public_key":"aBMxWrnPUnvwZPfsmTyVizxEGsGheAu3Tsn6oPRgyjgvd2NggFxz", + "public_key_hex":"020000000000000000000000000000000000000000000000000000000000000000", + "expiration": 100, + "cancel_after": 200, + "source_tag": 300, + "destination_tag": 400 + }, + { + "channel_id":"E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322", + "account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "account_destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "amount":"100", + "balance":"10", + "settle_delay":32, + "public_key":"aBMxWrnPUnvwZPfsmTyVizxEGsGheAu3Tsn6oPRgyjgvd2NggFxz", + "public_key_hex":"020000000000000000000000000000000000000000000000000000000000000000", + "expiration": 100, + "cancel_after": 200, + "source_tag": 300, + "destination_tag": 400 + } + ] + })"; + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // fetch account object return something + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto owneDirKk = ripple::keylet::ownerDir(account).key; + auto fake = Blob{'f', 'a', 'k', 'e'}; + // return a non empty account + ON_CALL( + 
*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + + // return owner index + ripple::STObject ownerDir = CreateOwnerDirLedgerObject( + {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + // return two payment channel objects + std::vector bbs; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + channel1.setFieldU32(ripple::sfExpiration, 100); + channel1.setFieldU32(ripple::sfCancelAfter, 200); + channel1.setFieldU32(ripple::sfSourceTag, 300); + channel1.setFieldU32(ripple::sfDestinationTag, 400); + bbs.push_back(channel1.getSerializer().peekData()); + bbs.push_back(channel1.getSerializer().peekData()); + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}" + }})", + ACCOUNT)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto handler = AnyHandler{AccountChannelsHandler{this->mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + EXPECT_EQ(json::parse(correntOutput), *output); + }); + ctx.run(); +} + +// normal case : test marker output correct +TEST_F(RPCAccountHandlerTest, MarkerOutput) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + auto ownerDirKk = ripple::keylet::ownerDir(account).key; + constexpr static auto nextPage = 99; + constexpr static auto limit = 15; + auto ownerDir2Kk = + 
ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); + + std::vector bbs; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + // owner dir contains 10 indexes + int objectsCount = 10; + std::vector indexes; + while (objectsCount != 0) + { + // return owner index + indexes.push_back(ripple::uint256{INDEX1}); + objectsCount--; + } + // return 15 objects + objectsCount = 15; + while (objectsCount != 0) + { + bbs.push_back(channel1.getSerializer().peekData()); + objectsCount--; + } + + ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); + ownerDir.setFieldU64(ripple::sfIndexNext, nextPage); + // first page 's next page is 99 + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + ripple::STObject ownerDir2 = CreateOwnerDirLedgerObject(indexes, INDEX1); + // second page's next page is 0 + ownerDir2.setFieldU64(ripple::sfIndexNext, 0); + ON_CALL( + *rawBackendPtr, + doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir2.getSerializer().peekData())); + + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit": {} + }})", + ACCOUNT, + limit)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto handler 
= AnyHandler{AccountChannelsHandler{this->mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + EXPECT_EQ( + (*output).as_object().at("marker").as_string().c_str(), + fmt::format("{},{}", INDEX1, nextPage)); + EXPECT_EQ((*output).as_object().at("channels").as_array().size(), 15); + }); + ctx.run(); +} + +// normal case : handler marker correctly +TEST_F(RPCAccountHandlerTest, MarkerInput) +{ + MockBackend* rawBackendPtr = + static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto account = GetAccountIDWithString(ACCOUNT); + auto accountKk = ripple::keylet::account(account).key; + constexpr static auto nextPage = 99; + constexpr static auto limit = 15; + auto ownerDirKk = + ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; + auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // fetch account object return something + auto fake = Blob{'f', 'a', 'k', 'e'}; + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) + .WillByDefault(Return(fake)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); + + std::vector bbs; + ripple::STObject channel1 = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + int objectsCount = limit; + std::vector indexes; + while (objectsCount != 0) + { + // return owner index + indexes.push_back(ripple::uint256{INDEX1}); + bbs.push_back(channel1.getSerializer().peekData()); + objectsCount--; + } + + ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); + ownerDir.setFieldU64(ripple::sfIndexNext, 0); + ON_CALL( + *rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + + ON_CALL(*rawBackendPtr, 
doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + auto const input = json::parse(fmt::format( + R"({{ + "account": "{}", + "limit": {}, + "marker": "{},{}" + }})", + ACCOUNT, + limit, + INDEX1, + nextPage)); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto handler = AnyHandler{AccountChannelsHandler{this->mockBackendPtr}}; + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + EXPECT_TRUE((*output).as_object().if_contains("marker") == nullptr); + // the first item is the marker itself, so the result will have limit-1 + // items + EXPECT_EQ( + (*output).as_object().at("channels").as_array().size(), limit - 1); + }); + ctx.run(); +} diff --git a/unittests/rpc/handlers/AccountCurrenciesTest.cpp b/unittests/rpc/handlers/AccountCurrenciesTest.cpp new file mode 100644 index 00000000..bda9e3fb --- /dev/null +++ b/unittests/rpc/handlers/AccountCurrenciesTest.cpp @@ -0,0 +1,295 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include + +#include + +using namespace RPCng; +namespace json = boost::json; +using namespace testing; + +constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; +constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; +constexpr static auto ISSUER = "rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD"; +constexpr static auto LEDGERHASH = + "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto INDEX1 = + "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto INDEX2 = + "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto TXNID = + "E3FE6EA3D48F0C2B639448020EA4F03D4F4F8FFDB243A852A0F59177921B4879"; + +class RPCAccountCurrenciesHandlerTest : public HandlerBaseTest +{ +}; + +TEST_F(RPCAccountCurrenciesHandlerTest, AccountNotExsit) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + + ON_CALL(*rawBackendPtr, fetchLedgerBySequence) + .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject) + .WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + + auto const static input = boost::json::parse(fmt::format( + R"({{ + "account":"{}" + }})", + ACCOUNT)); + auto const handler = AnyHandler{AccountCurrenciesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "actNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), 
"accountNotFound"); + }); + ctx.run(); +} + +TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaSequence) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // return empty ledgerinfo + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)) + .WillByDefault(Return(std::optional{})); + + auto const static input = boost::json::parse(fmt::format( + R"({{ + "account":"{}" + }})", + ACCOUNT)); + auto const handler = AnyHandler{AccountCurrenciesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process(input, yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "lgrNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "ledgerNotFound"); + }); + ctx.run(); +} + +TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaHash) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); + // return empty ledgerinfo + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) + .WillByDefault(Return(std::optional{})); + + auto const static input = boost::json::parse(fmt::format( + R"({{ + "account":"{}", + "ledger_hash":"{}" + }})", + ACCOUNT, + LEDGERHASH)); + auto const handler = AnyHandler{AccountCurrenciesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process(input); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "lgrNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "ledgerNotFound"); + }); + ctx.run(); +} + 
+TEST_F(RPCAccountCurrenciesHandlerTest, DefaultParameter) +{ + auto constexpr static OUTPUT = R"({ + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652", + "ledger_index":30, + "validated":true, + "receive_currencies":[ + "EUR", + "JPY" + ], + "send_currencies":[ + "EUR", + "USD" + ] + })"; + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + // return valid ledgerinfo + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)) + .WillByDefault(Return(ledgerinfo)); + // return valid account + auto const accountKk = + ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)) + .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + + auto const ownerDir = CreateOwnerDirLedgerObject( + {ripple::uint256{INDEX1}, + ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}}, + INDEX1); + auto const ownerDirKk = + ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, 30, _)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + // ACCOUNT can receive USD 10 from ACCOUNT2 and send USD 20 to ACCOUNT2, now + // the balance is 100, ACCOUNT can only send USD to ACCOUNT2 + auto const line1 = CreateRippleStateLedgerObject( + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123); + // ACCOUNT2 can receive JPY 10 from ACCOUNT and send JPY 20 to ACCOUNT, now + // the balance is 100, ACCOUNT2 can only send JPY to ACCOUNT + auto const line2 = CreateRippleStateLedgerObject( + ACCOUNT, "JPY", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123); + // ACCOUNT can receive EUR 10 from ACCOUNT and send EUR 20 to ACCOUNT2, now + // the 
balance is 8, ACCOUNT can receive/send EUR to/from ACCOUNT2 + auto const line3 = CreateRippleStateLedgerObject( + ACCOUNT, "EUR", ISSUER, 8, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123); + std::vector bbs; + bbs.push_back(line1.getSerializer().peekData()); + bbs.push_back(line2.getSerializer().peekData()); + bbs.push_back(line3.getSerializer().peekData()); + + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + auto const static input = boost::json::parse(fmt::format( + R"({{ + "account":"{}" + }})", + ACCOUNT)); + auto const handler = AnyHandler{AccountCurrenciesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + EXPECT_EQ(*output, json::parse(OUTPUT)); + }); + ctx.run(); +} + +TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderHash) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + // return valid ledgerinfo + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); + EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) + .WillByDefault(Return(ledgerinfo)); + // return valid account + auto const accountKk = + ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)) + .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + + auto const ownerDir = + CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}}, INDEX1); + auto const ownerDirKk = + ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, 30, _)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + std::vector bbs; + auto const 
line1 = CreateRippleStateLedgerObject( + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123); + bbs.push_back(line1.getSerializer().peekData()); + + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + auto const static input = boost::json::parse(fmt::format( + R"({{ + "account":"{}", + "ledger_hash":"{}" + }})", + ACCOUNT, + LEDGERHASH)); + auto const handler = AnyHandler{AccountCurrenciesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + }); + ctx.run(); +} + +TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderSeq) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + auto const ledgerSeq = 29; + // return valid ledgerinfo + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, ledgerSeq); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(ledgerSeq, _)) + .WillByDefault(Return(ledgerinfo)); + // return valid account + auto const accountKk = + ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, ledgerSeq, _)) + .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + + auto const ownerDir = + CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}}, INDEX1); + auto const ownerDirKk = + ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, ledgerSeq, _)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + std::vector bbs; + auto const line1 = CreateRippleStateLedgerObject( + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123); + bbs.push_back(line1.getSerializer().peekData()); 
+ + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + auto const static input = boost::json::parse(fmt::format( + R"({{ + "account":"{}", + "ledger_index":{} + }})", + ACCOUNT, + ledgerSeq)); + auto const handler = AnyHandler{AccountCurrenciesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + EXPECT_EQ( + (*output).as_object().at("ledger_index").as_uint64(), ledgerSeq); + }); + ctx.run(); +} diff --git a/unittests/rpc/handlers/DefaultProcessorTests.cpp b/unittests/rpc/handlers/DefaultProcessorTests.cpp new file mode 100644 index 00000000..d08ff2cf --- /dev/null +++ b/unittests/rpc/handlers/DefaultProcessorTests.cpp @@ -0,0 +1,79 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include + +#include + +using namespace testing; +using namespace std; + +using namespace RPCng; +using namespace RPCng::validation; +using namespace unittests::detail; + +namespace json = boost::json; + +class RPCDefaultProcessorTest : public NoLoggerFixture +{ +}; + +TEST_F(RPCDefaultProcessorTest, ValidInput) +{ + HandlerMock handler; + RPCng::detail::DefaultProcessor processor; + + auto const input = json::parse(R"({ "something": "works" })"); + auto const spec = RpcSpec{{"something", Required{}}}; + auto const data = InOutFake{"works"}; + EXPECT_CALL(handler, spec()).WillOnce(ReturnRef(spec)); + EXPECT_CALL(handler, process(Eq(data))).WillOnce(Return(data)); + + auto const ret = processor(handler, input); + ASSERT_TRUE(ret); // no error +} + +TEST_F(RPCDefaultProcessorTest, NoInputVaildCall) +{ + HandlerWithoutInputMock handler; + RPCng::detail::DefaultProcessor processor; + + auto const data = InOutFake{"works"}; + auto const input = json::parse(R"({})"); + EXPECT_CALL(handler, process()).WillOnce(Return(data)); + + auto const ret = processor(handler, input); + ASSERT_TRUE(ret); // no error +} + +TEST_F(RPCDefaultProcessorTest, InvalidInput) +{ + HandlerMock handler; + RPCng::detail::DefaultProcessor processor; + + auto const input = json::parse(R"({ "other": "nope" })"); + auto const spec = RpcSpec{{"something", Required{}}}; + EXPECT_CALL(handler, spec()).WillOnce(ReturnRef(spec)); + + auto const ret = processor(handler, input); + ASSERT_FALSE(ret); // returns error +} diff --git a/unittests/rpc/handlers/GatewayBalancesTest.cpp b/unittests/rpc/handlers/GatewayBalancesTest.cpp new file mode 100644 index 00000000..ac7b5d11 --- /dev/null +++ b/unittests/rpc/handlers/GatewayBalancesTest.cpp @@ -0,0 +1,727 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + 
Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +#include + +using namespace RPCng; +namespace json = boost::json; +using namespace testing; + +constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; +constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; +constexpr static auto ACCOUNT3 = "raHGBERMka3KZsfpTQUAtumxmvpqhFLyrk"; +constexpr static auto ISSUER = "rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD"; +constexpr static auto LEDGERHASH = + "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto INDEX1 = + "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto INDEX2 = + "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto TXNID = + "E3FE6EA3D48F0C2B639448020EA4F03D4F4F8FFDB243A852A0F59177921B4879"; + +class RPCGatewayBalancesHandlerTest : public HandlerBaseTest +{ +}; + +struct ParameterTestBundle +{ + std::string testName; + std::string testJson; + std::string expectedError; + std::string expectedErrorMessage; +}; + +struct ParameterTest : public RPCGatewayBalancesHandlerTest, + public WithParamInterface +{ + 
struct NameGenerator + { + template + std::string + operator()(const testing::TestParamInfo& info) const + { + auto bundle = static_cast(info.param); + return bundle.testName; + } + }; +}; + +TEST_P(ParameterTest, CheckError) +{ + auto bundle = GetParam(); + auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = + handler.process(json::parse(bundle.testJson), yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), bundle.expectedError); + EXPECT_EQ( + err.at("error_message").as_string(), bundle.expectedErrorMessage); + }); + ctx.run(); +} + +auto +generateParameterTestBundles() +{ + return std::vector{ + ParameterTestBundle{ + "AccountNotString", + R"({ + "account": 1213 + })", + "invalidParams", + "accountNotString"}, + ParameterTestBundle{ + "AccountMissing", + R"({ + })", + "invalidParams", + "Required field 'account' missing"}, + ParameterTestBundle{ + "AccountInvalid", + R"({ + "account": "1213" + })", + "invalidParams", + "accountMalformed"}, + ParameterTestBundle{ + "LedgerIndexInvalid", + fmt::format( + R"({{ + "account": "{}", + "ledger_index": "meh" + }})", + ACCOUNT), + "invalidParams", + "ledgerIndexMalformed"}, + ParameterTestBundle{ + "LedgerHashInvalid", + fmt::format( + R"({{ + "account": "{}", + "ledger_hash": "meh" + }})", + ACCOUNT), + "invalidParams", + "ledger_hashMalformed"}, + ParameterTestBundle{ + "LedgerHashNotString", + fmt::format( + R"({{ + "account": "{}", + "ledger_hash": 12 + }})", + ACCOUNT), + "invalidParams", + "ledger_hashNotString"}, + ParameterTestBundle{ + "WalletsNotStringOrArray", + fmt::format( + R"({{ + "account": "{}", + "hotwallet": 12 + }})", + ACCOUNT), + "invalidParams", + "hotwalletNotStringOrArray"}, + ParameterTestBundle{ + "WalletsNotStringAccount", + fmt::format( + R"({{ + "account": "{}", + "hotwallet": [12] + }})", + ACCOUNT), + 
"invalidParams", + "hotwalletMalformed"}, + ParameterTestBundle{ + "WalletsInvalidAccount", + fmt::format( + R"({{ + "account": "{}", + "hotwallet": ["12"] + }})", + ACCOUNT), + "invalidParams", + "hotwalletMalformed"}, + ParameterTestBundle{ + "WalletInvalidAccount", + fmt::format( + R"({{ + "account": "{}", + "hotwallet": "12" + }})", + ACCOUNT), + "invalidParams", + "hotwalletMalformed"}, + }; +} + +INSTANTIATE_TEST_SUITE_P( + RPCGatewayBalancesHandler, + ParameterTest, + testing::ValuesIn(generateParameterTestBundles()), + ParameterTest::NameGenerator()); + +TEST_F(RPCGatewayBalancesHandlerTest, LedgerNotFound) +{ + auto const seq = 123; + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(300); // max + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // return empty ledgerinfo + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) + .WillByDefault(Return(std::optional{})); + + auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process( + json::parse(fmt::format( + R"({{ + "account": "{}", + "ledger_index": "{}" + }})", + ACCOUNT, + seq)), + yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "lgrNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "ledgerNotFound"); + }); + ctx.run(); +} + +TEST_F(RPCGatewayBalancesHandlerTest, AccountNotFound) +{ + auto const seq = 300; + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(seq); // max + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // return valid ledgerinfo + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) + .WillByDefault(Return(ledgerinfo)); + + // 
return empty account + auto const accountKk = + ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)) + .WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + + auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process( + json::parse(fmt::format( + R"({{ + "account": "{}" + }})", + ACCOUNT)), + yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "actNotFound"); + EXPECT_EQ(err.at("error_message").as_string(), "accountNotFound"); + }); + ctx.run(); +} + +TEST_F(RPCGatewayBalancesHandlerTest, InvalidHotWallet) +{ + auto const seq = 300; + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(seq); // max + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // return valid ledgerinfo + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) + .WillByDefault(Return(ledgerinfo)); + + // return valid account + auto const accountKk = + ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)) + .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + + // return valid owner dir + auto const ownerDir = + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = + ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) + .WillByDefault(Return(ownerDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + // create a valid line, balance is 0 + auto const line1 = CreateRippleStateLedgerObject( + ACCOUNT, "USD", 
ISSUER, 0, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123); + std::vector bbs; + bbs.push_back(line1.getSerializer().peekData()); + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process( + json::parse(fmt::format( + R"({{ + "account": "{}", + "hotwallet": "{}" + }})", + ACCOUNT, + ACCOUNT2)), + yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "invalidHotWallet"); + }); + ctx.run(); +} + +struct NormalTestBundle +{ + std::string testName; + ripple::STObject mockedDir; + std::vector mockedObjects; + std::string expectedJson; + std::string hotwallet; +}; + +struct NormalPathTest : public RPCGatewayBalancesHandlerTest, + public WithParamInterface +{ + struct NameGenerator + { + template + std::string + operator()(const testing::TestParamInfo& info) const + { + auto bundle = static_cast(info.param); + return bundle.testName; + } + }; +}; + +TEST_P(NormalPathTest, CheckOutput) +{ + auto const& bundle = GetParam(); + auto const seq = 300; + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(seq); // max + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + // return valid ledgerinfo + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) + .WillByDefault(Return(ledgerinfo)); + + // return valid account + auto const accountKk = + ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)) + .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + + // return valid owner 
dir + auto const ownerDir = + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = + ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) + .WillByDefault(Return(bundle.mockedDir.getSerializer().peekData())); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); + + std::vector bbs; + std::transform( + bundle.mockedObjects.begin(), + bundle.mockedObjects.end(), + std::back_inserter(bbs), + [](auto const& obj) { return obj.getSerializer().peekData(); }); + ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); + + auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process( + json::parse(fmt::format( + R"({{ + "account": "{}", + {} + }})", + ACCOUNT, + bundle.hotwallet)), + yield); + ASSERT_TRUE(output); + EXPECT_EQ(output.value(), json::parse(bundle.expectedJson)); + }); + ctx.run(); +} + +auto +generateNormalPathTestBundles() +{ + auto frozenState = CreateRippleStateLedgerObject( + ACCOUNT, "JPY", ISSUER, -50, ACCOUNT, 10, ACCOUNT3, 20, TXNID, 123); + frozenState.setFieldU32(ripple::sfFlags, ripple::lsfLowFreeze); + + auto overflowState = CreateRippleStateLedgerObject( + ACCOUNT, "JPY", ISSUER, 50, ACCOUNT, 10, ACCOUNT3, 20, TXNID, 123); + int64_t min64 = -9922966390934554; + overflowState.setFieldAmount( + ripple::sfBalance, + ripple::STAmount(GetIssue("JPY", ISSUER), min64, 80)); + return std::vector{ + NormalTestBundle{ + "AllBranches", + CreateOwnerDirLedgerObject( + {ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}}, + INDEX1), + std::vector{// hotwallet + CreateRippleStateLedgerObject( + ACCOUNT, + "USD", + ISSUER, + -10, + ACCOUNT, + 
100, + ACCOUNT2, + 200, + TXNID, + 123), + // hotwallet + CreateRippleStateLedgerObject( + ACCOUNT, + "CNY", + ISSUER, + -20, + ACCOUNT, + 100, + ACCOUNT2, + 200, + TXNID, + 123), + // positive balance -> asset + CreateRippleStateLedgerObject( + ACCOUNT, + "EUR", + ISSUER, + 30, + ACCOUNT, + 100, + ACCOUNT3, + 200, + TXNID, + 123), + // positive balance -> asset + CreateRippleStateLedgerObject( + ACCOUNT, + "JPY", + ISSUER, + 40, + ACCOUNT, + 100, + ACCOUNT3, + 200, + TXNID, + 123), + // obligation + CreateRippleStateLedgerObject( + ACCOUNT, + "JPY", + ISSUER, + -50, + ACCOUNT, + 10, + ACCOUNT3, + 20, + TXNID, + 123), + frozenState + + }, + fmt::format( + R"({{ + "obligations":{{ + "JPY":"50" + }}, + "balances":{{ + "{}":[ + {{ + "currency":"USD", + "value":"10" + }}, + {{ + "currency":"CNY", + "value":"20" + }} + ] + }}, + "frozen_balances":{{ + "{}":[ + {{ + "currency":"JPY", + "value":"50" + }} + ] + }}, + "assets":{{ + "{}":[ + {{ + "currency":"EUR", + "value":"30" + }}, + {{ + "currency":"JPY", + "value":"40" + }} + ] + }}, + "account":"{}", + "ledger_index":300, + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652" + }})", + ACCOUNT2, + ACCOUNT3, + ACCOUNT3, + ACCOUNT), + fmt::format(R"("hotwallet": "{}")", ACCOUNT2)}, + NormalTestBundle{ + "NoHotwallet", + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1), + std::vector{CreateRippleStateLedgerObject( + ACCOUNT, + "JPY", + ISSUER, + -50, + ACCOUNT, + 10, + ACCOUNT3, + 20, + TXNID, + 123)}, + fmt::format( + R"({{ + "obligations":{{ + "JPY":"50" + }}, + "account":"{}", + "ledger_index":300, + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652" + }})", + ACCOUNT), + R"("ledger_index" : "validated")"}, + NormalTestBundle{ + "ObligationOverflow", + CreateOwnerDirLedgerObject( + {ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1), + std::vector{overflowState, overflowState}, + fmt::format( + R"({{ + "obligations":{{ + 
"JPY":"9922966390934554e80" + }}, + "account":"{}", + "overflow":true, + "ledger_index":300, + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652" + }})", + ACCOUNT), + R"("ledger_index" : "validated")"}, + NormalTestBundle{ + "HighID", + CreateOwnerDirLedgerObject( + {ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}}, + INDEX1), + std::vector{// hotwallet + CreateRippleStateLedgerObject( + ACCOUNT, + "USD", + ISSUER, + 10, + ACCOUNT2, + 100, + ACCOUNT, + 200, + TXNID, + 123), + // hotwallet + CreateRippleStateLedgerObject( + ACCOUNT, + "CNY", + ISSUER, + 20, + ACCOUNT2, + 100, + ACCOUNT, + 200, + TXNID, + 123), + CreateRippleStateLedgerObject( + ACCOUNT, + "EUR", + ISSUER, + 30, + ACCOUNT3, + 100, + ACCOUNT, + 200, + TXNID, + 123), + CreateRippleStateLedgerObject( + ACCOUNT, + "JPY", + ISSUER, + -50, + ACCOUNT3, + 10, + ACCOUNT, + 20, + TXNID, + 123)}, + fmt::format( + R"({{ + "obligations":{{ + "EUR":"30" + }}, + "balances":{{ + "{}":[ + {{ + "currency":"USD", + "value":"10" + }}, + {{ + "currency":"CNY", + "value":"20" + }} + ] + }}, + "assets":{{ + "{}":[ + {{ + "currency":"JPY", + "value":"50" + }} + ] + }}, + "account":"{}", + "ledger_index":300, + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652" + }})", + ACCOUNT2, + ACCOUNT3, + ACCOUNT), + fmt::format(R"("hotwallet": "{}")", ACCOUNT2)}, + NormalTestBundle{ + "HotWalletArray", + CreateOwnerDirLedgerObject( + {ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}, + ripple::uint256{INDEX2}}, + INDEX1), + std::vector{ + CreateRippleStateLedgerObject( + ACCOUNT, + "USD", + ISSUER, + -10, + ACCOUNT, + 100, + ACCOUNT2, + 200, + TXNID, + 123), + CreateRippleStateLedgerObject( + ACCOUNT, + "CNY", + ISSUER, + -20, + ACCOUNT, + 100, + ACCOUNT2, + 200, + TXNID, + 123), + CreateRippleStateLedgerObject( + ACCOUNT, + "EUR", + ISSUER, + -30, + ACCOUNT, + 100, + ACCOUNT3, + 200, + TXNID, + 123) + + }, + 
fmt::format( + R"({{ + "balances":{{ + "{}":[ + {{ + "currency":"EUR", + "value":"30" + }} + ], + "{}":[ + {{ + "currency":"USD", + "value":"10" + }}, + {{ + "currency":"CNY", + "value":"20" + }} + ] + }}, + "account":"{}", + "ledger_index":300, + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652" + }})", + ACCOUNT3, + ACCOUNT2, + ACCOUNT), + fmt::format(R"("hotwallet": ["{}", "{}"])", ACCOUNT2, ACCOUNT3)}}; +} + +INSTANTIATE_TEST_SUITE_P( + RPCGatewayBalancesHandler, + NormalPathTest, + testing::ValuesIn(generateNormalPathTestBundles()), + NormalPathTest::NameGenerator()); diff --git a/unittests/rpc/handlers/LedgerEntryTest.cpp b/unittests/rpc/handlers/LedgerEntryTest.cpp new file mode 100644 index 00000000..01fca9a7 --- /dev/null +++ b/unittests/rpc/handlers/LedgerEntryTest.cpp @@ -0,0 +1,1041 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include + +#include + +using namespace RPCng; +namespace json = boost::json; +using namespace testing; + +constexpr static auto INDEX1 = + "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; +constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; +constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; +constexpr static auto RANGEMIN = 10; +constexpr static auto RANGEMAX = 30; +constexpr static auto LEDGERHASH = + "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; + +class RPCLedgerEntryTest : public HandlerBaseTest +{ +}; + +struct ParamTestCaseBundle +{ + std::string testName; + std::string testJson; + std::string expectedError; + std::string expectedErrorMessage; +}; + +// parameterized test cases for parameters check +struct LedgerEntryParameterTest : public RPCLedgerEntryTest, + public WithParamInterface +{ + struct NameGenerator + { + template + std::string + operator()(const testing::TestParamInfo& info) const + { + auto bundle = static_cast(info.param); + return bundle.testName; + } + }; +}; + +// TODO: because we extract the error generation from the handler to framework +// the error messages need one round fine tuning +static auto +generateTestValuesForParametersTest() +{ + return std::vector{ + ParamTestCaseBundle{ + "InvalidBinaryType", + R"({ + "index": + "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD", + "binary": "invalid" + })", + "invalidParams", + "Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidAccountRootFormat", + R"({ + "account_root": "invalid" + })", + "malformedAddress", + "Malformed address."}, + + ParamTestCaseBundle{ + "InvalidAccountRootNotString", + R"({ + "account_root": 123 + })", + "invalidParams", + "account_rootNotString"}, + + ParamTestCaseBundle{ + "UnknownOption", + R"({ + })", + "invalidParams", + "unknownOption"}, + 
+ ParamTestCaseBundle{ + "InvalidDepositPreauthType", + R"({ + "deposit_preauth": 123 + })", + "invalidParams", + "Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidDepositPreauthString", + R"({ + "deposit_preauth": "invalid" + })", + "invalidParams", + "deposit_preauthMalformed"}, + + ParamTestCaseBundle{ + "InvalidDepositPreauthEmtpyJson", + R"({ + "deposit_preauth": { + } + })", + "invalidParams", + "Required field 'owner' missing"}, + + ParamTestCaseBundle{ + "InvalidDepositPreauthJsonWrongAccount", + R"({ + "deposit_preauth": { + "owner": "invalid", + "authorized": "invalid" + } + })", + "malformedAddress", + "Malformed address."}, + + ParamTestCaseBundle{ + "InvalidDepositPreauthJsonOwnerNotString", + R"({ + "deposit_preauth": { + "owner": 123, + "authorized": 123 + } + })", + "invalidParams", + "ownerNotString"}, + + ParamTestCaseBundle{ + "InvalidDepositPreauthJsonAuthorizedNotString", + fmt::format( + R"({{ + "deposit_preauth": {{ + "owner": "{}", + "authorized": 123 + }} + }})", + ACCOUNT), + "invalidParams", + "authorizedNotString"}, + + ParamTestCaseBundle{ + "InvalidTicketType", + R"({ + "ticket": 123 + })", + "invalidParams", + "Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidTicketIndex", + R"({ + "ticket": "invalid" + })", + "invalidParams", + "ticketMalformed"}, + + ParamTestCaseBundle{ + "InvalidTicketEmptyJson", + R"({ + "ticket": {} + })", + "invalidParams", + "Required field 'account' missing"}, + + ParamTestCaseBundle{ + "InvalidTicketJsonAccountNotString", + R"({ + "ticket": { + "account": 123, + "ticket_seq": 123 + } + })", + "invalidParams", + "accountNotString"}, + + ParamTestCaseBundle{ + "InvalidTicketJsonAccountInvalid", + R"({ + "ticket": { + "account": "123", + "ticket_seq": 123 + } + })", + "malformedAddress", + "Malformed address."}, + + ParamTestCaseBundle{ + "InvalidTicketJsonSeqNotInt", + fmt::format( + R"({{ + "ticket": {{ + "account": "{}", + "ticket_seq": "123" + }} + }})", + ACCOUNT), + "invalidParams", + 
"Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidOfferType", + R"({ + "offer": 123 + })", + "invalidParams", + "Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidOfferIndex", + R"({ + "offer": "invalid" + })", + "invalidParams", + "offerMalformed"}, + + ParamTestCaseBundle{ + "InvalidOfferEmptyJson", + R"({ + "offer": {} + })", + "invalidParams", + "Required field 'account' missing"}, + + ParamTestCaseBundle{ + "InvalidOfferJsonAccountNotString", + R"({ + "ticket": { + "account": 123, + "seq": 123 + } + })", + "invalidParams", + "accountNotString"}, + + ParamTestCaseBundle{ + "InvalidOfferJsonAccountInvalid", + R"({ + "ticket": { + "account": "123", + "seq": 123 + } + })", + "malformedAddress", + "Malformed address."}, + + ParamTestCaseBundle{ + "InvalidOfferJsonSeqNotInt", + fmt::format( + R"({{ + "offer": {{ + "account": "{}", + "seq": "123" + }} + }})", + ACCOUNT), + "invalidParams", + "Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidEscrowType", + R"({ + "escrow": 123 + })", + "invalidParams", + "Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidEscrowIndex", + R"({ + "escrow": "invalid" + })", + "invalidParams", + "escrowMalformed"}, + + ParamTestCaseBundle{ + "InvalidEscrowEmptyJson", + R"({ + "escrow": {} + })", + "invalidParams", + "Required field 'owner' missing"}, + + ParamTestCaseBundle{ + "InvalidEscrowJsonAccountNotString", + R"({ + "escrow": { + "owner": 123, + "seq": 123 + } + })", + "invalidParams", + "ownerNotString"}, + + ParamTestCaseBundle{ + "InvalidEscrowJsonAccountInvalid", + R"({ + "ticket": { + "account": "123", + "seq": 123 + } + })", + "malformedAddress", + "Malformed address."}, + + ParamTestCaseBundle{ + "InvalidEscrowJsonSeqNotInt", + fmt::format( + R"({{ + "escrow": {{ + "owner": "{}", + "seq": "123" + }} + }})", + ACCOUNT), + "invalidParams", + "Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidRippleStateType", + R"({ + "ripple_state": "123" + })", + "invalidParams", + "Invalid 
parameters."}, + + ParamTestCaseBundle{ + "InvalidRippleStateMissField", + R"({ + "ripple_state": { + "currency": "USD" + } + })", + "invalidParams", + "Required field 'accounts' missing"}, + + ParamTestCaseBundle{ + "InvalidRippleStateEmtpyJson", + R"({ + "ripple_state": { + } + })", + "invalidParams", + "Required field 'accounts' missing"}, + + ParamTestCaseBundle{ + "InvalidRippleStateOneAccount", + fmt::format( + R"({{ + "ripple_state": {{ + "accounts" : ["{}"] + }} + }})", + ACCOUNT), + "invalidParams", + "malformedAccounts"}, + + ParamTestCaseBundle{ + "InvalidRippleStateSameAccounts", + fmt::format( + R"({{ + "ripple_state": {{ + "accounts" : ["{}","{}"], + "currency": "USD" + }} + }})", + ACCOUNT, + ACCOUNT), + "invalidParams", + "malformedAccounts"}, + + ParamTestCaseBundle{ + "InvalidRippleStateWrongAccountsNotString", + fmt::format( + R"({{ + "ripple_state": {{ + "accounts" : ["{}",123], + "currency": "USD" + }} + }})", + ACCOUNT), + "invalidParams", + "malformedAccounts"}, + + ParamTestCaseBundle{ + "InvalidRippleStateWrongAccountsFormat", + fmt::format( + R"({{ + "ripple_state": {{ + "accounts" : ["{}","123"], + "currency": "USD" + }} + }})", + ACCOUNT), + "malformedAddress", + "malformedAddresses"}, + + ParamTestCaseBundle{ + "InvalidRippleStateWrongCurrency", + fmt::format( + R"({{ + "ripple_state": {{ + "accounts" : ["{}","{}"], + "currency": "XXXX" + }} + }})", + ACCOUNT, + ACCOUNT2), + "malformedCurrency", + "malformedCurrency"}, + + ParamTestCaseBundle{ + "InvalidRippleStateWrongCurrencyNotString", + fmt::format( + R"({{ + "ripple_state": {{ + "accounts" : ["{}","{}"], + "currency": 123 + }} + }})", + ACCOUNT, + ACCOUNT2), + "invalidParams", + "currencyNotString"}, + + ParamTestCaseBundle{ + "InvalidDirectoryType", + R"({ + "directory": 123 + })", + "invalidParams", + "Invalid parameters."}, + + ParamTestCaseBundle{ + "InvalidDirectoryIndex", + R"({ + "directory": "123" + })", + "invalidParams", + "directoryMalformed"}, + + ParamTestCaseBundle{ + 
"InvalidDirectoryEmtpyJson", + R"({ + "directory": {} + })", + "invalidParams", + "missingOwnerOrDirRoot"}, + + ParamTestCaseBundle{ + "InvalidDirectoryWrongOwnerNotString", + R"({ + "directory": { + "owner": 123 + } + })", + "invalidParams", + "ownerNotString"}, + + ParamTestCaseBundle{ + "InvalidDirectoryWrongOwnerFormat", + R"({ + "directory": { + "owner": "123" + } + })", + "malformedAddress", + "Malformed address."}, + + ParamTestCaseBundle{ + "InvalidDirectoryWrongDirFormat", + R"({ + "directory": { + "dir_root": "123" + } + })", + "invalidParams", + "dir_rootMalformed"}, + + ParamTestCaseBundle{ + "InvalidDirectoryWrongDirNotString", + R"({ + "directory": { + "dir_root": 123 + } + })", + "invalidParams", + "dir_rootNotString"}, + + ParamTestCaseBundle{ + "InvalidDirectoryDirOwnerConflict", + fmt::format( + R"({{ + "directory": {{ + "dir_root": "{}", + "owner": "{}" + }} + }})", + INDEX1, + ACCOUNT), + "invalidParams", + "mayNotSpecifyBothDirRootAndOwner"}, + + ParamTestCaseBundle{ + "InvalidDirectoryDirSubIndexNotInt", + fmt::format( + R"({{ + "directory": {{ + "dir_root": "{}", + "sub_index": "not int" + }} + }})", + INDEX1), + "invalidParams", + "Invalid parameters."}}; +} + +INSTANTIATE_TEST_CASE_P( + RPCLedgerEntryGroup1, + LedgerEntryParameterTest, + ValuesIn(generateTestValuesForParametersTest()), + LedgerEntryParameterTest::NameGenerator{}); + +TEST_P(LedgerEntryParameterTest, InvalidParams) +{ + auto const testBundle = GetParam(); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; + auto const req = json::parse(testBundle.testJson); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), testBundle.expectedError); + std::cout << err.at("error").as_string() << std::endl; + std::cout << err.at("error_message").as_string() << std::endl; + EXPECT_EQ( + 
err.at("error_message").as_string(), + testBundle.expectedErrorMessage); + }); + ctx.run(); +} + +// parameterized test cases for index +struct IndexTest : public HandlerBaseTest, + public WithParamInterface +{ + struct NameGenerator + { + template + std::string + operator()(const testing::TestParamInfo& info) const + { + return static_cast(info.param); + } + }; +}; + +// content of index, payment_channel, check fields is ledger index +INSTANTIATE_TEST_CASE_P( + RPCLedgerEntryGroup3, + IndexTest, + Values("index", "payment_channel", "check"), + IndexTest::NameGenerator{}); + +TEST_P(IndexTest, InvalidIndexUint256) +{ + auto const index = GetParam(); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "{}": "invalid" + }})", + index)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), index + "Malformed"); + }); + ctx.run(); +} + +TEST_P(IndexTest, InvalidIndexNotString) +{ + auto const index = GetParam(); + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "{}": 123 + }})", + index)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), index + "NotString"); + }); + ctx.run(); +} + +TEST_F(RPCLedgerEntryTest, LedgerEntryNotFound) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(RANGEMIN); // min + mockBackendPtr->updateRange(RANGEMAX); // max + // return valid ledgerinfo 
+ auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, RANGEMAX); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) + .WillByDefault(Return(ledgerinfo)); + + // return null for ledger entry + auto const key = + ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(key, RANGEMAX, _)) + .WillByDefault(Return(std::optional{})); + + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "account_root": "{}" + }})", + ACCOUNT)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "entryNotFound"); + }); + ctx.run(); +} + +struct NormalPathTestBundle +{ + std::string testName; + std::string testJson; + ripple::uint256 expectedIndex; + ripple::STObject mockedEntity; +}; + +struct RPCLedgerEntryNormalPathTest + : public RPCLedgerEntryTest, + public WithParamInterface +{ + struct NameGenerator + { + template + std::string + operator()(const testing::TestParamInfo& info) const + { + auto bundle = static_cast(info.param); + return bundle.testName; + } + }; +}; + +static auto +generateTestValuesForNormalPathTest() +{ + auto account1 = GetAccountIDWithString(ACCOUNT); + auto account2 = GetAccountIDWithString(ACCOUNT2); + ripple::Currency currency; + ripple::to_currency(currency, "USD"); + + return std::vector{ + NormalPathTestBundle{ + "Index", + fmt::format( + R"({{ + "binary": true, + "index": "{}" + }})", + INDEX1), + ripple::uint256{INDEX1}, + CreateAccountRootObject( + ACCOUNT2, ripple::lsfGlobalFreeze, 1, 10, 2, INDEX1, 3)}, + NormalPathTestBundle{ + "Payment_channel", + fmt::format( + R"({{ + "binary": true, + "payment_channel": "{}" 
+ }})", + INDEX1), + ripple::uint256{INDEX1}, + CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400)}, + NormalPathTestBundle{ + "Check", + fmt::format( + R"({{ + "binary": true, + "check": "{}" + }})", + INDEX1), + ripple::uint256{INDEX1}, + CreateCheckLedgerObject(ACCOUNT, ACCOUNT2)}, + NormalPathTestBundle{ + "DirectoryIndex", + fmt::format( + R"({{ + "binary": true, + "directory": "{}" + }})", + INDEX1), + ripple::uint256{INDEX1}, + CreateOwnerDirLedgerObject( + std::vector{ripple::uint256{INDEX1}}, INDEX1)}, + NormalPathTestBundle{ + "OfferIndex", + fmt::format( + R"({{ + "binary": true, + "offer": "{}" + }})", + INDEX1), + ripple::uint256{INDEX1}, + CreateOfferLedgerObject(ACCOUNT, 100, 200, "USD", ACCOUNT2)}, + NormalPathTestBundle{ + "EscrowIndex", + fmt::format( + R"({{ + "binary": true, + "escrow": "{}" + }})", + INDEX1), + ripple::uint256{INDEX1}, + CreateEscrowLedgerObject(ACCOUNT, ACCOUNT2)}, + NormalPathTestBundle{ + "TicketIndex", + fmt::format( + R"({{ + "binary": true, + "ticket": "{}" + }})", + INDEX1), + ripple::uint256{INDEX1}, + CreateTicketLedgerObject(ACCOUNT, 0)}, + NormalPathTestBundle{ + "DepositPreauthIndex", + fmt::format( + R"({{ + "binary": true, + "deposit_preauth": "{}" + }})", + INDEX1), + ripple::uint256{INDEX1}, + CreateDepositPreauthLedgerObject(ACCOUNT, ACCOUNT2)}, + NormalPathTestBundle{ + "AccountRoot", + fmt::format( + R"({{ + "binary": true, + "account_root": "{}" + }})", + ACCOUNT), + ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key, + CreateAccountRootObject(ACCOUNT, 0, 1, 1, 1, INDEX1, 1)}, + NormalPathTestBundle{ + "DirectoryViaDirRoot", + fmt::format( + R"({{ + "binary": true, + "directory": {{ + "dir_root": "{}", + "sub_index": 2 + }} + }})", + INDEX1), + ripple::keylet::page(ripple::uint256{INDEX1}, 2).key, + CreateOwnerDirLedgerObject( + std::vector{ripple::uint256{INDEX1}}, INDEX1)}, + NormalPathTestBundle{ + "DirectoryViaOwner", + fmt::format( + R"({{ + "binary": true, + 
"directory": {{ + "owner": "{}", + "sub_index": 2 + }} + }})", + ACCOUNT), + ripple::keylet::page(ripple::keylet::ownerDir(account1), 2).key, + CreateOwnerDirLedgerObject( + std::vector{ripple::uint256{INDEX1}}, INDEX1)}, + NormalPathTestBundle{ + "DirectoryViaDefaultSubIndex", + fmt::format( + R"({{ + "binary": true, + "directory": {{ + "owner": "{}" + }} + }})", + ACCOUNT), + // default sub_index is 0 + ripple::keylet::page(ripple::keylet::ownerDir(account1), 0).key, + CreateOwnerDirLedgerObject( + std::vector{ripple::uint256{INDEX1}}, INDEX1)}, + NormalPathTestBundle{ + "Escrow", + fmt::format( + R"({{ + "binary": true, + "escrow": {{ + "owner": "{}", + "seq": 1 + }} + }})", + ACCOUNT), + ripple::keylet::escrow(account1, 1).key, + CreateEscrowLedgerObject(ACCOUNT, ACCOUNT2)}, + NormalPathTestBundle{ + "DepositPreauth", + fmt::format( + R"({{ + "binary": true, + "deposit_preauth": {{ + "owner": "{}", + "authorized": "{}" + }} + }})", + ACCOUNT, + ACCOUNT2), + ripple::keylet::depositPreauth(account1, account2).key, + CreateDepositPreauthLedgerObject(ACCOUNT, ACCOUNT2)}, + NormalPathTestBundle{ + "RippleState", + fmt::format( + R"({{ + "binary": true, + "ripple_state": {{ + "accounts": ["{}","{}"], + "currency": "USD" + }} + }})", + ACCOUNT, + ACCOUNT2), + ripple::keylet::line(account1, account2, currency).key, + CreateRippleStateLedgerObject( + ACCOUNT, + "USD", + ACCOUNT2, + 100, + ACCOUNT, + 10, + ACCOUNT2, + 20, + INDEX1, + 123)}, + NormalPathTestBundle{ + "Ticket", + fmt::format( + R"({{ + "binary": true, + "ticket": {{ + "account": "{}", + "ticket_seq": 2 + }} + }})", + ACCOUNT), + ripple::getTicketIndex(account1, 2), + CreateTicketLedgerObject(ACCOUNT, 0)}, + NormalPathTestBundle{ + "Offer", + fmt::format( + R"({{ + "binary": true, + "offer": {{ + "account": "{}", + "seq": 2 + }} + }})", + ACCOUNT), + ripple::keylet::offer(account1, 2).key, + CreateOfferLedgerObject(ACCOUNT, 100, 200, "USD", ACCOUNT2)}}; +} + +INSTANTIATE_TEST_CASE_P( + RPCLedgerEntryGroup2, 
+ RPCLedgerEntryNormalPathTest, + ValuesIn(generateTestValuesForNormalPathTest()), + RPCLedgerEntryNormalPathTest::NameGenerator{}); + +// Test for normal path +// Check the index in response matches the computed index accordingly +TEST_P(RPCLedgerEntryNormalPathTest, NormalPath) +{ + auto const testBundle = GetParam(); + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(RANGEMIN); // min + mockBackendPtr->updateRange(RANGEMAX); // max + // return valid ledgerinfo + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, RANGEMAX); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) + .WillByDefault(Return(ledgerinfo)); + + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + ON_CALL( + *rawBackendPtr, + doFetchLedgerObject(testBundle.expectedIndex, RANGEMAX, _)) + .WillByDefault( + Return(testBundle.mockedEntity.getSerializer().peekData())); + + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; + auto const req = json::parse(testBundle.testJson); + auto const output = handler.process(req, yield); + ASSERT_TRUE(output); + EXPECT_EQ(output.value().at("ledger_hash").as_string(), LEDGERHASH); + EXPECT_EQ(output.value().at("ledger_index").as_uint64(), RANGEMAX); + EXPECT_EQ( + output.value().at("node_binary").as_string(), + ripple::strHex(testBundle.mockedEntity.getSerializer().peekData())); + EXPECT_EQ( + ripple::uint256(output.value().at("index").as_string().c_str()), + testBundle.expectedIndex); + }); + ctx.run(); +} + +// this testcase will test the deserialization of ledger entry +TEST_F(RPCLedgerEntryTest, BinaryFalse) +{ + static auto constexpr OUT = R"({ + "ledger_hash":"4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652", + "ledger_index":30, + "validated":true, + "index":"05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD", 
+ "node":{ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Amount":"100", + "Balance":"200", + "Destination":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "Flags":0, + "LedgerEntryType":"PayChannel", + "OwnerNode":"0", + "PreviousTxnID":"05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD", + "PreviousTxnLgrSeq":400, + "PublicKey":"020000000000000000000000000000000000000000000000000000000000000000", + "SettleDelay":300, + "index":"05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD" + } + })"; + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(RANGEMIN); // min + mockBackendPtr->updateRange(RANGEMAX); // max + // return valid ledgerinfo + auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, RANGEMAX); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) + .WillByDefault(Return(ledgerinfo)); + + // return valid ledger entry which can be deserialized + auto const ledgerEntry = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + ON_CALL( + *rawBackendPtr, + doFetchLedgerObject(ripple::uint256{INDEX1}, RANGEMAX, _)) + .WillByDefault(Return(ledgerEntry.getSerializer().peekData())); + + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "payment_channel": "{}" + }})", + INDEX1)); + auto const output = handler.process(req, yield); + ASSERT_TRUE(output); + EXPECT_EQ(*output, json::parse(OUT)); + }); + ctx.run(); +} + +TEST_F(RPCLedgerEntryTest, UnexpectedLedgerType) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(RANGEMIN); // min + mockBackendPtr->updateRange(RANGEMAX); // max + // return valid ledgerinfo + auto const ledgerinfo = 
CreateLedgerInfo(LEDGERHASH, RANGEMAX); + EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) + .WillByDefault(Return(ledgerinfo)); + + // return valid ledger entry which can be deserialized + auto const ledgerEntry = CreatePaymentChannelLedgerObject( + ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); + ON_CALL( + *rawBackendPtr, + doFetchLedgerObject(ripple::uint256{INDEX1}, RANGEMAX, _)) + .WillByDefault(Return(ledgerEntry.getSerializer().peekData())); + + boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "check": "{}" + }})", + INDEX1)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + }); + ctx.run(); +} diff --git a/unittests/rpc/handlers/PingTest.cpp b/unittests/rpc/handlers/PingTest.cpp new file mode 100644 index 00000000..2d926b59 --- /dev/null +++ b/unittests/rpc/handlers/PingTest.cpp @@ -0,0 +1,37 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +using namespace RPCng; + +class RPCHandlerTest : public NoLoggerFixture +{ +}; + +// example handler tests +TEST_F(RPCHandlerTest, Ping) +{ + auto const handler = AnyHandler{PingHandler{}}; + auto const output = handler.process(boost::json::parse(R"({})")); + ASSERT_TRUE(output); + EXPECT_EQ(output.value(), boost::json::parse(R"({})")); +} diff --git a/unittests/rpc/handlers/TestHandlerTests.cpp b/unittests/rpc/handlers/TestHandlerTests.cpp new file mode 100644 index 00000000..89f602ad --- /dev/null +++ b/unittests/rpc/handlers/TestHandlerTests.cpp @@ -0,0 +1,112 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +#include + +using namespace std; +using namespace RPCng; +using namespace RPCng::validation; +using namespace unittests::detail; + +namespace json = boost::json; + +class RPCTestHandlerTest : public NoLoggerFixture +{ +}; + +// example handler tests +TEST_F(RPCTestHandlerTest, HandlerSuccess) +{ + auto const handler = AnyHandler{HandlerFake{}}; + auto const input = json::parse(R"({ + "hello": "world", + "limit": 10 + })"); + + auto const output = handler.process(input); + ASSERT_TRUE(output); + + auto const val = output.value(); + EXPECT_EQ(val.as_object().at("computed").as_string(), "world_10"); +} + +TEST_F(RPCTestHandlerTest, CoroutineHandlerSuccess) +{ + auto const handler = AnyHandler{CoroutineHandlerFake{}}; + auto const input = json::parse(R"({ + "hello": "world", + "limit": 10 + })"); + boost::asio::io_context ctx; + boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { + auto const output = handler.process(input, yield); + ASSERT_TRUE(output); + + auto const val = output.value(); + EXPECT_EQ(val.as_object().at("computed").as_string(), "world_10"); + }); + ctx.run(); +} + +TEST_F(RPCTestHandlerTest, NoInputHandlerSuccess) +{ + auto const handler = AnyHandler{NoInputHandlerFake{}}; + auto const output = handler.process(json::parse(R"({})")); + ASSERT_TRUE(output); + + auto const val = output.value(); + EXPECT_EQ(val.as_object().at("computed").as_string(), "test"); +} + +TEST_F(RPCTestHandlerTest, HandlerErrorHandling) +{ + auto const handler = AnyHandler{HandlerFake{}}; + auto const input = json::parse(R"({ + "hello": "not world", + "limit": 10 + })"); + + auto const output = handler.process(input); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidParams"); + EXPECT_EQ(err.at("error_message").as_string(), "Invalid parameters."); + 
EXPECT_EQ(err.at("error_code").as_uint64(), 31); +} + +TEST_F(RPCTestHandlerTest, HandlerInnerErrorHandling) +{ + auto const handler = AnyHandler{FailingHandlerFake{}}; + auto const input = json::parse(R"({ + "hello": "world", + "limit": 10 + })"); + + // validation succeeds but handler itself returns error + auto const output = handler.process(input); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "Very custom error"); +} diff --git a/unittests/rpc/handlers/TxTest.cpp b/unittests/rpc/handlers/TxTest.cpp new file mode 100644 index 00000000..11e9521f --- /dev/null +++ b/unittests/rpc/handlers/TxTest.cpp @@ -0,0 +1,275 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include + +#include + +using namespace RPCng; +namespace json = boost::json; +using namespace testing; + +constexpr static auto TXNID = + "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; +constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; +constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; +constexpr static auto CURRENCY = "0158415500000000C1F76FF6ECB0BAC600000000"; + +class RPCTxTest : public HandlerBaseTest +{ +}; + +TEST_F(RPCTxTest, ExcessiveLgrRange) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{TxHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "command": "tx", + "transaction": "{}", + "min_ledger": 1, + "max_ledger":1002 + }})", + TXNID)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "excessiveLgrRange"); + EXPECT_EQ( + err.at("error_message").as_string(), "Ledger range exceeds 1000."); + }); + ctx.run(); +} + +TEST_F(RPCTxTest, InvalidLgrRange) +{ + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{TxHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "command": "tx", + "transaction": "{}", + "max_ledger": 1, + "min_ledger": 10 + }})", + TXNID)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "invalidLgrRange"); + EXPECT_EQ( + err.at("error_message").as_string(), "Ledger range is invalid."); + }); + ctx.run(); +} + +TEST_F(RPCTxTest, TxnNotFound) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + ON_CALL(*rawBackendPtr, 
fetchTransaction(ripple::uint256{TXNID}, _)) + .WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1); + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{TxHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "command": "tx", + "transaction": "{}" + }})", + TXNID)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "txnNotFound"); + EXPECT_EQ( + err.at("error_message").as_string(), "Transaction not found."); + }); + ctx.run(); +} + +TEST_F(RPCTxTest, TxnNotFoundInGivenRangeSearchAllFalse) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(10); // min + mockBackendPtr->updateRange(30); // max + ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)) + .WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1); + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{TxHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "command": "tx", + "transaction": "{}", + "min_ledger": 1, + "max_ledger":1000 + }})", + TXNID)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "txnNotFound"); + EXPECT_EQ( + err.at("error_message").as_string(), "Transaction not found."); + EXPECT_EQ(err.at("searched_all").as_bool(), false); + }); + ctx.run(); +} + +TEST_F(RPCTxTest, TxnNotFoundInGivenRangeSearchAllTrue) +{ + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + mockBackendPtr->updateRange(1); // min + mockBackendPtr->updateRange(1000); // max + ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)) + 
.WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1); + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{TxHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "command": "tx", + "transaction": "{}", + "min_ledger": 1, + "max_ledger":1000 + }})", + TXNID)); + auto const output = handler.process(req, yield); + ASSERT_FALSE(output); + + auto const err = RPC::makeError(output.error()); + EXPECT_EQ(err.at("error").as_string(), "txnNotFound"); + EXPECT_EQ( + err.at("error_message").as_string(), "Transaction not found."); + EXPECT_EQ(err.at("searched_all").as_bool(), true); + }); + ctx.run(); +} + +TEST_F(RPCTxTest, DefaultParameter) +{ + auto constexpr static OUT = R"({ + "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "Fee":"2", + "Sequence":100, + "SigningPubKey":"74657374", + "TakerGets":{ + "currency":"0158415500000000C1F76FF6ECB0BAC600000000", + "issuer":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", + "value":"200" + }, + "TakerPays":"300", + "TransactionType":"OfferCreate", + "hash":"2E2FBAAFF767227FE4381C4BE9855986A6B9F96C62F6E443731AB36F7BBB8A08", + "meta":{ + "AffectedNodes":[ + { + "CreatedNode":{ + "LedgerEntryType":"Offer", + "NewFields":{ + "TakerGets":"200", + "TakerPays":{ + "currency":"0158415500000000C1F76FF6ECB0BAC600000000", + "issuer":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", + "value":"300" + } + } + } + } + ], + "TransactionIndex":100, + "TransactionResult":"tesSUCCESS" + }, + "date":123456, + "ledger_index":100 + })"; + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + TransactionAndMetadata tx; + tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300) + .getSerializer() + .peekData(); + tx.transaction = CreateCreateOfferTransactionObject( + ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300) + .getSerializer() + .peekData(); + tx.date = 123456; + tx.ledgerSequence = 100; + ON_CALL(*rawBackendPtr, 
fetchTransaction(ripple::uint256{TXNID}, _)) + .WillByDefault(Return(tx)); + EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1); + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{TxHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "command": "tx", + "transaction": "{}" + }})", + TXNID)); + auto const output = handler.process(req, yield); + ASSERT_TRUE(output); + EXPECT_EQ(*output, json::parse(OUT)); + }); + ctx.run(); +} + +TEST_F(RPCTxTest, ReturnBinary) +{ + auto constexpr static OUT = R"({ + "meta":"201C00000064F8E311006FE864D50AA87BEE5380000158415500000000C1F76FF6ECB0BAC6000000004B4E9C06F24296074F7BC48F92A97916C6DC5EA96540000000000000C8E1E1F1031000", + "tx":"120007240000006464400000000000012C65D5071AFD498D00000158415500000000C1F76FF6ECB0BAC600000000D31252CF902EF8DD8451243869B38667CBD89DF368400000000000000273047465737481144B4E9C06F24296074F7BC48F92A97916C6DC5EA9", + "hash":"05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD", + "date":123456, + "ledger_index":100 + })"; + auto const rawBackendPtr = static_cast(mockBackendPtr.get()); + TransactionAndMetadata tx; + tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300) + .getSerializer() + .peekData(); + tx.transaction = CreateCreateOfferTransactionObject( + ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300) + .getSerializer() + .peekData(); + tx.date = 123456; + tx.ledgerSequence = 100; + ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)) + .WillByDefault(Return(tx)); + EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1); + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { + auto const handler = AnyHandler{TxHandler{mockBackendPtr}}; + auto const req = json::parse(fmt::format( + R"({{ + "command": "tx", + "transaction": "{}", + "binary": true + }})", + TXNID)); + auto const output = handler.process(req, yield); + ASSERT_TRUE(output); + EXPECT_EQ(*output, 
json::parse(OUT)); + }); + ctx.run(); +} diff --git a/unittests/rpc/handlers/impl/FakesAndMocks.h b/unittests/rpc/handlers/impl/FakesAndMocks.h new file mode 100644 index 00000000..4e2d7c10 --- /dev/null +++ b/unittests/rpc/handlers/impl/FakesAndMocks.h @@ -0,0 +1,220 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +namespace unittests::detail { + +// input data for the test handlers below +struct TestInput +{ + std::string hello; + std::optional limit; +}; + +// output data produced by the test handlers below +struct TestOutput +{ + std::string computed; +}; + +// must be implemented as per rpc/common/Concepts.h +inline TestInput +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) +{ + std::optional optLimit; + if (jv.as_object().contains("limit")) + optLimit = jv.at("limit").as_int64(); + + return {jv.as_object().at("hello").as_string().c_str(), optLimit}; +} + +// must be implemented as per rpc/common/Concepts.h +inline void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + TestOutput const& output) +{ + jv = {{"computed", output.computed}}; +} + +// example handler +class HandlerFake +{ +public: + using Input = TestInput; + using Output = TestOutput; + using Result = RPCng::HandlerReturnType; + + RPCng::RpcSpecConstRef + spec() const + { + using namespace RPCng::validation; + + // clang-format off + static const RPCng::RpcSpec rpcSpec = { + {"hello", Required{}, Type{}, EqualTo{"world"}}, + {"limit", Type{}, Between{0, 100}} // optional field + }; + // clang-format on + + return rpcSpec; + } + + Result + process(Input input) const + { + return Output{ + input.hello + '_' + std::to_string(input.limit.value_or(0))}; + } +}; + +// example handler +class CoroutineHandlerFake +{ +public: + using Input = TestInput; + using Output = TestOutput; + using Result = RPCng::HandlerReturnType; + + RPCng::RpcSpecConstRef + spec() const + { + using namespace RPCng::validation; + + // clang-format off + static const RPCng::RpcSpec rpcSpec = { + {"hello", Required{}, Type{}, EqualTo{"world"}}, + {"limit", Type{}, Between{0, 100}} // optional field + }; + 
// clang-format on + + return rpcSpec; + } + + Result + process(Input input, boost::asio::yield_context& yield) const + { + return Output{ + input.hello + '_' + std::to_string(input.limit.value_or(0))}; + } +}; + +class NoInputHandlerFake +{ +public: + using Output = TestOutput; + using Result = RPCng::HandlerReturnType; + + Result + process() const + { + return Output{"test"}; + } +}; + +// example handler that returns custom error +class FailingHandlerFake +{ +public: + using Input = TestInput; + using Output = TestOutput; + using Result = RPCng::HandlerReturnType; + + RPCng::RpcSpecConstRef + spec() const + { + using namespace RPCng::validation; + + // clang-format off + static const RPCng::RpcSpec rpcSpec = { + {"hello", Required{}, Type{}, EqualTo{"world"}}, + {"limit", Type{}, Between{0u, 100u}} // optional field + }; + // clang-format on + + return rpcSpec; + } + + Result + process([[maybe_unused]] Input input) const + { + // always fail + return RPCng::Error{RPC::Status{"Very custom error"}}; + } +}; + +struct InOutFake +{ + std::string something; + + // Note: no spaceship comparison possible for std::string + friend bool + operator==(InOutFake const& lhs, InOutFake const& rhs) = default; +}; + +// must be implemented as per rpc/common/Concepts.h +inline InOutFake +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) +{ + return {jv.as_object().at("something").as_string().c_str()}; +} + +// must be implemented as per rpc/common/Concepts.h +inline void +tag_invoke( + boost::json::value_from_tag, + boost::json::value& jv, + InOutFake const& output) +{ + jv = {{"something", output.something}}; +} + +struct HandlerMock +{ + using Input = InOutFake; + using Output = InOutFake; + using Result = RPCng::HandlerReturnType; + + MOCK_METHOD(RPCng::RpcSpecConstRef, spec, (), (const)); + MOCK_METHOD(Result, process, (Input), (const)); +}; + +struct HandlerWithoutInputMock +{ + using Output = InOutFake; + using Result = RPCng::HandlerReturnType; + + 
MOCK_METHOD(Result, process, (), (const)); +}; + +} // namespace unittests::detail diff --git a/unittests/util/Fixtures.h b/unittests/util/Fixtures.h new file mode 100644 index 00000000..5cfa5249 --- /dev/null +++ b/unittests/util/Fixtures.h @@ -0,0 +1,199 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2022, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include + +#include + +#include "MockBackend.h" +#include +#include + +/** + * @brief Fixture with LogService support. + */ +class LoggerFixture : virtual public ::testing::Test +{ + /** + * @brief A simple string buffer that can be used to mock std::cout for + * console logging. 
+ */ + class FakeBuffer final : public std::stringbuf + { + public: + std::string + getStrAndReset() + { + auto value = str(); + str(""); + return value; + } + }; + + FakeBuffer buffer_; + std::ostream stream_ = std::ostream{&buffer_}; + +protected: + // Simulates the `LogService::init(config)` call + void + SetUp() override + { + static std::once_flag once_; + std::call_once(once_, [] { + boost::log::add_common_attributes(); + boost::log::register_simple_formatter_factory( + "Severity"); + }); + + namespace src = boost::log::sources; + namespace keywords = boost::log::keywords; + namespace sinks = boost::log::sinks; + namespace expr = boost::log::expressions; + auto core = boost::log::core::get(); + + core->remove_all_sinks(); + boost::log::add_console_log( + stream_, keywords::format = "%Channel%:%Severity% %Message%"); + auto min_severity = expr::channel_severity_filter( + clio::log_channel, clio::log_severity); + min_severity["General"] = clio::Severity::DBG; + min_severity["Trace"] = clio::Severity::TRC; + core->set_filter(min_severity); + core->set_logging_enabled(true); + } + + void + checkEqual(std::string expected) + { + auto value = buffer_.getStrAndReset(); + ASSERT_EQ(value, expected + '\n'); + } + + void + checkEmpty() + { + ASSERT_TRUE(buffer_.getStrAndReset().empty()); + } +}; + +/** + * @brief Fixture with LogService support but completely disabled logging. + * + * This is meant to be used as a base for other fixtures. + */ +class NoLoggerFixture : virtual public LoggerFixture +{ +protected: + void + SetUp() override + { + LoggerFixture::SetUp(); + boost::log::core::get()->set_logging_enabled(false); + } +}; + +/** + * @brief Fixture with an embedded boost::asio context running on a thread + * + * This is meant to be used as a base for other fixtures. 
+ */ +struct AsyncAsioContextTest : virtual public NoLoggerFixture +{ + AsyncAsioContextTest() + { + work.emplace(ctx); // make sure ctx does not stop on its own + } + + ~AsyncAsioContextTest() + { + work.reset(); + ctx.stop(); + runner.join(); + } + +protected: + boost::asio::io_context ctx; + +private: + std::optional work; + std::thread runner{[this] { ctx.run(); }}; +}; + +/** + * @brief Fixture with an embedded boost::asio context that is not running by + * default but can be progressed on the calling thread + * + * Use `run_for(duration)` etc. directly on `ctx`. + * This is meant to be used as a base for other fixtures. + */ +struct SyncAsioContextTest : virtual public NoLoggerFixture +{ + SyncAsioContextTest() + { + } + +protected: + boost::asio::io_context ctx; +}; + +/** + * @brief Fixture with an mock backend + */ +struct MockBackendTest : virtual public NoLoggerFixture +{ + void + SetUp() override + { + NoLoggerFixture::SetUp(); + clio::Config cfg; + mockBackendPtr = std::make_shared(cfg); + } + void + TearDown() override + { + mockBackendPtr.reset(); + } + +protected: + std::shared_ptr mockBackendPtr; +}; + +/** + * @brief Fixture with an mock backend and an embedded boost::asio context + * Handler unittest base class + */ +class HandlerBaseTest : public MockBackendTest, public SyncAsioContextTest +{ + void + SetUp() override + { + MockBackendTest::SetUp(); + SyncAsioContextTest::SetUp(); + } + void + TearDown() override + { + SyncAsioContextTest::TearDown(); + MockBackendTest::TearDown(); + } +}; diff --git a/unittests/util/MockBackend.h b/unittests/util/MockBackend.h new file mode 100644 index 00000000..415308c5 --- /dev/null +++ b/unittests/util/MockBackend.h @@ -0,0 +1,203 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include + +using namespace Backend; + +class MockBackend : public BackendInterface +{ +public: + MockBackend(clio::Config cfg) : BackendInterface(cfg) + { + } + MOCK_METHOD( + std::optional, + fetchLedgerBySequence, + (std::uint32_t const sequence, boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::optional, + fetchLedgerByHash, + (ripple::uint256 const& hash, boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::optional, + fetchLatestLedgerSequence, + (boost::asio::yield_context & yield), + (const, override)); + + MOCK_METHOD( + std::optional, + fetchTransaction, + (ripple::uint256 const& hash, boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::vector, + fetchTransactions, + (std::vector const& hashes, + boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + TransactionsAndCursor, + fetchAccountTransactions, + (ripple::AccountID const& account, + std::uint32_t const limit, + bool forward, + std::optional const& cursor, + boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::vector, + 
fetchAllTransactionsInLedger, + (std::uint32_t const ledgerSequence, boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::vector, + fetchAllTransactionHashesInLedger, + (std::uint32_t const ledgerSequence, boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::optional, + fetchNFT, + (ripple::uint256 const& tokenID, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yieldd), + (const, override)); + + MOCK_METHOD( + TransactionsAndCursor, + fetchNFTTransactions, + (ripple::uint256 const& tokenID, + std::uint32_t const limit, + bool const forward, + std::optional const& cursorIn, + boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::vector, + doFetchLedgerObjects, + (std::vector const& key, + std::uint32_t const sequence, + boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::optional, + doFetchLedgerObject, + (ripple::uint256 const& key, + std::uint32_t const sequence, + boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::vector, + fetchLedgerDiff, + (std::uint32_t const ledgerSequence, boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::optional, + doFetchSuccessorKey, + (ripple::uint256 key, + std::uint32_t const ledgerSequence, + boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD( + std::optional, + hardFetchLedgerRange, + (boost::asio::yield_context & yield), + (const, override)); + + MOCK_METHOD( + void, + writeLedger, + (ripple::LedgerInfo const& ledgerInfo, std::string&& ledgerHeader), + (override)); + + MOCK_METHOD( + void, + writeLedgerObject, + (std::string && key, std::uint32_t const seq, std::string&& blob), + (override)); + + MOCK_METHOD( + void, + writeTransaction, + (std::string && hash, + std::uint32_t const seq, + std::uint32_t const date, + std::string&& transaction, + std::string&& metadata), + (override)); + + MOCK_METHOD(void, 
writeNFTs, (std::vector && blob), (override)); + + MOCK_METHOD( + void, + writeAccountTransactions, + (std::vector && blob), + (override)); + + MOCK_METHOD( + void, + writeNFTTransactions, + (std::vector && blob), + (override)); + + MOCK_METHOD( + void, + writeSuccessor, + (std::string && key, std::uint32_t const seq, std::string&& successor), + (override)); + + MOCK_METHOD(void, startWrites, (), (const, override)); + + MOCK_METHOD( + bool, + doOnlineDelete, + (std::uint32_t numLedgersToKeep, boost::asio::yield_context& yield), + (const, override)); + + MOCK_METHOD(bool, isTooBusy, (), (const, override)); + + MOCK_METHOD(void, open, (bool), (override)); + + MOCK_METHOD(void, close, (), (override)); + + MOCK_METHOD( + void, + doWriteLedgerObject, + (std::string && key, std::uint32_t const seq, std::string&& blob), + (override)); + + MOCK_METHOD(bool, doFinishWrites, (), (override)); +}; diff --git a/unittests/util/MockWsBase.h b/unittests/util/MockWsBase.h new file mode 100644 index 00000000..5e0c86bd --- /dev/null +++ b/unittests/util/MockWsBase.h @@ -0,0 +1,48 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include + +struct MockSession : public WsBase +{ + std::string message; + void + send(std::shared_ptr msg_type) override + { + message += std::string(msg_type->data()); + } + MockSession(util::TagDecoratorFactory const& factory) : WsBase(factory) + { + } +}; + +struct MockDeadSession : public WsBase +{ + void + send(std::shared_ptr msg_type) override + { + // err happen, the session should remove from subscribers + ec_.assign(2, boost::system::system_category()); + } + MockDeadSession(util::TagDecoratorFactory const& factory) : WsBase(factory) + { + } +}; diff --git a/unittests/util/TestObject.cpp b/unittests/util/TestObject.cpp new file mode 100644 index 00000000..35d6bb4b --- /dev/null +++ b/unittests/util/TestObject.cpp @@ -0,0 +1,414 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include "TestObject.h" + +#include +#include + +ripple::AccountID +GetAccountIDWithString(std::string_view id) +{ + return ripple::parseBase58(std::string(id)).value(); +} + +ripple::LedgerInfo +CreateLedgerInfo(std::string_view ledgerHash, ripple::LedgerIndex seq) +{ + auto ledgerinfo = ripple::LedgerInfo(); + ledgerinfo.hash = ripple::uint256{ledgerHash}; + ledgerinfo.seq = seq; + return ledgerinfo; +} + +ripple::STObject +CreateFeeSettingLedgerObject( + uint64_t base, + uint32_t reserveInc, + uint32_t reserveBase, + uint32_t refFeeUnit, + uint32_t flag) +{ + ripple::STObject obj(ripple::sfFee); + obj.setFieldU16(ripple::sfLedgerEntryType, ripple::ltFEE_SETTINGS); + obj.setFieldU64(ripple::sfBaseFee, base); + obj.setFieldU32(ripple::sfReserveIncrement, reserveInc); + obj.setFieldU32(ripple::sfReserveBase, reserveBase); + obj.setFieldU32(ripple::sfReferenceFeeUnits, refFeeUnit); + obj.setFieldU32(ripple::sfFlags, flag); + return obj; +} + +ripple::Blob +CreateFeeSettingBlob( + uint64_t base, + uint32_t reserveInc, + uint32_t reserveBase, + uint32_t refFeeUnit, + uint32_t flag) +{ + auto lo = CreateFeeSettingLedgerObject( + base, reserveInc, reserveBase, refFeeUnit, flag); + return lo.getSerializer().peekData(); +} + +ripple::STObject +CreatePaymentTransactionObject( + std::string_view accountId1, + std::string_view accountId2, + int amount, + int fee, + uint32_t seq) +{ + ripple::STObject obj(ripple::sfTransaction); + obj.setFieldU16(ripple::sfTransactionType, ripple::ttPAYMENT); + auto account = + ripple::parseBase58(std::string(accountId1)); + obj.setAccountID(ripple::sfAccount, account.value()); + obj.setFieldAmount(ripple::sfAmount, ripple::STAmount(amount, false)); + obj.setFieldAmount(ripple::sfFee, ripple::STAmount(fee, false)); + auto account2 = + ripple::parseBase58(std::string(accountId2)); + obj.setAccountID(ripple::sfDestination, account2.value()); + 
obj.setFieldU32(ripple::sfSequence, seq); + const char* key = "test"; + ripple::Slice slice(key, 4); + obj.setFieldVL(ripple::sfSigningPubKey, slice); + return obj; +} + +ripple::STObject +CreateAccountRootObject( + std::string_view accountId, + uint32_t flag, + uint32_t seq, + int balance, + uint32_t ownerCount, + std::string_view previousTxnID, + uint32_t previousTxnSeq) +{ + ripple::STObject accountRoot(ripple::sfAccount); + accountRoot.setFieldU16(ripple::sfLedgerEntryType, ripple::ltACCOUNT_ROOT); + accountRoot.setFieldU32(ripple::sfFlags, flag); + accountRoot.setAccountID( + ripple::sfAccount, GetAccountIDWithString(accountId)); + accountRoot.setFieldU32(ripple::sfSequence, seq); + accountRoot.setFieldAmount( + ripple::sfBalance, ripple::STAmount(balance, false)); + accountRoot.setFieldU32(ripple::sfOwnerCount, ownerCount); + accountRoot.setFieldH256( + ripple::sfPreviousTxnID, ripple::uint256{previousTxnID}); + accountRoot.setFieldU32(ripple::sfPreviousTxnLgrSeq, previousTxnSeq); + return accountRoot; +} + +ripple::STObject +CreateCreateOfferTransactionObject( + std::string_view accountId, + int fee, + uint32_t seq, + std::string_view currency, + std::string_view issuer, + int takerGets, + int takerPays) +{ + ripple::STObject obj(ripple::sfTransaction); + obj.setFieldU16(ripple::sfTransactionType, ripple::ttOFFER_CREATE); + auto account = + ripple::parseBase58(std::string(accountId)); + obj.setAccountID(ripple::sfAccount, account.value()); + auto amount = ripple::STAmount(fee, false); + obj.setFieldAmount(ripple::sfFee, amount); + obj.setFieldU32(ripple::sfSequence, seq); + // add amount + ripple::Issue issue1( + ripple::Currency{currency}, + ripple::parseBase58(std::string(issuer)).value()); + obj.setFieldAmount( + ripple::sfTakerGets, ripple::STAmount(issue1, takerGets)); + obj.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(takerPays, false)); + + auto key = "test"; + ripple::Slice slice(key, 4); + obj.setFieldVL(ripple::sfSigningPubKey, slice); + 
return obj; +} + +ripple::Issue +GetIssue(std::string_view currency, std::string_view issuerId) +{ + // standard currency + if (currency.size() == 3) + return ripple::Issue( + ripple::to_currency(std::string(currency)), + ripple::parseBase58(std::string(issuerId)) + .value()); + return ripple::Issue( + ripple::Currency{currency}, + ripple::parseBase58(std::string(issuerId)).value()); +} + +ripple::STObject +CreateMetaDataForBookChange( + std::string_view currency, + std::string_view issueId, + uint32_t transactionIndex, + int finalTakerGets, + int perviousTakerGets, + int finalTakerPays, + int perviousTakerPays) +{ + ripple::STObject finalFields(ripple::sfFinalFields); + ripple::Issue issue1 = GetIssue(currency, issueId); + finalFields.setFieldAmount( + ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays)); + finalFields.setFieldAmount( + ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false)); + ripple::STObject previousFields(ripple::sfPreviousFields); + previousFields.setFieldAmount( + ripple::sfTakerPays, ripple::STAmount(issue1, perviousTakerPays)); + previousFields.setFieldAmount( + ripple::sfTakerGets, ripple::STAmount(perviousTakerGets, false)); + ripple::STObject metaObj(ripple::sfTransactionMetaData); + ripple::STArray metaArray{1}; + ripple::STObject node(ripple::sfModifiedNode); + node.setFieldU16(ripple::sfLedgerEntryType, ripple::ltOFFER); + node.emplace_back(std::move(finalFields)); + node.emplace_back(std::move(previousFields)); + metaArray.push_back(node); + metaObj.setFieldArray(ripple::sfAffectedNodes, metaArray); + metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS); + metaObj.setFieldU32(ripple::sfTransactionIndex, transactionIndex); + return metaObj; +} + +ripple::STObject +CreateMetaDataForCreateOffer( + std::string_view currency, + std::string_view issueId, + uint32_t transactionIndex, + int finalTakerGets, + int finalTakerPays) +{ + ripple::STObject finalFields(ripple::sfNewFields); + ripple::Issue issue1 = 
GetIssue(currency, issueId); + finalFields.setFieldAmount( + ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays)); + finalFields.setFieldAmount( + ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false)); + ripple::STObject metaObj(ripple::sfTransactionMetaData); + ripple::STArray metaArray{1}; + ripple::STObject node(ripple::sfCreatedNode); + node.setFieldU16(ripple::sfLedgerEntryType, ripple::ltOFFER); + node.emplace_back(std::move(finalFields)); + metaArray.push_back(node); + metaObj.setFieldArray(ripple::sfAffectedNodes, metaArray); + metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS); + metaObj.setFieldU32(ripple::sfTransactionIndex, transactionIndex); + return metaObj; +} + +ripple::STObject +CreateMetaDataForCancelOffer( + std::string_view currency, + std::string_view issueId, + uint32_t transactionIndex, + int finalTakerGets, + int finalTakerPays) +{ + ripple::STObject finalFields(ripple::sfFinalFields); + ripple::Issue issue1 = GetIssue(currency, issueId); + finalFields.setFieldAmount( + ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays)); + finalFields.setFieldAmount( + ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false)); + ripple::STObject metaObj(ripple::sfTransactionMetaData); + ripple::STArray metaArray{1}; + ripple::STObject node(ripple::sfDeletedNode); + node.setFieldU16(ripple::sfLedgerEntryType, ripple::ltOFFER); + node.emplace_back(std::move(finalFields)); + metaArray.push_back(node); + metaObj.setFieldArray(ripple::sfAffectedNodes, metaArray); + metaObj.setFieldU8(ripple::sfTransactionResult, ripple::tesSUCCESS); + metaObj.setFieldU32(ripple::sfTransactionIndex, transactionIndex); + return metaObj; +} + +ripple::STObject +CreateOwnerDirLedgerObject( + std::vector indexes, + std::string_view rootIndex) +{ + ripple::STObject ownerDir(ripple::sfLedgerEntry); + ownerDir.setFieldU16(ripple::sfLedgerEntryType, ripple::ltDIR_NODE); + ownerDir.setFieldV256(ripple::sfIndexes, 
ripple::STVector256{indexes}); + ownerDir.setFieldH256(ripple::sfRootIndex, ripple::uint256{rootIndex}); + ownerDir.setFieldU32(ripple::sfFlags, 0); + return ownerDir; +} + +ripple::STObject +CreatePaymentChannelLedgerObject( + std::string_view accountId, + std::string_view destId, + int amount, + int balance, + uint32_t settleDelay, + std::string_view previousTxnId, + uint32_t previousTxnSeq) +{ + ripple::STObject channel(ripple::sfLedgerEntry); + channel.setFieldU16(ripple::sfLedgerEntryType, ripple::ltPAYCHAN); + channel.setAccountID(ripple::sfAccount, GetAccountIDWithString(accountId)); + channel.setAccountID(ripple::sfDestination, GetAccountIDWithString(destId)); + channel.setFieldAmount(ripple::sfAmount, ripple::STAmount(amount, false)); + channel.setFieldAmount(ripple::sfBalance, ripple::STAmount(balance, false)); + channel.setFieldU32(ripple::sfSettleDelay, settleDelay); + channel.setFieldU64(ripple::sfOwnerNode, 0); + channel.setFieldH256( + ripple::sfPreviousTxnID, ripple::uint256{previousTxnId}); + channel.setFieldU32(ripple::sfPreviousTxnLgrSeq, previousTxnSeq); + channel.setFieldU32(ripple::sfFlags, 0); + uint8_t key[33] = {0}; + key[0] = 2; // KeyType::secp256k1 + ripple::Slice slice(key, 33); + channel.setFieldVL(ripple::sfPublicKey, slice); + return channel; +} + +[[nodiscard]] ripple::STObject +CreateRippleStateLedgerObject( + std::string_view accountId, + std::string_view currency, + std::string_view issuerId, + int balance, + std::string_view lowNodeAccountId, + int lowLimit, + std::string_view highNodeAccountId, + int highLimit, + std::string_view previousTxnId, + uint32_t previousTxnSeq) +{ + auto line = ripple::STObject(ripple::sfLedgerEntry); + line.setFieldU16(ripple::sfLedgerEntryType, ripple::ltRIPPLE_STATE); + line.setFieldU32(ripple::sfFlags, 0); + line.setFieldAmount( + ripple::sfBalance, + ripple::STAmount(GetIssue(currency, issuerId), balance)); + line.setFieldAmount( + ripple::sfHighLimit, + ripple::STAmount(GetIssue(currency, 
highNodeAccountId), highLimit)); + line.setFieldAmount( + ripple::sfLowLimit, + ripple::STAmount(GetIssue(currency, lowNodeAccountId), lowLimit)); + line.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{previousTxnId}); + line.setFieldU32(ripple::sfPreviousTxnLgrSeq, previousTxnSeq); + return line; +} + +ripple::STObject +CreateOfferLedgerObject( + std::string_view account, + int takerGets, + int takerPays, + std::string_view currency, + std::string_view issueId) +{ + ripple::STObject offer(ripple::sfLedgerEntry); + offer.setFieldU16(ripple::sfLedgerEntryType, ripple::ltOFFER); + offer.setAccountID(ripple::sfAccount, GetAccountIDWithString(account)); + offer.setFieldU32(ripple::sfSequence, 0); + offer.setFieldU32(ripple::sfFlags, 0); + ripple::Issue issue1 = GetIssue(currency, issueId); + offer.setFieldAmount( + ripple::sfTakerGets, ripple::STAmount(issue1, takerGets)); + offer.setFieldAmount( + ripple::sfTakerPays, ripple::STAmount(takerPays, false)); + offer.setFieldH256(ripple::sfBookDirectory, ripple::uint256{}); + offer.setFieldU64(ripple::sfBookNode, 0); + offer.setFieldU64(ripple::sfOwnerNode, 0); + offer.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{}); + offer.setFieldU32(ripple::sfPreviousTxnLgrSeq, 0); + return offer; +} + +ripple::STObject +CreateTicketLedgerObject(std::string_view account, uint32_t sequence) +{ + ripple::STObject ticket(ripple::sfLedgerEntry); + ticket.setFieldU16(ripple::sfLedgerEntryType, ripple::ltTICKET); + ticket.setAccountID(ripple::sfAccount, GetAccountIDWithString(account)); + ticket.setFieldU32(ripple::sfFlags, 0); + ticket.setFieldU64(ripple::sfOwnerNode, 0); + ticket.setFieldU32(ripple::sfTicketSequence, sequence); + ticket.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{}); + ticket.setFieldU32(ripple::sfPreviousTxnLgrSeq, 0); + return ticket; +} + +ripple::STObject +CreateEscrowLedgerObject(std::string_view account, std::string_view dest) +{ + ripple::STObject escrow(ripple::sfLedgerEntry); + 
escrow.setFieldU16(ripple::sfLedgerEntryType, ripple::ltESCROW); + escrow.setAccountID(ripple::sfAccount, GetAccountIDWithString(account)); + escrow.setAccountID(ripple::sfDestination, GetAccountIDWithString(dest)); + escrow.setFieldAmount(ripple::sfAmount, ripple::STAmount(0, false)); + escrow.setFieldU64(ripple::sfOwnerNode, 0); + escrow.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{}); + escrow.setFieldU32(ripple::sfPreviousTxnLgrSeq, 0); + escrow.setFieldU32(ripple::sfFlags, 0); + return escrow; +} + +ripple::STObject +CreateCheckLedgerObject(std::string_view account, std::string_view dest) +{ + ripple::STObject check(ripple::sfLedgerEntry); + check.setFieldU16(ripple::sfLedgerEntryType, ripple::ltCHECK); + check.setAccountID(ripple::sfAccount, GetAccountIDWithString(account)); + check.setAccountID(ripple::sfDestination, GetAccountIDWithString(dest)); + check.setFieldU32(ripple::sfFlags, 0); + check.setFieldU64(ripple::sfOwnerNode, 0); + check.setFieldU64(ripple::sfDestinationNode, 0); + check.setFieldAmount(ripple::sfSendMax, ripple::STAmount(0, false)); + check.setFieldU32(ripple::sfSequence, 0); + check.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{}); + check.setFieldU32(ripple::sfPreviousTxnLgrSeq, 0); + return check; +} + +ripple::STObject +CreateDepositPreauthLedgerObject( + std::string_view account, + std::string_view auth) +{ + ripple::STObject depositPreauth(ripple::sfLedgerEntry); + depositPreauth.setFieldU16( + ripple::sfLedgerEntryType, ripple::ltDEPOSIT_PREAUTH); + depositPreauth.setAccountID( + ripple::sfAccount, GetAccountIDWithString(account)); + depositPreauth.setAccountID( + ripple::sfAuthorize, GetAccountIDWithString(auth)); + depositPreauth.setFieldU32(ripple::sfFlags, 0); + depositPreauth.setFieldU64(ripple::sfOwnerNode, 0); + depositPreauth.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{}); + depositPreauth.setFieldU32(ripple::sfPreviousTxnLgrSeq, 0); + return depositPreauth; +} diff --git 
a/unittests/util/TestObject.h b/unittests/util/TestObject.h new file mode 100644 index 00000000..8c408578 --- /dev/null +++ b/unittests/util/TestObject.h @@ -0,0 +1,193 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2023, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include + +#include + +/* + * Create AccountID object with string + */ +[[nodiscard]] ripple::AccountID +GetAccountIDWithString(std::string_view id); + +/* + * Create a simple ledgerInfo object with only hash and seq + */ +[[nodiscard]] ripple::LedgerInfo +CreateLedgerInfo(std::string_view ledgerHash, ripple::LedgerIndex seq); + +/* + * Create a FeeSetting ledger object + */ +[[nodiscard]] ripple::STObject +CreateFeeSettingLedgerObject( + uint64_t base, + uint32_t reserveInc, + uint32_t reserveBase, + uint32_t refFeeUnit, + uint32_t flag); + +/* + * Create a FeeSetting ledger object and return its blob + */ +[[nodiscard]] ripple::Blob +CreateFeeSettingBlob( + uint64_t base, + uint32_t reserveInc, + uint32_t reserveBase, + uint32_t refFeeUnit, + uint32_t flag); + +/* + * Create a payment transaction object + */ +[[nodiscard]] ripple::STObject +CreatePaymentTransactionObject( + std::string_view accountId1, + std::string_view accountId2, + int amount, + int fee, + uint32_t seq); + +/* + * Create an account root ledger object + */ +[[nodiscard]] ripple::STObject +CreateAccountRootObject( + std::string_view accountId, + uint32_t flag, + uint32_t seq, + int balance, + uint32_t ownerCount, + std::string_view previousTxnID, + uint32_t previousTxnSeq); + +/* + * Create a createoffer treansaction + * Taker pay is XRP + */ +[[nodiscard]] ripple::STObject +CreateCreateOfferTransactionObject( + std::string_view accountId, + int fee, + uint32_t seq, + std::string_view currency, + std::string_view issuer, + int takerGets, + int takerPays); + +/* + * Return an issue object with given currency and issue account + */ +[[nodiscard]] ripple::Issue +GetIssue(std::string_view currency, std::string_view issuerId); + +/* + * Create a offer change meta data + */ +[[nodiscard]] ripple::STObject +CreateMetaDataForBookChange( + std::string_view currency, + std::string_view issueId, + 
uint32_t transactionIndex, + int finalTakerGets, + int perviousTakerGets, + int finalTakerPays, + int perviousTakerPays); + +/* + * Meta data for adding a offer object + */ +[[nodiscard]] ripple::STObject +CreateMetaDataForCreateOffer( + std::string_view currency, + std::string_view issueId, + uint32_t transactionIndex, + int finalTakerGets, + int finalTakerPays); + +/* + * Meta data for removing a offer object + */ +[[nodiscard]] ripple::STObject +CreateMetaDataForCancelOffer( + std::string_view currency, + std::string_view issueId, + uint32_t transactionIndex, + int finalTakerGets, + int finalTakerPays); + +/* + * Create a owner dir ledger object + */ +[[nodiscard]] ripple::STObject +CreateOwnerDirLedgerObject( + std::vector indexes, + std::string_view rootIndex); + +/* + * Create a payment channel ledger object + */ +[[nodiscard]] ripple::STObject +CreatePaymentChannelLedgerObject( + std::string_view accountId, + std::string_view destId, + int amount, + int balance, + uint32_t settleDelay, + std::string_view previousTxnId, + uint32_t previousTxnSeq); + +[[nodiscard]] ripple::STObject +CreateRippleStateLedgerObject( + std::string_view accountId, + std::string_view currency, + std::string_view issuerId, + int balance, + std::string_view lowNodeAccountId, + int lowLimit, + std::string_view highNodeAccountId, + int highLimit, + std::string_view previousTxnId, + uint32_t previousTxnSeq); + +ripple::STObject +CreateOfferLedgerObject( + std::string_view account, + int takerGets, + int takerPays, + std::string_view currency, + std::string_view issueId); + +ripple::STObject +CreateTicketLedgerObject(std::string_view rootIndex, uint32_t sequence); + +ripple::STObject +CreateEscrowLedgerObject(std::string_view account, std::string_view dest); + +ripple::STObject +CreateCheckLedgerObject(std::string_view account, std::string_view dest); + +ripple::STObject +CreateDepositPreauthLedgerObject( + std::string_view account, + std::string_view auth);