Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-05 04:15:51 +00:00)
Compare commits
184 Commits
@@ -34,7 +34,7 @@ BreakBeforeBinaryOperators: false
 BreakBeforeBraces: Custom
 BreakBeforeTernaryOperators: true
 BreakConstructorInitializersBeforeComma: true
-ColumnLimit: 80
+ColumnLimit: 120
 CommentPragmas: '^ IWYU pragma:'
 ConstructorInitializerAllOnOneLineOrOnePerLine: true
 ConstructorInitializerIndentWidth: 4
@@ -7,3 +7,4 @@
 # clang-format
 e41150248a97e4bdc1cf21b54650c4bb7c63928e
 2e542e7b0d94451a933c88778461cc8d3d7e6417
+d816ef54abd8e8e979b9c795bdb657a8d18f5e95
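This hunk appears to extend the repository's blame-ignore list (conventionally `.git-blame-ignore-revs`) with the bulk clang-format commit `d816ef54ab` introduced later in this change set. A minimal sketch of how such a list is consumed locally; both flags are standard git (2.23 and later):

``` bash
# Ignore the listed bulk-reformat commits for one blame run
git blame --ignore-revs-file .git-blame-ignore-revs src/main/main.cpp

# Or configure the clone so every `git blame` uses the list automatically
git config --local blame.ignoreRevsFile .git-blame-ignore-revs
```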
20 .githooks/ensure_release_tag Executable file
@@ -0,0 +1,20 @@
#!/bin/bash

# Pushing a release branch requires an annotated tag at the released commit
branch=$(git rev-parse --abbrev-ref HEAD)

if [[ $branch =~ master ]]; then
    # check if HEAD commit is tagged
    if ! git describe --exact-match HEAD; then
        echo "Commits to master must be tagged"
        exit 1
    fi
elif [[ $branch =~ release/* ]]; then
    IFS=/ read -r branch rel_ver <<< ${branch}
    tag=$(git describe --tags --abbrev=0)
    if [[ "${rel_ver}" != "${tag}" ]]; then
        echo "release/${rel_ver} branches must have annotated tag ${rel_ver}"
        echo "git tag -am\"${rel_ver}\" ${rel_ver}"
        exit 1
    fi
fi
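The hook above rejects a push of a `release/<version>` branch unless the newest annotated tag matches the version in the branch name, and rejects untagged commits on `master`. A hypothetical sequence that satisfies it, using `1.0.2` purely as an example version:

``` bash
git checkout -b release/1.0.2
git tag -am "1.0.2" 1.0.2        # annotated tag at the released commit, as the hook itself suggests
git push origin release/1.0.2
```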
27 .githooks/pre-commit Executable file
@@ -0,0 +1,27 @@
#!/bin/bash

exec 1>&2

# paths to check and re-format
sources="src unittests"
formatter="clang-format-11 -i"

first=$(git diff $sources)
find $sources -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
second=$(git diff $sources)
changes=$(diff <(echo "$first") <(echo "$second") | wc -l | sed -e 's/^[[:space:]]*//')

if [ "$changes" != "0" ]; then
    cat <<\EOF

WARNING
-----------------------------------------------------------------------------
  Automatically re-formatted code with `clang-format` - commit was aborted.
  Please manually add any updated files and commit again.
-----------------------------------------------------------------------------

EOF
    exit 1
fi

.githooks/ensure_release_tag
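Neither hook takes effect until the clone points at the `.githooks` directory; the one-time setup command (given again in the CONTRIBUTING.md changes below) is:

``` bash
git config --local core.hooksPath .githooks
```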
21 .github/actions/sign/action.yml vendored
@@ -1,21 +0,0 @@
name: 'Sign packages'
runs:
  using: "composite"

  steps:
    - name: Sign
      shell: bash
      run: |
        set -ex -o pipefail
        echo "$GPG_KEY_B64"| base64 -d | gpg --batch --no-tty --allow-secret-key-import --import -
        unset GPG_KEY_B64
        export GPG_PASSPHRASE=$(echo $GPG_KEY_PASS_B64 | base64 -di)
        unset GPG_KEY_PASS_B64
        export GPG_KEYID=$(gpg --with-colon --list-secret-keys | head -n1 | cut -d : -f 5)
        for PKG in $(ls *.deb); do
          dpkg-sig \
            -g "--no-tty --digest-algo 'sha512' --passphrase '${GPG_PASSPHRASE}' --pinentry-mode=loopback" \
            -k "${GPG_KEYID}" \
            --sign builder \
            $PKG
        done
176 .github/workflows/build.yml vendored
@@ -1,9 +1,9 @@
 name: Build Clio
 on:
   push:
-    branches: [master, release, develop, develop-next]
+    branches: [master, release/*, develop, develop-next]
   pull_request:
-    branches: [master, release, develop, develop-next]
+    branches: [master, release/*, develop, develop-next]
   workflow_dispatch:

 jobs:
@@ -17,7 +17,7 @@ jobs:

   build_clio:
     name: Build Clio
-    runs-on: [self-hosted, Linux]
+    runs-on: [self-hosted, heavy]
     needs: lint
     strategy:
       fail-fast: false
@@ -29,19 +29,21 @@
           - suffix: rpm
             image: rippleci/clio-rpm-builder:2022-09-17
             script: rpm

     container:
       image: ${{ matrix.type.image }}

     steps:
       - uses: actions/checkout@v3
         with:
           path: clio
+          fetch-depth: 0

       - name: Clone Clio packaging repo
         uses: actions/checkout@v3
         with:
           path: clio-packages
           repository: XRPLF/clio-packages
+          ref: main

       - name: Build
         shell: bash
@@ -54,7 +56,6 @@ jobs:
           cmake --build clio-packages/build --parallel $(nproc)
           cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests .
           mv ./clio-packages/build/*.${{ matrix.type.suffix }} .

       - name: Artifact packages
         uses: actions/upload-artifact@v3
         with:
@@ -67,59 +68,57 @@
           name: clio_tests-${{ matrix.type.suffix }}
           path: ${{ github.workspace }}/clio_tests

-  sign:
-    name: Sign packages
-    needs: build_clio
-    runs-on: ubuntu-20.04
-    if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/release' || github.ref == 'refs/heads/develop'
-    env:
-      GPG_KEY_B64: ${{ secrets.GPG_KEY_B64 }}
-      GPG_KEY_PASS_B64: ${{ secrets.GPG_KEY_PASS_B64 }}
-    strategy:
-      fail-fast: false
-      matrix:
-        type:
-          - suffix: deb
-            image: ubuntu:20.04
-            script: dpkg
-          # - suffix: rpm
-          #   image: centos:7
-          #   script: rpm
-    container:
-      image: ${{ matrix.type.image }}
+  build_dev:
+    name: Build on Mac/Clang14 and run tests
+    needs: lint
+    continue-on-error: false
+    runs-on: [self-hosted, macOS]

     steps:
       - uses: actions/checkout@v3
-      - name: Install dpkg-sig
-        run: |
-          apt-get update && apt-get install -y dpkg-sig gnupg
-      - name: Get package artifact
-        uses: actions/download-artifact@v3
-        with:
-          name: clio_${{ matrix.type.suffix }}_packages
-          path: clio
-
-      - name: find packages
-        run: find . -name "*.${{ matrix.type.suffix }}"
-
-      - name: Sign packages
-        uses: ./.github/actions/sign
-
-      - name: Verify the signature
-        run: |
-          set -e
-          for PKG in $(ls *.deb); do
-            gpg --verify "${PKG}"
-          done
-
-      - name: Get short SHA
-        id: shortsha
-        run: echo "::set-output name=sha8::$(echo ${GITHUB_SHA} | cut -c1-8)"
-
-      - name: Artifact signed packages
-        uses: actions/upload-artifact@v2
+      - name: Check Boost cache
+        id: boost
+        uses: actions/cache@v3
         with:
-          name: signed-clio-deb-packages-${{ steps.shortsha.outputs.sha8 }}
-          path: ${{ github.workspace }}/*.deb
+          path: boost_1_77_0
+          key: ${{ runner.os }}-boost

+      - name: Build Boost
+        if: ${{ steps.boost.outputs.cache-hit != 'true' }}
+        run: |
+          rm -rf boost_1_77_0.tar.gz boost_1_77_0 # cleanup if needed first
+          curl -s -fOJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
+          tar zxf boost_1_77_0.tar.gz
+          cd boost_1_77_0
+          ./bootstrap.sh
+          ./b2 define=BOOST_ASIO_HAS_STD_INVOKE_RESULT cxxflags="-std=c++20"

+      - name: Install dependencies
+        run: |
+          brew install llvm@14 pkg-config protobuf openssl ninja cassandra-cpp-driver bison cmake

+      - name: Setup environment for llvm-14
+        run: |
+          export PATH="/usr/local/opt/llvm@14/bin:$PATH"
+          export LDFLAGS="-L/usr/local/opt/llvm@14/lib -L/usr/local/opt/llvm@14/lib/c++ -Wl,-rpath,/usr/local/opt/llvm@14/lib/c++"
+          export CPPFLAGS="-I/usr/local/opt/llvm@14/include"

+      - name: Build clio
+        run: |
+          export BOOST_ROOT=$(pwd)/boost_1_77_0
+          cd clio
+          cmake -B build -DCMAKE_C_COMPILER='/usr/local/opt/llvm@14/bin/clang' -DCMAKE_CXX_COMPILER='/usr/local/opt/llvm@14/bin/clang++'
+          if ! cmake --build build -j; then
+            echo '# 🔥🔥 MacOS AppleClang build failed!💥' >> $GITHUB_STEP_SUMMARY
+            exit 1
+          fi
+      - name: Run Test
+        run: |
+          cd clio/build
+          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

   test_clio:
     name: Test Clio
@@ -140,3 +139,74 @@ jobs:
       - name: Run tests
         timeout-minutes: 10
         uses: ./.github/actions/test

+  code_coverage:
+    name: Build on Linux and code coverage
+    needs: lint
+    continue-on-error: false
+    runs-on: ubuntu-22.04
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          path: clio
+
+      - name: Check Boost cache
+        id: boost
+        uses: actions/cache@v3
+        with:
+          path: boost
+          key: ${{ runner.os }}-boost
+
+      - name: Build boost
+        if: steps.boost.outputs.cache-hit != 'true'
+        run: |
+          curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
+          tar zxf boost_1_77_0.tar.gz
+          mv boost_1_77_0 boost
+          cd boost
+          ./bootstrap.sh
+          ./b2
+      - name: install deps
+        run: |
+          sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential doxygen bison flex autoconf clang-format gcovr
+      - name: Build clio
+        run: |
+          export BOOST_ROOT=$(pwd)/boost
+          cd clio
+          cmake -B build -DCODE_COVERAGE=on -DTEST_PARAMETER='--gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"'
+          if ! cmake --build build -j$(nproc); then
+            echo '# 🔥Ubuntu build🔥 failed!💥' >> $GITHUB_STEP_SUMMARY
+            exit 1
+          fi
+          cd build
+          make clio_tests-ccov
+      - name: Code Coverage Summary Report
+        uses: irongut/CodeCoverageSummary@v1.2.0
+        with:
+          filename: clio/build/clio_tests-gcc-cov/out.xml
+          badge: true
+          output: both
+          format: markdown
+
+      - name: Save PR number and ccov report
+        run: |
+          mkdir -p ./UnitTestCoverage
+          echo ${{ github.event.number }} > ./UnitTestCoverage/NR
+          cp clio/build/clio_tests-gcc-cov/report.html ./UnitTestCoverage/report.html
+          cp code-coverage-results.md ./UnitTestCoverage/out.md
+          cat code-coverage-results.md > $GITHUB_STEP_SUMMARY
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        with:
+          files: clio/build/clio_tests-gcc-cov/out.xml
+
+      - uses: actions/upload-artifact@v3
+        with:
+          name: UnitTestCoverage
+          path: UnitTestCoverage/
+
+      - uses: actions/upload-artifact@v3
+        with:
+          name: code_coverage_report
+          path: clio/build/clio_tests-gcc-cov/out.xml
4 .gitignore vendored
@@ -1,4 +1,6 @@
 *clio*.log
-build/
+build*/
 .vscode
+.python-version
 config.json
+src/main/impl/Build.cpp
39 CMake/Build.cpp.in Normal file
@@ -0,0 +1,39 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <main/Build.h>

namespace Build {
static constexpr char versionString[] = "@VERSION@";

std::string const&
getClioVersionString()
{
    static std::string const value = versionString;
    return value;
}

std::string const&
getClioFullVersionString()
{
    static std::string const value = "clio-" + getClioVersionString();
    return value;
}

}  // namespace Build
@@ -1,15 +1,33 @@
 #[===================================================================[
-   read version from source
+   write version to source
 #]===================================================================]

-file (STRINGS src/main/impl/Build.cpp BUILD_INFO)
-foreach (line_ ${BUILD_INFO})
-  if (line_ MATCHES "versionString[ ]*=[ ]*\"(.+)\"")
-    set (clio_version ${CMAKE_MATCH_1})
-  endif ()
-endforeach ()
-if (clio_version)
-  message (STATUS "clio version: ${clio_version}")
-else ()
-  message (FATAL_ERROR "unable to determine clio version")
-endif ()
+find_package(Git REQUIRED)
+
+set(GIT_COMMAND rev-parse --short HEAD)
+execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+set(GIT_COMMAND branch --show-current)
+execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+if(BRANCH STREQUAL "")
+  set(BRANCH "dev")
+endif()
+
+if(NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-ref>
+  execute_process(COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
+  set(VERSION "${DATE}-${BRANCH}-${REV}")
+else()
+  set(GIT_COMMAND describe --tags)
+  execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
+  set(VERSION "${TAG_VERSION}-${REV}")
+endif()
+
+if(CMAKE_BUILD_TYPE MATCHES Debug)
+  set(VERSION "${VERSION}+DEBUG")
+endif()
+
+message(STATUS "Build version: ${VERSION}")
+set(clio_version "${VERSION}")
+
+configure_file(CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)
126 CMake/coverage.cmake Normal file
@@ -0,0 +1,126 @@
# call add_converage(module_name) to add coverage targets for the given module
function(add_converage module)
  if("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang"
     OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
    message("[Coverage] Building with llvm Code Coverage Tools")
    # Using llvm gcov ; llvm install by xcode
    set(LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin)
    if(NOT EXISTS ${LLVM_COV_PATH}/llvm-cov)
      message(FATAL_ERROR "llvm-cov not found! Aborting.")
    endif()

    # set Flags
    target_compile_options(${module} PRIVATE -fprofile-instr-generate
                                             -fcoverage-mapping)
    target_link_options(${module} PUBLIC -fprofile-instr-generate
                                         -fcoverage-mapping)

    target_compile_options(clio PRIVATE -fprofile-instr-generate
                                        -fcoverage-mapping)
    target_link_options(clio PUBLIC -fprofile-instr-generate
                                    -fcoverage-mapping)

    # llvm-cov
    add_custom_target(
      ${module}-ccov-preprocessing
      COMMAND LLVM_PROFILE_FILE=${module}.profraw $<TARGET_FILE:${module}>
      COMMAND ${LLVM_COV_PATH}/llvm-profdata merge -sparse ${module}.profraw -o
              ${module}.profdata
      DEPENDS ${module})

    add_custom_target(
      ${module}-ccov-show
      COMMAND ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
              -instr-profile=${module}.profdata -show-line-counts-or-regions
      DEPENDS ${module}-ccov-preprocessing)

    # add summary for CI parse
    add_custom_target(
      ${module}-ccov-report
      COMMAND
        ${LLVM_COV_PATH}/llvm-cov report $<TARGET_FILE:${module}>
        -instr-profile=${module}.profdata
        -ignore-filename-regex=".*_makefiles|.*unittests|.*_deps"
        -show-region-summary=false
      DEPENDS ${module}-ccov-preprocessing)

    # exclude libs and unittests self
    add_custom_target(
      ${module}-ccov
      COMMAND
        ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
        -instr-profile=${module}.profdata -show-line-counts-or-regions
        -output-dir=${module}-llvm-cov -format="html"
        -ignore-filename-regex=".*_makefiles|.*unittests|.*_deps" > /dev/null 2>&1
      DEPENDS ${module}-ccov-preprocessing)

    add_custom_command(
      TARGET ${module}-ccov
      POST_BUILD
      COMMENT
        "Open ${module}-llvm-cov/index.html in your browser to view the coverage report."
    )
  elseif("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}"
                                                   MATCHES "GNU")
    message("[Coverage] Building with Gcc Code Coverage Tools")

    find_program(GCOV_PATH gcov)
    if(NOT GCOV_PATH)
      message(FATAL_ERROR "gcov not found! Aborting...")
    endif() # NOT GCOV_PATH
    find_program(GCOVR_PATH gcovr)
    if(NOT GCOVR_PATH)
      message(FATAL_ERROR "gcovr not found! Aborting...")
    endif() # NOT GCOVR_PATH

    set(COV_OUTPUT_PATH ${module}-gcc-cov)
    target_compile_options(${module} PRIVATE -fprofile-arcs -ftest-coverage
                                             -fPIC)
    target_link_libraries(${module} PRIVATE gcov)

    target_compile_options(clio PRIVATE -fprofile-arcs -ftest-coverage
                                        -fPIC)
    target_link_libraries(clio PRIVATE gcov)
    # this target is used for CI as well generate the summary out.xml will send
    # to github action to generate markdown, we can paste it to comments or
    # readme
    add_custom_target(
      ${module}-ccov
      COMMAND ${module} ${TEST_PARAMETER}
      COMMAND rm -rf ${COV_OUTPUT_PATH}
      COMMAND mkdir ${COV_OUTPUT_PATH}
      COMMAND
        gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR} -x
        ${COV_OUTPUT_PATH}/out.xml --exclude='${CMAKE_SOURCE_DIR}/unittests/'
        --exclude='${PROJECT_BINARY_DIR}/'
      COMMAND
        gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR}
        --html ${COV_OUTPUT_PATH}/report.html
        --exclude='${CMAKE_SOURCE_DIR}/unittests/'
        --exclude='${PROJECT_BINARY_DIR}/'
      WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
      COMMENT "Running gcovr to produce Cobertura code coverage report.")

    # generate the detail report
    add_custom_target(
      ${module}-ccov-report
      COMMAND ${module} ${TEST_PARAMETER}
      COMMAND rm -rf ${COV_OUTPUT_PATH}
      COMMAND mkdir ${COV_OUTPUT_PATH}
      COMMAND
        gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR}
        --html-details ${COV_OUTPUT_PATH}/index.html
        --exclude='${CMAKE_SOURCE_DIR}/unittests/'
        --exclude='${PROJECT_BINARY_DIR}/'
      WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
      COMMENT "Running gcovr to produce Cobertura code coverage report.")
    add_custom_command(
      TARGET ${module}-ccov-report
      POST_BUILD
      COMMENT
        "Open ${COV_OUTPUT_PATH}/index.html in your browser to view the coverage report."
    )
  else()
    message(FATAL_ERROR "Complier not support yet")
  endif()
endfunction()
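A sketch of driving the gcovr path of `add_converage` locally, mirroring what the `code_coverage` CI job above does (the gtest filter is the one that workflow passes; adjust to taste):

``` bash
cmake -B build -DCODE_COVERAGE=on \
      -DTEST_PARAMETER='--gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"'
cmake --build build -j$(nproc)
cd build && make clio_tests-ccov   # produces clio_tests-gcc-cov/out.xml and report.html
```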
@@ -1,31 +0,0 @@
set(POSTGRES_INSTALL_DIR ${CMAKE_BINARY_DIR}/postgres)
set(POSTGRES_LIBS pq pgcommon pgport)
ExternalProject_Add(postgres
  GIT_REPOSITORY https://github.com/postgres/postgres.git
  GIT_TAG REL_14_1
  GIT_SHALLOW 1
  LOG_CONFIGURE 1
  LOG_BUILD 1
  CONFIGURE_COMMAND ./configure --prefix ${POSTGRES_INSTALL_DIR} --without-readline --verbose
  BUILD_COMMAND ${CMAKE_COMMAND} -E env --unset=MAKELEVEL make VERBOSE=${CMAKE_VERBOSE_MAKEFILE} -j32
  BUILD_IN_SOURCE 1
  INSTALL_COMMAND ${CMAKE_COMMAND} -E env make -s --no-print-directory install
  UPDATE_COMMAND ""
  BUILD_BYPRODUCTS
    ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pq${CMAKE_STATIC_LIBRARY_SUFFIX}}
    ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pgcommon${CMAKE_STATIC_LIBRARY_SUFFIX}}
    ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pgport${CMAKE_STATIC_LIBRARY_SUFFIX}}
)
ExternalProject_Get_Property (postgres BINARY_DIR)

foreach(_lib ${POSTGRES_LIBS})
  add_library(${_lib} STATIC IMPORTED GLOBAL)
  add_dependencies(${_lib} postgres)
  set_target_properties(${_lib} PROPERTIES
    IMPORTED_LOCATION ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${_lib}.a)
  set_target_properties(${_lib} PROPERTIES
    INTERFACE_INCLUDE_DIRECTORIES ${POSTGRES_INSTALL_DIR}/include)
  target_link_libraries(clio PUBLIC ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${_lib}${CMAKE_STATIC_LIBRARY_SUFFIX})
endforeach()
add_dependencies(clio postgres)
target_include_directories(clio PUBLIC ${POSTGRES_INSTALL_DIR}/include)
11 CMake/deps/SourceLocation.cmake Normal file
@@ -0,0 +1,11 @@
include(CheckIncludeFileCXX)

check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE)
if(SOURCE_LOCATION_AVAILABLE)
  target_compile_definitions(clio PUBLIC "HAS_SOURCE_LOCATION")
endif()

check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if(EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
  target_compile_definitions(clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif()
@@ -144,8 +144,10 @@ if(NOT cassandra)
 else()
   message("Found system installed cassandra cpp driver")
   message(${cassandra})

   find_path(cassandra_includes NAMES cassandra.h REQUIRED)
   message(${cassandra_includes})
+  get_filename_component(CASSANDRA_HEADER ${cassandra_includes}/cassandra.h REALPATH)
+  get_filename_component(CASSANDRA_HEADER_DIR ${CASSANDRA_HEADER} DIRECTORY)
   target_link_libraries (clio PUBLIC ${cassandra})
-  target_include_directories(clio INTERFACE ${cassandra_includes})
+  target_include_directories(clio PUBLIC ${CASSANDRA_HEADER_DIR})
 endif()
@@ -10,10 +10,13 @@ if(NOT googletest_POPULATED)
   add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
 endif()

-target_link_libraries(clio_tests PUBLIC clio gtest_main)
+target_link_libraries(clio_tests PUBLIC clio gmock_main)
+target_include_directories(clio_tests PRIVATE unittests)

 enable_testing()

 include(GoogleTest)

-gtest_discover_tests(clio_tests)
+#increase timeout for tests discovery to 10 seconds, by default it is 5s. As more unittests added, we start to hit this issue
+#https://github.com/google/googletest/issues/3475
+gtest_discover_tests(clio_tests DISCOVERY_TIMEOUT 10)
14 CMake/deps/libfmt.cmake Normal file
@@ -0,0 +1,14 @@
FetchContent_Declare(
  libfmt
  URL https://github.com/fmtlib/fmt/releases/download/9.1.0/fmt-9.1.0.zip
)

FetchContent_GetProperties(libfmt)

if(NOT libfmt_POPULATED)
  FetchContent_Populate(libfmt)
  add_subdirectory(${libfmt_SOURCE_DIR} ${libfmt_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio PUBLIC fmt)
@@ -11,6 +11,7 @@ ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json
 Restart=on-failure
 User=clio
 Group=clio
+LimitNOFILE=65536

 [Install]
-WantedBy=multi-user.target
+WantedBy=multi-user.target
@@ -1 +1,6 @@
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -Wall -Werror -Wno-dangling-else")
+target_compile_options(clio
+  PUBLIC -Wall
+         -Werror
+         -Wno-narrowing
+         -Wno-deprecated-declarations
+         -Wno-dangling-else)
167 CMakeLists.txt
@@ -14,21 +14,13 @@ if(VERBOSE)
   set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
 endif()

-if(NOT GIT_COMMIT_HASH)
-  if(VERBOSE)
-    message("GIT_COMMIT_HASH not provided...looking for git")
-  endif()
-  find_package(Git)
-  if(Git_FOUND)
-    execute_process(COMMAND ${GIT_EXECUTABLE} describe --always --abbrev=8
-                    OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
-    if(gch)
-      set(GIT_COMMIT_HASH "${gch}")
-      message(STATUS "Git commit: ${GIT_COMMIT_HASH}")
-      add_definitions(-DCLIO_GIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
-    endif()
-  endif()
-endif() #git
-
 if(PACKAGING)
   add_definitions(-DPKG=1)
 endif()

+#c++20 removed std::result_of but boost 1.75 is still using it.
+add_definitions(-DBOOST_ASIO_HAS_STD_INVOKE_RESULT=1)
+
+add_library(clio)
+target_compile_features(clio PUBLIC cxx_std_20)
@@ -39,73 +31,150 @@ include(ExternalProject)
include(CMake/settings.cmake)
include(CMake/ClioVersion.cmake)
include(CMake/deps/rippled.cmake)
include(CMake/deps/libfmt.cmake)
include(CMake/deps/Boost.cmake)
include(CMake/deps/cassandra.cmake)
include(CMake/deps/Postgres.cmake)
include(CMake/deps/SourceLocation.cmake)

target_sources(clio PRIVATE
    ## Main
    src/main/impl/Build.cpp
    ## Backend
    src/backend/BackendInterface.cpp
    src/backend/CassandraBackend.cpp
    src/backend/Pg.cpp
    src/backend/PostgresBackend.cpp
    src/backend/SimpleCache.cpp
    src/backend/LedgerCache.cpp
    ## NextGen Backend
    src/backend/cassandra/impl/Future.cpp
    src/backend/cassandra/impl/Cluster.cpp
    src/backend/cassandra/impl/Batch.cpp
    src/backend/cassandra/impl/Result.cpp
    src/backend/cassandra/impl/Tuple.cpp
    src/backend/cassandra/impl/SslContext.cpp
    src/backend/cassandra/Handle.cpp
    src/backend/cassandra/SettingsProvider.cpp
    ## ETL
    src/etl/ETLSource.cpp
    src/etl/ProbingETLSource.cpp
    src/etl/Source.cpp
    src/etl/ProbingSource.cpp
    src/etl/NFTHelpers.cpp
    src/etl/ReportingETL.cpp
    src/etl/ETLService.cpp
    src/etl/LoadBalancer.cpp
    src/etl/impl/ForwardCache.cpp
    ## Subscriptions
    src/subscriptions/SubscriptionManager.cpp
    ## RPC
    src/rpc/RPC.cpp
    src/rpc/Errors.cpp
    src/rpc/Factories.cpp
    src/rpc/RPCHelpers.cpp
    src/rpc/Counters.cpp
    src/rpc/WorkQueue.cpp
    ## RPC Methods
    # Account
    src/rpc/common/Specs.cpp
    src/rpc/common/Validators.cpp
    # RPC impl
    src/rpc/common/impl/HandlerProvider.cpp
    ## RPC handler
    src/rpc/handlers/AccountChannels.cpp
    src/rpc/handlers/AccountCurrencies.cpp
    src/rpc/handlers/AccountInfo.cpp
    src/rpc/handlers/AccountLines.cpp
    src/rpc/handlers/AccountOffers.cpp
    src/rpc/handlers/AccountNFTs.cpp
    src/rpc/handlers/AccountObjects.cpp
    src/rpc/handlers/AccountOffers.cpp
    src/rpc/handlers/AccountTx.cpp
    src/rpc/handlers/BookChanges.cpp
    src/rpc/handlers/BookOffers.cpp
    src/rpc/handlers/GatewayBalances.cpp
    src/rpc/handlers/NoRippleCheck.cpp
    # NFT
    src/rpc/handlers/NFTInfo.cpp
    # Ledger
    src/rpc/handlers/Ledger.cpp
    src/rpc/handlers/LedgerData.cpp
    src/rpc/handlers/LedgerEntry.cpp
    src/rpc/handlers/LedgerRange.cpp
    # Transaction
    src/rpc/handlers/Tx.cpp
    src/rpc/handlers/NFTBuyOffers.cpp
    src/rpc/handlers/NFTHistory.cpp
    src/rpc/handlers/NFTInfo.cpp
    src/rpc/handlers/NFTOffersCommon.cpp
    src/rpc/handlers/NFTSellOffers.cpp
    src/rpc/handlers/NoRippleCheck.cpp
    src/rpc/handlers/Random.cpp
    src/rpc/handlers/TransactionEntry.cpp
    src/rpc/handlers/AccountTx.cpp
    # Dex
    src/rpc/handlers/BookChanges.cpp
    src/rpc/handlers/BookOffers.cpp
    # NFT
    src/rpc/handlers/NFTOffers.cpp
    # Payment Channel
    src/rpc/handlers/ChannelAuthorize.cpp
    src/rpc/handlers/ChannelVerify.cpp
    # Subscribe
    src/rpc/handlers/Subscribe.cpp
    # Server
    src/rpc/handlers/ServerInfo.cpp
    # Utility
    src/rpc/handlers/Random.cpp)
    src/rpc/handlers/Tx.cpp
    ## Util
    src/config/Config.cpp
    src/log/Logger.cpp
    src/util/Taggable.cpp)

add_executable(clio_server src/main/main.cpp)
target_link_libraries(clio_server PUBLIC clio)

if(BUILD_TESTS)
    add_executable(clio_tests unittests/main.cpp)
    set(TEST_TARGET clio_tests)
    add_executable(${TEST_TARGET}
        unittests/Playground.cpp
        unittests/Logger.cpp
        unittests/Config.cpp
        unittests/ProfilerTest.cpp
        unittests/DOSGuard.cpp
        unittests/SubscriptionTest.cpp
        unittests/SubscriptionManagerTest.cpp
        unittests/util/TestObject.cpp
        unittests/util/StringUtils.cpp
        # ETL
        unittests/etl/ExtractionDataPipeTest.cpp
        unittests/etl/ExtractorTest.cpp
        unittests/etl/TransformerTest.cpp
        # RPC
        unittests/rpc/ErrorTests.cpp
        unittests/rpc/BaseTests.cpp
        unittests/rpc/RPCHelpersTest.cpp
        unittests/rpc/CountersTest.cpp
        unittests/rpc/AdminVerificationTest.cpp
        ## RPC handlers
        unittests/rpc/handlers/DefaultProcessorTests.cpp
        unittests/rpc/handlers/TestHandlerTests.cpp
        unittests/rpc/handlers/AccountCurrenciesTest.cpp
        unittests/rpc/handlers/AccountLinesTest.cpp
        unittests/rpc/handlers/AccountTxTest.cpp
        unittests/rpc/handlers/AccountOffersTest.cpp
        unittests/rpc/handlers/AccountInfoTest.cpp
        unittests/rpc/handlers/AccountChannelsTest.cpp
        unittests/rpc/handlers/AccountNFTsTest.cpp
        unittests/rpc/handlers/BookOffersTest.cpp
        unittests/rpc/handlers/GatewayBalancesTest.cpp
        unittests/rpc/handlers/TxTest.cpp
        unittests/rpc/handlers/TransactionEntryTest.cpp
        unittests/rpc/handlers/LedgerEntryTest.cpp
        unittests/rpc/handlers/LedgerRangeTest.cpp
        unittests/rpc/handlers/NoRippleCheckTest.cpp
        unittests/rpc/handlers/ServerInfoTest.cpp
        unittests/rpc/handlers/PingTest.cpp
        unittests/rpc/handlers/RandomTest.cpp
        unittests/rpc/handlers/NFTInfoTest.cpp
        unittests/rpc/handlers/NFTBuyOffersTest.cpp
        unittests/rpc/handlers/NFTSellOffersTest.cpp
        unittests/rpc/handlers/NFTHistoryTest.cpp
        unittests/rpc/handlers/SubscribeTest.cpp
        unittests/rpc/handlers/UnsubscribeTest.cpp
        unittests/rpc/handlers/LedgerDataTest.cpp
        unittests/rpc/handlers/AccountObjectsTest.cpp
        unittests/rpc/handlers/BookChangesTest.cpp
        unittests/rpc/handlers/LedgerTest.cpp
        # Backend
        unittests/backend/BackendFactoryTest.cpp
        unittests/backend/cassandra/BaseTests.cpp
        unittests/backend/cassandra/BackendTests.cpp
        unittests/backend/cassandra/RetryPolicyTests.cpp
        unittests/backend/cassandra/SettingsProviderTests.cpp
        unittests/backend/cassandra/ExecutionStrategyTests.cpp
        unittests/backend/cassandra/AsyncExecutorTests.cpp
        unittests/webserver/ServerTest.cpp
        unittests/webserver/RPCExecutorTest.cpp)
    include(CMake/deps/gtest.cmake)

    # test for dwarf5 bug on ci
    target_compile_options(clio PUBLIC -gdwarf-4)

    # if CODE_COVERAGE enable, add clio_test-ccov
    if(CODE_COVERAGE)
        include(CMake/coverage.cmake)
        add_converage(${TEST_TARGET})
    endif()
endif()

include(CMake/install/install.cmake)
@@ -3,22 +3,29 @@ Thank you for your interest in contributing to the `clio` project 🙏

 To contribute, please:
 1. Fork the repository under your own user.
-2. Create a new branch on which to write your changes.
+2. Create a new branch on which to commit/push your changes.
 3. Write and test your code.
 4. Ensure that your code compiles with the provided build engine and update the provided build engine as part of your PR where needed and where appropriate.
 5. Where applicable, write test cases for your code and include those in `unittests`.
 6. Ensure your code passes automated checks (e.g. clang-format)
-7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change.). See below for more details.
+7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change). See below for more details.
 8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template.

-> **Note:** Please make sure you read the [Style guide](#style-guide).
+> **Note:** Please read the [Style guide](#style-guide).

+## Install git hooks
+Please run the following command in order to use git hooks that are helpful for `clio` development.
+
+``` bash
+git config --local core.hooksPath .githooks
+```
+
 ## Git commands
 This sections offers a detailed look at the git commands you will need to use to get your PR submitted.
-Please note that there are more than one way to do this and these commands are only provided for your convenience.
+Please note that there are more than one way to do this and these commands are provided for your convenience.
 At this point it's assumed that you have already finished working on your feature/bug.

-> **Important:** Before you issue any of the commands below, please hit the `Sync fork` button and make sure your fork's `develop` branch is up to date with the main `clio` repository.
+> **Important:** Before you issue any of the commands below, please hit the `Sync fork` button and make sure your fork's `develop` branch is up-to-date with the main `clio` repository.

 ``` bash
 # Create a backup of your branch
@@ -30,16 +37,16 @@ git pull origin develop
 git checkout <your feature branch>
 git rebase -i develop
 ```
-For each commit in the list other than the first one please select `s` to squash.
-After this is done you will have the opportunity to write a message for the squashed commit.
+For each commit in the list other than the first one, enter `s` to squash.
+After this is done, you will have the opportunity to write a message for the squashed commit.

-> **Hint:** Please use **imperative mood** commit message capitalizing the first word of the subject.
+> **Hint:** Please use **imperative mood** in the commit message, and capitalize the first word.

 ``` bash
 # You should now have a single commit on top of a commit in `develop`
 git log
 ```
-> **Todo:** In case there are merge conflicts, please resolve them now
+> **Note:** If there are merge conflicts, please resolve them now.

 ``` bash
 # Use the same commit message as you did above
@@ -47,16 +54,16 @@ git commit -m 'Your message'
 git rebase --continue
 ```

-> **Important:** If you have no GPG keys setup please follow [this tutorial](https://docs.github.com/en/authentication/managing-commit-signature-verification/adding-a-gpg-key-to-your-github-account)
+> **Important:** If you have no GPG keys set up, please follow [this tutorial](https://docs.github.com/en/authentication/managing-commit-signature-verification/adding-a-gpg-key-to-your-github-account)

 ``` bash
-# Sign the commit with your GPG key and finally push your changes to the repo
+# Sign the commit with your GPG key, and push your changes
 git commit --amend -S
 git push --force
 ```

 ## Fixing issues found during code review
-While your code is in review it's possible that some changes will be requested by the reviewer.
+While your code is in review, it's possible that some changes will be requested by reviewer(s).
 This section describes the process of adding your fixes.

 We assume that you already made the required changes on your feature branch.
@@ -65,25 +72,26 @@ We assume that you already made the required changes on your feature branch.
 # Add the changed code
 git add <paths to add>

-# Add a folded commit message (so you can squash them later)
+# Add a [FOLD] commit message (so you remember to squash it later)
 # while also signing it with your GPG key
 git commit -S -m "[FOLD] Your commit message"

 # And finally push your changes
 git push
 ```
-## After code review
-Last but not least, when your PR is approved you still have to `Squash and merge` your code.
-Luckily there is a button for that towards the bottom of the PR's page on github.
+## After code review
+When your PR is approved and ready to merge, use `Squash and merge`.
+The button for that is near the bottom of the PR's page on GitHub.

-> **Important:** Please leave the automatically generated link to PR in the subject line **and** in the description field please add `"Fixes #ISSUE_ID"` (replacing `ISSUE_ID` with yours).
+> **Important:** Please leave the automatically-generated mention/link to the PR in the subject line **and** in the description field add `"Fix #ISSUE_ID"` (replacing `ISSUE_ID` with yours) if the PR fixes an issue.
 > **Note:** See [issues](https://github.com/XRPLF/clio/issues) to find the `ISSUE_ID` for the feature/bug you were working on.

 # Style guide
-This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments.
+This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.

 ## Formatting
-All code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
+Code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
 To change your code to conform use `clang-format -i <your changed files>`.

 ## Avoid
@@ -107,7 +115,7 @@ To change your code to conform use `clang-format -i <your changed files>`.
 Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc.

 ## Code Review
-PRs must be reviewed by at least one of the maintainers.
+A PR must be reviewed and approved by at least one of the maintainers before it can be merged.

 ## Adding and Removing
 New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and a 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected.
@@ -116,8 +124,11 @@ Existing maintainers can resign, or be subject to a vote for removal at the behe

 ## Existing Maintainers

-* [cjcobb23](https://github.com/cjcobb23) (Ripple)
-* [natenichols](https://github.com/natenichols) (Ripple)
-* [legleux](https://github.com/legleux) (Ripple)
-* [undertome](https://github.com/undertome) (Ripple)
+* [cindyyan317](https://github.com/cindyyan317) (Ripple)
+* [godexsoft](https://github.com/godexsoft) (Ripple)
+* [legleux](https://github.com/legleux) (Ripple)
+
+## Honorable ex-Maintainers
+
+* [cjcobb23](https://github.com/cjcobb23) (ex-Ripple)
+* [natenichols](https://github.com/natenichols) (ex-Ripple)
67 README.md
@@ -22,13 +22,15 @@ from which data can be extracted. The rippled node does not need to be running o

 ## Building

-Clio is built with CMake. Clio requires at least GCC-11 (C++20), and Boost 1.75.0 or later.
+Clio is built with CMake. Clio requires at least GCC-11/clang-14.0.0 (C++20), and Boost 1.75.0.

 Use these instructions to build a Clio executable from the source. These instructions were tested on Ubuntu 20.04 LTS.

 ```sh
 # Install dependencies
-sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake
+sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake clang-format
+# Install gcovr to run code coverage
+sudo apt-get -y install gcovr

 # Compile Boost
 wget -O $HOME/boost_1_75_0.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz
@@ -72,6 +74,36 @@ server is running
 to the IP of your Clio server. This entry can take the form of a comma-separated list if
 you are running multiple Clio nodes.

+In addition, the parameter `start_sequence` can be included and configured within the top level of the config file. This parameter specifies the sequence of first ledger to extract if the database is empty. Note that ETL extracts ledgers in order and that no backfilling functionality currently exists, meaning Clio will not retroactively learn ledgers older than the one you specify. Choosing to specify this or not will yield the following behavior:
+- If this setting is absent and the database is empty, ETL will start with the next ledger validated by the network.
+- If this setting is present and the database is not empty, an exception is thrown.
+
+In addition, the optional parameter `finish_sequence` can be added to the json file as well, specifying where the ledger can stop.
+
+To add `start_sequence` and/or `finish_sequence` to the config.json file appropriately, they will be on the same top level of precedence as other parameters (such as `database`, `etl_sources`, `read_only`, etc.) and be specified with an integer. Here is an example snippet from the config file:
+
+```json
+"start_sequence": 12345,
+"finish_sequence": 54321
+```
+
+The parameters `ssl_cert_file` and `ssl_key_file` can also be added to the top level of precedence of our Clio config. `ssl_cert_file` specifies the filepath for your SSL cert while `ssl_key_file` specifies the filepath for your SSL key. It is up to you how to change ownership of these folders for your designated Clio user. Your options include:
+- Copying the two files as root somewhere that's accessible by the Clio user, then running `sudo chown` to your user
+- Changing the permissions directly so it's readable by your Clio user
+- Running Clio as root (strongly discouraged)
+
+An example of how to specify `ssl_cert_file` and `ssl_key_file` in the config:
+
+```json
+"server":{
+    "ip": "0.0.0.0",
+    "port": 51233
+},
+"ssl_cert_file" : "/full/path/to/cert.file",
+"ssl_key_file" : "/full/path/to/key.file"
+```

 Once your config files are ready, start rippled and Clio. It doesn't matter which you
 start first, and it's fine to stop one or the other and restart at any given time.
@@ -152,9 +184,33 @@ You must:
 ## Logging
 Clio provides several logging options, all are configurable via the config file and are detailed below.

-`log_level`: The minimum level of severity at which the log message will be outputted.
+`log_level`: The minimum level of severity at which the log message will be outputted by default.
 Severity options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`. Defaults to `info`.

+`log_format`: The format of log lines produced by clio. Defaults to `"%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%"`.
+Each of the variables expands like so
+- `TimeStamp`: The full date and time of the log entry
+- `SourceLocation`: A partial path to the c++ file and the line number in said file (`source/file/path:linenumber`)
+- `ThreadID`: The ID of the thread the log entry is written from
+- `Channel`: The channel that this log entry was sent to
+- `Severity`: The severity (aka log level) the entry was sent at
+- `Message`: The actual log message
+
+`log_channels`: An array of json objects, each overriding properties for a logging `channel`.
+At the moment of writing, only `log_level` can be overriden using this mechanism.
+
+Each object is of this format:
+```json
+{
+    "channel": "Backend",
+    "log_level": "fatal"
+}
+```
+If no override is present for a given channel, that channel will log at the severity specified by the global `log_level`.
+Overridable log channels: `Backend`, `WebServer`, `Subscriptions`, `RPC`, `ETL` and `Performance`.
+
+> **Note:** See `example-config.json` for more details.
+
 `log_to_console`: Enable/disable log output to console. Options are `true`/`false`. Defaults to true.

 `log_directory`: Path to the directory where log files are stored. If such directory doesn't exist, Clio will create it. If not specified, logs are not written to a file.
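For illustration, a line produced by the default `log_format` above might look like the following; the concrete values are hypothetical:

```
2023-01-30 12:00:00.123456 (rpc/RPCHelpers.cpp:42) [0x7f2b1c0ff700] RPC:Info Processing account_tx request
```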
@@ -170,6 +226,11 @@ rotate the current log file. Defaults to 12 hours.
 Note, time-based log rotation occurs dependently on size-based log rotation, where if a
 size-based log rotation occurs, the timer for the time-based rotation will reset.

+`log_tag_style`: Tag implementation to use. Must be one of:
+- `uint`: Lock free and threadsafe but outputs just a simple unsigned integer
+- `uuid`: Threadsafe and outputs a UUID tag
+- `none`: Don't use tagging at all
+
 ## Cassandra / Scylla Administration

 Since Clio relies on either Cassandra or Scylla for its database backend, here are some important considerations:
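As with the other logging options, `log_tag_style` sits at the top level of the config; an illustrative snippet (the value is chosen arbitrarily):

```json
"log_tag_style": "uint"
```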
121
REVIEW.md
121
REVIEW.md
@@ -1,121 +0,0 @@
|
||||
# How to review clio
|
||||
Clio is a massive project, and thus I don't expect the code to be reviewed the
|
||||
way a normal PR would. So I put this guide together to help reviewers look at
|
||||
the relevant pieces of code without getting lost in the weeds.
|
||||
|
||||
One thing reviewers should keep in mind is that most of clio is designed to be
|
||||
lightweight and simple. We try not to introduce any uneccessary complexity and
|
||||
keep the code as simple and straightforward as possible. Sometimes complexity is
|
||||
unavoidable, but simplicity is the goal.
|
||||
|
||||
## Order of review
|
||||
The code is organized into 4 main components, each with their own folder. The
|
||||
code in each folder is as self contained as possible. A good way to approach
|
||||
the review would be to review one folder at a time.
|
||||
|
||||
### backend
|
||||
The code in the backend folder is the heart of the project, and reviewers should
|
||||
start here. This is the most complex part of the code, as well as the most
|
||||
performance sensitive. clio does not keep any data in memory, so performance
|
||||
generally depends on the data model and the way we talk to the database.
|
||||
|
||||
Reviewers should start with the README in this folder to get a high level idea
|
||||
of the data model and to review the data model itself. Then, reviewers should
|
||||
dive into the implementation. The table schemas and queries for Cassandra are
|
||||
defined in `CassandraBackend::open()`. The table schemas for Postgres are defined
|
||||
in Pg.cpp. The queries for Postgres are defined in each of the functions of `PostgresBackend`.
|
||||
A good way to approach the implementation would be to look at the table schemas,
|
||||
and then go through the functions declared in `BackendInterface`. Reviewers could
|
||||
also branch out to the rest of the code by looking at where these functions are
|
||||
called from.
|
||||
|
||||
### webserver
|
||||
The code in the webserver folder implements the web server for handling RPC requests.
|
||||
This code was mostly copied and pasted from boost beast example code, so I would
|
||||
really appreciate review here.
|
||||
|
||||
### rpc
|
||||
The rpc folder contains all of the handlers and any helper functions they need.
|
||||
This code is not too complicated, so reviewers don't need to dwell long here.
|
||||
|
||||
### etl
|
||||
The etl folder contains all of the code for extracting data from rippled. This
|
||||
code is complex and important, but most of this code was just copied from rippled
|
||||
reporting mode, and thus has already been reviewed and is being used in prod.
|
||||
|
||||
## Design decisions that should be reviewed
|
||||
|
||||
### Data model
|
||||
Reviewers should review the general data model. The data model itself is described
|
||||
at a high level in the README in the backend folder. The table schemas and queries
|
||||
for Cassandra are defined in the `open()` function of `CassandraBackend`. The table
|
||||
schemas for Postgres are defined in Pg.cpp.
|
||||
|
||||
Particular attention should be paid to the keys table, and the problem that solves
|
||||
(successor/upper bound). I originally was going to have a special table for book_offers,
|
||||
but then I decided that we could use the keys table itself for that and save space.
|
||||
This makes book_offers somewhat slow compared to rippled, though still very usable.
|
||||
|
||||
### Large rows
|
||||
I did some tricks with Cassandra to deal with very large rows in the keys and account_tx
|
||||
tables. For each of these, the partition key (the first component of the primary
|
||||
key) is a compound key. This is meant to break large rows into smaller rows. This
|
||||
is done to avoid hotspots. Data is sharded in Cassandra, and if some rows get very
|
||||
large, some nodes can have a lot more data than others.
|
||||
|
||||
For account_tx, this has performance implications when iterating very far back
|
||||
in time. Refer to the `fetchAccountTransactions()` function in `CassandraBackend`.
|
||||
|
||||
It is unclear if this needs to be done for other tables.
|
||||
|
||||
### Postgres table partitioning
|
||||
Originally, Postgres exhibited performance problems when the dataset approach 1
|
||||
TB. This was solved by table partitioning.
|
||||
|
||||
### Threading
|
||||
I used asio for multithreading. There are a lot of different io_contexts lying
|
||||
around the code. This needs to be cleaned up a bit. Most of these are really
|
||||
just ways to submit an async job to a single thread. I don't think it makes
|
||||
sense to have one io_context for the whole application, but some of the threading
|
||||
is a bit opaque and could be cleaned up.
|
||||
|
||||
### Boost Json
|
||||
I used boost json for serializing data to json.
|
||||
|
||||
### No cache
|
||||
As of now, there is no cache. I am not sure if a cache is even worth it. A
|
||||
transaction cache would not be hard, but a cache for ledger data will be hard.
|
||||
While a cache would improve performance, it would increase memory usage. clio
|
||||
is designed to be lightweight. Also, I've reached thousands of requests per
|
||||
second with a single clio node, so I'm not sure performance is even an issue.
|
||||
|
||||
## Things I'm less than happy about

#### BackendIndexer

This is a particularly hairy piece of code that handles writing to the keys table,
and I am not too happy with it. Parts of it need to execute in real time as
part of ETL, while other parts are allowed to run in the background. There is also
code that detects whether a previous background job failed to complete before the
server shut down, and reruns that job if so. The code feels tacked on, and
I would like it to be more cleanly integrated with the rest of the code.
#### Shifting

There is some bit shifting going on with the keys table and the account_tx table.
The keys table is written to every 2^20 (1,048,576) ledgers. Maybe it would be
better to just write every 1 million ledgers.
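The arithmetic in question is just bucketing by a power of two versus a round
decimal number, as in this illustrative comparison:

```cpp
#include <cstdint>

// Bucket a ledger sequence with a shift (current approach) versus plain
// division by a round number (the alternative mentioned above).
constexpr std::uint32_t
shiftBucket(std::uint32_t ledgerSequence)
{
    return ledgerSequence >> 20;  // buckets of 2^20 = 1,048,576 ledgers
}

constexpr std::uint32_t
divBucket(std::uint32_t ledgerSequence)
{
    return ledgerSequence / 1000000;  // buckets of exactly 1 million ledgers
}

static_assert(shiftBucket(2097152) == 2, "2 * 2^20 lands in bucket 2");
static_assert(divBucket(2000000) == 2, "2 million lands in bucket 2");
```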
#### Performance of book_offers

book_offers is a bit slow. It could be sped up in a variety of ways. One is to
keep a separate book_offers table; however, this is not straightforward and would
use more space. Another is to keep a cache of book_offers for the most recent ledger
(or the last few ledgers). I am not sure if this is worth it.
#### account_tx in Cassandra

After the fix to deal with large rows, account_tx can be slow at times when using
Cassandra. Specifically, if there are large gaps in time where the account was
not affected by any transactions, the code ends up reading empty records. I would
like to sidestep this issue if possible.
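The following self-contained sketch (hypothetical, not the real fetch path)
shows why gaps are costly: paging backwards has to probe every bucket between
two transactions, and each probe of an empty bucket is still a database read.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for one Cassandra read of an (account, bucket)
// partition; returns no rows when the account was idle in that bucket.
std::vector<int>
readBucket(std::uint32_t bucket)
{
    // Pretend the account was only active in buckets 0 and 100.
    if (bucket == 0 || bucket == 100)
        return {1, 2, 3};
    return {};
}

int
main()
{
    // Paging backwards from bucket 100 probes all intermediate buckets,
    // even though 99 of them contain nothing for this account.
    std::size_t reads = 0;
    std::vector<int> out;
    for (std::uint32_t b = 100; out.size() < 6; --b)
    {
        ++reads;
        auto recs = readBucket(b);
        out.insert(out.end(), recs.begin(), recs.end());
        if (b == 0)
            break;
    }
    std::printf("%zu reads for %zu records\n", reads, out.size());
}
```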
#### Implementation of fetchLedgerPage

`fetchLedgerPage()` is rather complex. Part of this seems unavoidable, since this
code is dealing with the keys table.
@@ -1,38 +1,35 @@
{
"database":
{
"type":"cassandra",
"cassandra":
{
"secure_connect_bundle":"[path/to/zip. ignore if using contact_points]",
"contact_points":"[ip. ignore if using secure_connect_bundle]",
"port":"[port. ignore if using_secure_connect_bundle]",
"keyspace":"clio",
"username":"[username, if any]",
"password":"[password, if any]",
"max_requests_outstanding":25000,
"threads":8
"database": {
"type": "cassandra",
"cassandra": {
"secure_connect_bundle": "[path/to/zip. ignore if using contact_points]",
"contact_points": "[ip. ignore if using secure_connect_bundle]",
"port": "[port. ignore if using_secure_connect_bundle]",
"keyspace": "clio",
"username": "[username, if any]",
"password": "[password, if any]",
"max_requests_outstanding": 25000,
"threads": 8
}
},
"etl_sources":
[
"etl_sources": [
{
"ip":"[rippled ip]",
"ws_port":"6006",
"grpc_port":"50051"
"ip": "[rippled ip]",
"ws_port": "6006",
"grpc_port": "50051"
}
],
"dos_guard":
{
"whitelist":["127.0.0.1"]
"dos_guard": {
"whitelist": [
"127.0.0.1"
]
},
"server":{
"ip":"0.0.0.0",
"port":8080
"server": {
"ip": "0.0.0.0",
"port": 8080
},
"log_level":"debug",
"log_file":"./clio.log",
"online_delete":0,
"extractor_threads":8,
"read_only":false
"log_level": "debug",
"log_file": "./clio.log",
"extractor_threads": 8,
"read_only": false
}
@@ -1,40 +1,93 @@
{
"database":
{
"type":"cassandra",
"cassandra":
{
"contact_points":"127.0.0.1",
"port":9042,
"keyspace":"clio",
"replication_factor":1,
"table_prefix":"",
"max_requests_outstanding":25000,
"threads":8
"database": {
"type": "cassandra",
"cassandra": {
"contact_points": "127.0.0.1",
"port": 9042,
"keyspace": "clio",
"replication_factor": 1,
"table_prefix": "",
"max_write_requests_outstanding": 25000,
"max_read_requests_outstanding": 30000,
"threads": 8
}
},
"etl_sources":
[
"etl_sources": [
{
"ip":"127.0.0.1",
"ws_port":"6006",
"grpc_port":"50051"
"ip": "127.0.0.1",
"ws_port": "6006",
"grpc_port": "50051"
}
],
"dos_guard":
{
"whitelist":["127.0.0.1"]
"dos_guard": {
"whitelist": [
"127.0.0.1"
], // comma-separated list of ips to exclude from rate limiting
/* The below values are the default values and are only specified here
* for documentation purposes. The rate limiter currently limits
* connections and bandwidth per ip. The rate limiter looks at the raw
* ip of a client connection, and so requests routed through a load
* balancer will all have the same ip and be treated as a single client
*/
"max_fetches": 1000000, // max bytes per ip per sweep interval
"max_connections": 20, // max connections per ip
"max_requests": 20, // max requests per ip
"sweep_interval": 1 // time in seconds before resetting bytes per ip count
},
"server":{
"ip":"0.0.0.0",
"port":51233
"cache": {
"peers": [
{
"ip": "127.0.0.1",
"port": 51234
}
]
},
"log_level":"debug",
"server": {
"ip": "0.0.0.0",
"port": 51233,
/* Max number of requests to queue up before rejecting further requests.
* Defaults to 0, which disables the limit
*/
"max_queue_size": 500
},
"log_channels": [
{
"channel": "Backend",
"log_level": "fatal"
},
{
"channel": "WebServer",
"log_level": "info"
},
{
"channel": "Subscriptions",
"log_level": "info"
},
{
"channel": "RPC",
"log_level": "error"
},
{
"channel": "ETL",
"log_level": "debug"
},
{
"channel": "Performance",
"log_level": "trace"
}
],
"log_level": "info",
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format
"log_to_console": true,
"log_directory":"./clio_log",
"log_directory": "./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"extractor_threads":8,
"read_only":false
"log_tag_style": "uint",
"extractor_threads": 8,
"read_only": false,
//"start_sequence": [integer] the ledger index to start from,
//"finish_sequence": [integer] the ledger index to finish at,
//"ssl_cert_file" : "/full/path/to/cert.file",
//"ssl_key_file" : "/full/path/to/key.file"
}
@@ -1,65 +1,62 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <boost/algorithm/string.hpp>
#include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h>
#include <backend/PostgresBackend.h>
#include <config/Config.h>
#include <log/Logger.h>

#include <boost/algorithm/string.hpp>

namespace Backend {
std::shared_ptr<BackendInterface>
make_Backend(boost::asio::io_context& ioc, boost::json::object const& config)
make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
{
BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface";
static clio::Logger log{"Backend"};
log.info() << "Constructing BackendInterface";

boost::json::object dbConfig = config.at("database").as_object();

bool readOnly = false;
if (config.contains("read_only"))
readOnly = config.at("read_only").as_bool();

auto type = dbConfig.at("type").as_string();
auto const readOnly = config.valueOr("read_only", false);

auto const type = config.value<std::string>("database.type");
std::shared_ptr<BackendInterface> backend = nullptr;

if (boost::iequals(type, "cassandra"))
// TODO: retire `cassandra-new` by next release after 2.0
if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new"))
{
if (config.contains("online_delete"))
dbConfig.at(type).as_object()["ttl"] =
config.at("online_delete").as_int64() * 4;
backend = std::make_shared<CassandraBackend>(
ioc, dbConfig.at(type).as_object());
}
else if (boost::iequals(type, "postgres"))
{
if (dbConfig.contains("experimental") &&
dbConfig.at("experimental").is_bool() &&
dbConfig.at("experimental").as_bool())
backend = std::make_shared<PostgresBackend>(
ioc, dbConfig.at(type).as_object());
else
BOOST_LOG_TRIVIAL(fatal)
<< "Postgres support is experimental at this time. "
<< "If you would really like to use Postgres, add "
"\"experimental\":true to your database config";
auto cfg = config.section("database." + type);
backend =
std::make_shared<Backend::Cassandra::CassandraBackend>(Backend::Cassandra::SettingsProvider{cfg}, readOnly);
}

if (!backend)
throw std::runtime_error("Invalid database type");

backend->open(readOnly);
auto rng = backend->hardFetchLedgerRangeNoThrow();
auto const rng = backend->hardFetchLedgerRangeNoThrow();
if (rng)
{
backend->updateRange(rng->minSequence);
backend->updateRange(rng->maxSequence);
}

BOOST_LOG_TRIVIAL(info)
<< __func__ << ": Constructed BackendInterface Successfully";

log.info() << "Constructed BackendInterface Successfully";
return backend;
}
} // namespace Backend

#endif // RIPPLE_REPORTING_BACKEND_FACTORY
@@ -1,32 +1,59 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/BackendInterface.h>
#include <log/Logger.h>

#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h>

using namespace clio;

// local to compilation unit loggers
namespace {
clio::Logger gLog{"Backend"};
} // namespace

namespace Backend {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
gLog.debug() << "Want finish writes for " << ledgerSequence;
auto commitRes = doFinishWrites();
if (commitRes)
{
gLog.debug() << "Successfully commited. Updating range now to " << ledgerSequence;
updateRange(ledgerSequence);
}
return commitRes;
}
void
BackendInterface::writeLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob)
BackendInterface::writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob)
{
assert(key.size() == sizeof(ripple::uint256));
doWriteLedgerObject(std::move(key), seq, std::move(blob));
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow(
boost::asio::yield_context& yield) const
BackendInterface::hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(debug) << __func__;
gLog.trace() << "called";
while (true)
{
try
@@ -43,7 +70,7 @@ BackendInterface::hardFetchLedgerRangeNoThrow(
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
BOOST_LOG_TRIVIAL(debug) << __func__;
gLog.trace() << "called";
return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}

@@ -57,21 +84,17 @@ BackendInterface::fetchLedgerObject(
auto obj = cache_.get(key, sequence);
if (obj)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache hit - " << ripple::strHex(key);
gLog.trace() << "Cache hit - " << ripple::strHex(key);
return *obj;
}
else
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache miss - " << ripple::strHex(key);
gLog.trace() << "Cache miss - " << ripple::strHex(key);
auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj)
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - missed cache and missed in db";
gLog.trace() << "Missed cache and missed in db";
else
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - missed cache but found in db";
gLog.trace() << "Missed cache but found in db";
return dbObj;
}
}
@@ -93,9 +116,7 @@ BackendInterface::fetchLedgerObjects(
else
misses.push_back(keys[i]);
}
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache hits = " << keys.size() - misses.size()
<< " - cache misses = " << misses.size();
gLog.trace() << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();

if (misses.size())
{
@@ -121,11 +142,9 @@ BackendInterface::fetchSuccessorKey(
{
auto succ = cache_.getSuccessor(key, ledgerSequence);
if (succ)
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache hit - " << ripple::strHex(key);
gLog.trace() << "Cache hit - " << ripple::strHex(key);
else
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " - cache miss - " << ripple::strHex(key);
gLog.trace() << "Cache miss - " << ripple::strHex(key);
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}

@@ -152,7 +171,6 @@ BackendInterface::fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursor,
boost::asio::yield_context& yield) const
{
// TODO try to speed this up. This can take a few seconds. The goal is
@@ -161,10 +179,7 @@ BackendInterface::fetchBookOffers(
const ripple::uint256 bookEnd = ripple::getQualityNext(book);
ripple::uint256 uTipIndex = book;
std::vector<ripple::uint256> keys;
auto getMillis = [](auto diff) {
return std::chrono::duration_cast<std::chrono::milliseconds>(diff)
.count();
};
auto getMillis = [](auto diff) { return std::chrono::duration_cast<std::chrono::milliseconds>(diff).count(); };
auto begin = std::chrono::system_clock::now();
std::uint32_t numSucc = 0;
std::uint32_t numPages = 0;
@@ -179,30 +194,24 @@ BackendInterface::fetchBookOffers(
succMillis += getMillis(mid2 - mid1);
if (!offerDir || offerDir->key >= bookEnd)
{
BOOST_LOG_TRIVIAL(trace) << __func__ << " - offerDir.has_value() "
<< offerDir.has_value() << " breaking";
gLog.trace() << "offerDir.has_value() " << offerDir.has_value() << " breaking";
break;
}
uTipIndex = offerDir->key;
while (keys.size() < limit)
{
++numPages;
ripple::STLedgerEntry sle{
ripple::SerialIter{
offerDir->blob.data(), offerDir->blob.size()},
offerDir->key};
ripple::STLedgerEntry sle{ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
auto indexes = sle.getFieldV256(ripple::sfIndexes);
keys.insert(keys.end(), indexes.begin(), indexes.end());
auto next = sle.getFieldU64(ripple::sfIndexNext);
if (!next)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " next is empty. breaking";
gLog.trace() << "Next is empty. breaking";
break;
}
auto nextKey = ripple::keylet::page(uTipIndex, next);
auto nextDir =
fetchLedgerObject(nextKey.key, ledgerSequence, yield);
auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence, yield);
assert(nextDir);
offerDir->blob = *nextDir;
offerDir->key = nextKey.key;
@@ -214,29 +223,21 @@ BackendInterface::fetchBookOffers(
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " key = " << ripple::strHex(keys[i])
<< " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence;
gLog.trace() << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence;
assert(objs[i].size());
page.offers.push_back({keys[i], objs[i]});
}
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " "
<< "Fetching " << std::to_string(keys.size()) << " offers took "
<< std::to_string(getMillis(mid - begin))
<< " milliseconds. Fetching next dir took "
<< std::to_string(succMillis) << " milliseonds. Fetched next dir "
<< std::to_string(numSucc) << " times"
<< " Fetching next page of dir took " << std::to_string(pageMillis)
<< " milliseconds"
<< ". num pages = " << std::to_string(numPages)
<< ". Fetching all objects took "
<< std::to_string(getMillis(end - mid))
<< " milliseconds. total time = "
<< std::to_string(getMillis(end - begin)) << " milliseconds"
<< " book = " << ripple::strHex(book);
gLog.debug() << "Fetching " << std::to_string(keys.size()) << " offers took "
<< std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
<< std::to_string(succMillis) << " milliseonds. Fetched next dir " << std::to_string(numSucc)
<< " times"
<< " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds"
<< ". num pages = " << std::to_string(numPages) << ". Fetching all objects took "
<< std::to_string(getMillis(end - mid))
<< " milliseconds. total time = " << std::to_string(getMillis(end - begin)) << " milliseconds"
<< " book = " << ripple::strHex(book);

return page;
}
@@ -255,11 +256,8 @@ BackendInterface::fetchLedgerPage(
bool reachedEnd = false;
while (keys.size() < limit && !reachedEnd)
{
ripple::uint256 const& curCursor = keys.size() ? keys.back()
: cursor ? *cursor
: firstKey;
std::uint32_t const seq =
outOfOrder ? range->maxSequence : ledgerSequence;
ripple::uint256 const& curCursor = keys.size() ? keys.back() : cursor ? *cursor : firstKey;
std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
auto succ = fetchSuccessorKey(curCursor, seq, yield);
if (!succ)
reachedEnd = true;
@@ -274,16 +272,14 @@ BackendInterface::fetchLedgerPage(
page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
else if (!outOfOrder)
{
BOOST_LOG_TRIVIAL(error)
<< __func__
<< " deleted or non-existent object in successor table. key = "
<< ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
gLog.error() << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
<< " - seq = " << ledgerSequence;
std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j)
{
msg << " - " << ripple::strHex(keys[j]);
}
BOOST_LOG_TRIVIAL(error) << __func__ << msg.str();
gLog.error() << msg.str();
}
}
if (keys.size() && !reachedEnd)
@@ -293,9 +289,7 @@ BackendInterface::fetchLedgerPage(
}

std::optional<ripple::Fees>
BackendInterface::fetchFees(
std::uint32_t const seq,
boost::asio::yield_context& yield) const
BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const
{
ripple::Fees fees;

@@ -304,7 +298,7 @@ BackendInterface::fetchFees(

if (!bytes)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - could not find fees";
gLog.error() << "Could not find fees";
return {};
}
@@ -1,16 +1,48 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/ledger/ReadView.h>
#include <boost/asio.hpp>
#include <backend/DBHelpers.h>
#include <backend/SimpleCache.h>
#include <backend/LedgerCache.h>
#include <backend/Types.h>
#include <config/Config.h>
#include <log/Logger.h>

#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <thread>
#include <type_traits>

namespace Backend {

/**
* @brief Throws an error when database read time limit is exceeded.
*
* This class throws an error when the read time limit is exceeded but
* is also paired with a separate class to retry the connection.
*/
class DatabaseTimeout : public std::exception
{
public:
const char*
what() const throw() override
{
@@ -18,10 +50,20 @@ class DatabaseTimeout : public std::exception
}
};

/**
* @brief Separate class that reattempts connection after time limit.
*
* @tparam F Represents a class of handlers for Cassandra database.
* @param func Instance of Cassandra database handler class.
* @param waitMs Is the arbitrary time limit of 500ms.
* @return auto
*/
template <class F>
auto
retryOnTimeout(F func, size_t waitMs = 500)
{
static clio::Logger log{"Backend"};

while (true)
{
try
@@ -30,48 +72,82 @@ retryOnTimeout(F func, size_t waitMs = 500)
}
catch (DatabaseTimeout& t)
{
log.error() << "Database request timed out. Sleeping and retrying ... ";
std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
BOOST_LOG_TRIVIAL(error)
<< __func__ << " function timed out. Retrying ... ";
}
}
}

/**
* @brief Passes in serialized handlers in an asynchronous fashion.
*
* Note that the synchronous auto passes handlers critical to supporting
* the Clio backend. The coroutine types are checked if same/different.
*
* @tparam F Represents a class of handlers for Cassandra database.
* @param f R-value instance of Cassandra handler class.
* @return auto
*/
template <class F>
auto
synchronous(F&& f)
{
/** @brief Serialized handlers and their execution.
*
* The ctx class is converted into a serialized handler, also named
* ctx, and is used to pass a stream of data into the method.
*/
boost::asio::io_context ctx;
boost::asio::io_context::strand strand(ctx);
std::optional<boost::asio::io_context::work> work;

/*! @brief Place the ctx within the vector of serialized handlers. */
work.emplace(ctx);

using R = typename std::result_of<F(boost::asio::yield_context&)>::type;
/**
* @brief If/else statements regarding coroutine type matching.
*
* R is the currently executing coroutine that is about to get passed.
* If coroutine types do not match, the current one's type is stored.
*/
using R = typename boost::result_of<F(boost::asio::yield_context&)>::type;
if constexpr (!std::is_same<R, void>::value)
{
/**
* @brief When the coroutine type is the same
*
* The spawn function enables programs to implement asynchronous logic
* in a synchronous manner. res stores the instance of the currently
* executing coroutine, yield. The different type is returned.
*/
R res;
boost::asio::spawn(
strand, [&f, &work, &res](boost::asio::yield_context yield) {
res = f(yield);
work.reset();
});
boost::asio::spawn(strand, [&f, &work, &res](boost::asio::yield_context yield) {
res = f(yield);
work.reset();
});

ctx.run();
return res;
}
else
{
boost::asio::spawn(
strand, [&f, &work](boost::asio::yield_context yield) {
f(yield);
work.reset();
});
/*! @brief When the coroutine type is different, run as normal. */
boost::asio::spawn(strand, [&f, &work](boost::asio::yield_context yield) {
f(yield);
work.reset();
});

ctx.run();
}
}

/**
* @brief Reestablishes synchronous connection on timeout.
*
* @tparam Represents a class of handlers for Cassandra database.
* @param f R-value instance of Cassandra database handler class.
* @return auto
*/
template <class F>
auto
synchronousAndRetryOnTimeout(F&& f)
@@ -79,57 +155,66 @@ synchronousAndRetryOnTimeout(F&& f)
return retryOnTimeout([&]() { return synchronous(f); });
}
/*! @brief Handles ledger and transaction backend data. */
class BackendInterface
{
/**
* @brief Shared mutexes and a cache for the interface.
*
* rngMtx_ is a shared mutex. Shared mutexes prevent shared data
* from being accessed by multiple threads and have two levels of
* access: shared and exclusive.
*/
protected:
mutable std::shared_mutex rngMtx_;
std::optional<LedgerRange> range;
SimpleCache cache_;
LedgerCache cache_;

// mutex used for open() and close()
mutable std::mutex mutex_;
/**
* @brief Public read methods
*
* All of these read methods can throw DatabaseTimeout. When writing
* code in an RPC handler, this exception does not need to be caught:
* when an RPC results in a timeout, an error is returned to the client.
*/

public:
BackendInterface(boost::json::object const& config)
{
}
virtual ~BackendInterface()
{
}
BackendInterface() = default;
virtual ~BackendInterface() = default;

// *** public read methods ***
// All of these read methods can throw DatabaseTimeout. When writing code
// in an RPC handler, this exception does not need to be caught: when an RPC
// results in a timeout, an error is returned to the client
public:
// *** ledger methods
//

SimpleCache const&
/**
* @brief Cache that holds states of the ledger
* @return Immutable cache
*/
LedgerCache const&
cache() const
{
return cache_;
}

SimpleCache&
/**
* @brief Cache that holds states of the ledger
* @return Mutable cache
*/
LedgerCache&
cache()
{
return cache_;
}

/*! @brief Fetches a specific ledger by sequence number. */
virtual std::optional<ripple::LedgerInfo>
fetchLedgerBySequence(
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context& yield) const = 0;

/*! @brief Fetches a specific ledger by hash. */
virtual std::optional<ripple::LedgerInfo>
fetchLedgerByHash(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const = 0;
fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0;

/*! @brief Fetches the latest ledger sequence. */
virtual std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0;

/*! @brief Fetches the current ledger range while locking that process */
std::optional<LedgerRange>
fetchLedgerRange() const
{
@@ -137,10 +222,18 @@ public:
return range;
}

/**
* @brief Updates the range of sequences to be tracked.
*
* Function that continues updating the range sliding window or creates
* a new sliding window once the maxSequence limit has been reached.
*
* @param newMax Unsigned 32-bit integer representing new max of range.
*/
void
updateRange(uint32_t newMax)
{
std::unique_lock lck(rngMtx_);
std::scoped_lock lck(rngMtx_);
assert(!range || newMax >= range->maxSequence);
if (!range)
range = {newMax, newMax};
@@ -148,20 +241,49 @@ public:
range->maxSequence = newMax;
}

/**
* @brief Returns the fees for specific transactions.
*
* @param seq Unsigned 32-bit integer representing sequence.
* @param yield The currently executing coroutine.
* @return std::optional<ripple::Fees>
*/
std::optional<ripple::Fees>
fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const;

// *** transaction methods
/*! @brief TRANSACTION METHODS */
/**
* @brief Fetches a specific transaction.
*
* @param hash Unsigned 256-bit integer representing hash.
* @param yield The currently executing coroutine.
* @return std::optional<TransactionAndMetadata>
*/
virtual std::optional<TransactionAndMetadata>
fetchTransaction(
ripple::uint256 const& hash,
boost::asio::yield_context& yield) const = 0;
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0;

/**
* @brief Fetches multiple transactions.
*
* @param hashes Unsigned integer value representing a hash.
* @param yield The currently executing coroutine.
* @return std::vector<TransactionAndMetadata>
*/
virtual std::vector<TransactionAndMetadata>
fetchTransactions(
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context& yield) const = 0;
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context& yield) const = 0;

/**
* @brief Fetches all transactions for a specific account
*
* @param account A specific XRPL Account, specified by unique type
* accountID.
* @param limit Paging limit for how many transactions can be returned per
* page.
* @param forward Boolean whether paging happens forwards or backwards.
* @param cursor Important metadata returned every time paging occurs.
* @param yield Currently executing coroutine.
* @return TransactionsAndCursor
*/
virtual TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
@@ -170,23 +292,50 @@ public:
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const = 0;

/**
* @brief Fetches all transactions from a specific ledger.
*
* @param ledgerSequence Unsigned 32-bit integer for latest total
* transactions.
* @param yield Currently executing coroutine.
* @return std::vector<TransactionAndMetadata>
*/
virtual std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

/**
* @brief Fetches all transaction hashes from a specific ledger.
*
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::vector<ripple::uint256>
*/
virtual std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

// *** NFT methods
/*! @brief NFT methods */
/**
* @brief Fetches a specific NFT
*
* @param tokenID Unsigned 256-bit integer.
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::optional<NFT>
*/
virtual std::optional<NFT>
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
const = 0;

/**
* @brief Fetches all transactions for a specific NFT.
*
* @param tokenID Unsigned 256-bit integer.
* @param limit Paging limit as to how many transactions return per page.
* @param forward Boolean whether paging happens forwards or backwards.
* @param cursorIn Represents transaction number and ledger sequence.
* @param yield Currently executing coroutine is passed in as input.
* @return TransactionsAndCursor
*/
virtual TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
@@ -195,38 +344,68 @@ public:
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const = 0;

// *** state data methods
/*! @brief STATE DATA METHODS */
/**
* @brief Fetches a specific ledger object: vector of unsigned chars
*
* @param key Unsigned 256-bit integer.
* @param sequence Unsigned 32-bit integer.
* @param yield Currently executing coroutine.
* @return std::optional<Blob>
*/
std::optional<Blob>
fetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const;
fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
const;

/**
* @brief Fetches all ledger objects: a vector of vectors of unsigned chars.
*
* @param keys Unsigned 256-bit integer.
* @param sequence Unsigned 32-bit integer.
* @param yield Currently executing coroutine.
* @return std::vector<Blob>
*/
std::vector<Blob>
fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const;

/*! @brief Virtual function version of fetchLedgerObject */
virtual std::optional<Blob>
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
const = 0;

/*! @brief Virtual function version of fetchLedgerObjects */
virtual std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;

/**
* @brief Returns the difference between ledgers: vector of objects
*
* Objects are made of a key value, vector of unsigned chars (blob),
* and a boolean detailing whether keys and blob match.
*
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::vector<LedgerObject>
*/
virtual std::vector<LedgerObject>
fetchLedgerDiff(
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

// Fetches a page of ledger objects, ordered by key/index.
// Used by ledger_data
/**
* @brief Fetches a page of ledger objects, ordered by key/index.
*
* @param cursor Important metadata returned every time paging occurs.
* @param ledgerSequence Standard unsigned integer.
* @param limit Paging limit as to how many transactions returned per page.
* @param outOfOrder Boolean on whether ledger page is out of order.
* @param yield Currently executing coroutine.
* @return LedgerPage
*/
LedgerPage
fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
@@ -235,63 +414,94 @@ public:
bool outOfOrder,
boost::asio::yield_context& yield) const;

// Fetches the successor to key/index
/*! @brief Fetches successor object from key/index. */
std::optional<LedgerObject>
fetchSuccessorObject(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const;
fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
const;

/*! @brief Fetches successor key from key/index. */
std::optional<ripple::uint256>
fetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const;
// Fetches the successor to key/index
fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const;

/*! @brief Virtual function version of fetchSuccessorKey. */
virtual std::optional<ripple::uint256>
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
const = 0;

/**
* @brief Fetches book offers.
*
* @param book Unsigned 256-bit integer.
* @param ledgerSequence Standard unsigned integer.
* @param limit Paging limit as to how many transactions returned per page.
* @param cursor Important metadata returned every time paging occurs.
* @param yield Currently executing coroutine.
* @return BookOffersPage
*/
BookOffersPage
fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursor,
boost::asio::yield_context& yield) const;

/**
* @brief Returns a ledger range
*
* Ledger range is a struct of min and max sequence numbers. Due to
* the use of [&], which denotes a special case of a lambda expression
* where values found outside the scope are passed by reference, wrt the
* currently executing coroutine.
*
* @return std::optional<LedgerRange>
*/
std::optional<LedgerRange>
hardFetchLedgerRange() const
{
return synchronous([&](boost::asio::yield_context yield) {
return hardFetchLedgerRange(yield);
});
return synchronous([&](boost::asio::yield_context yield) { return hardFetchLedgerRange(yield); });
}

/*! @brief Virtual function equivalent of hardFetchLedgerRange. */
virtual std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0;

// Doesn't throw DatabaseTimeout. Should be used with care.
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow() const;
// Doesn't throw DatabaseTimeout. Should be used with care.
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const;

/**
* @brief Writes to a specific ledger.
*
* @param ledgerInfo Const on ledger information.
* @param ledgerHeader r-value string representing ledger header.
*/
virtual void
writeLedger(
ripple::LedgerInfo const& ledgerInfo,
std::string&& ledgerHeader) = 0;
writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& ledgerHeader) = 0;

/**
* @brief Writes a new ledger object.
*
* The key and blob are r-value references and do NOT have memory addresses.
*
* @param key String represented as an r-value.
* @param seq Unsigned integer representing a sequence.
* @param blob r-value vector of unsigned characters (blob).
*/
virtual void
writeLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob);
writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob);

/**
* @brief Writes a new transaction.
*
* @param hash r-value reference. No memory address.
* @param seq Unsigned 32-bit integer.
* @param date Unsigned 32-bit integer.
* @param transaction r-value reference. No memory address.
* @param metadata r-value reference. No memory address.
*/
virtual void
writeTransaction(
std::string&& hash,
@@ -300,55 +510,70 @@ public:
std::string&& transaction,
std::string&& metadata) = 0;

/**
* @brief Write a new NFT.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeNFTs(std::vector<NFTsData>&& data) = 0;

/**
* @brief Write a new set of account transactions.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;

/**
* @brief Write a new transaction for a specific NFT.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;

/**
* @brief Write a new successor.
*
* @param key Passed in as an r-value reference.
* @param seq Unsigned 32-bit integer.
* @param successor Passed in as an r-value reference.
*/
virtual void
writeSuccessor(
std::string&& key,
std::uint32_t const seq,
std::string&& successor) = 0;
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0;

// Tell the database we are about to begin writing data for a particular
// ledger.
/*! @brief Tells database we will write data for a specific ledger. */
virtual void
startWrites() const = 0;

// Tell the database we have finished writing all data for a particular
// ledger
// TODO change the return value to represent different results. committed,
// write conflict, errored, successful but not committed
/**
* @brief Tells database we finished writing all data for a specific ledger.
*
* TODO: change the return value to represent different results:
* Committed, write conflict, errored, successful but not committed
*
* @param ledgerSequence Const unsigned 32-bit integer on ledger sequence.
* @return true
* @return false
*/
bool
finishWrites(std::uint32_t const ledgerSequence);

virtual bool
doOnlineDelete(
std::uint32_t numLedgersToKeep,
boost::asio::yield_context& yield) const = 0;
isTooBusy() const = 0;

// Open the database. Set up all of the necessary objects and
// data structures. After this call completes, the database is ready for
// use.
virtual void
open(bool readOnly) = 0;

// Close the database, releasing any resources
virtual void
close(){};

// *** private helper methods
private:
/**
* @brief Private helper method to write ledger object
*
* @param key r-value string representing key.
* @param seq Unsigned 32-bit integer representing sequence.
* @param blob r-value vector of unsigned chars.
*/
virtual void
doWriteLedgerObject(
std::string&& key,
std::uint32_t const seq,
std::string&& blob) = 0;
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0;

virtual bool
doFinishWrites() = 0;
@@ -356,4 +581,3 @@ private:

} // namespace Backend
using BackendInterface = Backend::BackendInterface;
#endif
File diff suppressed because it is too large
@@ -1,16 +1,38 @@
|
||||
#ifndef CLIO_BACKEND_DBHELPERS_H_INCLUDED
|
||||
#define CLIO_BACKEND_DBHELPERS_H_INCLUDED
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/ledger/ReadView.h>
|
||||
#include <ripple/protocol/SField.h>
|
||||
#include <ripple/protocol/STAccount.h>
|
||||
#include <ripple/protocol/TxMeta.h>
|
||||
|
||||
#include <boost/container/flat_set.hpp>
|
||||
#include <backend/Pg.h>
|
||||
|
||||
#include <backend/Types.h>
|
||||
|
||||
/// Struct used to keep track of what to write to
|
||||
/// account_transactions/account_tx tables
|
||||
/**
|
||||
* @brief Struct used to keep track of what to write to account_transactions/account_tx tables
|
||||
*/
|
||||
struct AccountTransactionsData
|
||||
{
|
||||
boost::container::flat_set<ripple::AccountID> accounts;
|
||||
@@ -18,10 +40,7 @@ struct AccountTransactionsData
|
||||
std::uint32_t transactionIndex;
|
||||
ripple::uint256 txHash;
|
||||
|
||||
AccountTransactionsData(
|
||||
ripple::TxMeta& meta,
|
||||
ripple::uint256 const& txHash,
|
||||
beast::Journal& j)
|
||||
AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash, beast::Journal& j)
|
||||
: accounts(meta.getAffectedAccounts())
|
||||
, ledgerSequence(meta.getLgrSeq())
|
||||
, transactionIndex(meta.getIndex())
|
||||
@@ -32,8 +51,11 @@ struct AccountTransactionsData
|
||||
AccountTransactionsData() = default;
|
||||
};
|
||||
|
||||
/// Represents a link from a tx to an NFT that was targeted/modified/created
|
||||
/// by it. Gets written to nf_token_transactions table and the like.
|
||||
/**
|
||||
* @brief Represents a link from a tx to an NFT that was targeted/modified/created by it
|
||||
*
|
||||
* Gets written to nf_token_transactions table and the like.
|
||||
*/
|
||||
struct NFTTransactionsData
|
||||
{
|
||||
ripple::uint256 tokenID;
|
||||
@@ -41,20 +63,17 @@ struct NFTTransactionsData
|
||||
std::uint32_t transactionIndex;
|
||||
ripple::uint256 txHash;
|
||||
|
||||
NFTTransactionsData(
|
||||
ripple::uint256 const& tokenID,
|
||||
ripple::TxMeta const& meta,
|
||||
ripple::uint256 const& txHash)
|
||||
: tokenID(tokenID)
|
||||
, ledgerSequence(meta.getLgrSeq())
|
||||
, transactionIndex(meta.getIndex())
|
||||
, txHash(txHash)
|
||||
NFTTransactionsData(ripple::uint256 const& tokenID, ripple::TxMeta const& meta, ripple::uint256 const& txHash)
|
||||
: tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), txHash(txHash)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
/// Represents an NFT state at a particular ledger. Gets written to nf_tokens
|
||||
/// table and the like.
|
||||
/**
|
||||
* @brief Represents an NFT state at a particular ledger.
|
||||
*
|
||||
* Gets written to nf_tokens table and the like.
|
||||
*/
|
||||
struct NFTsData
|
||||
{
|
||||
ripple::uint256 tokenID;
|
||||
@@ -64,16 +83,38 @@ struct NFTsData
|
||||
// final state of an NFT per ledger. Since we pull this from transactions
|
||||
// we keep track of which tx index created this so we can de-duplicate, as
|
||||
// it is possible for one ledger to have multiple txs that change the
|
||||
// state of the same NFT.
|
||||
std::uint32_t transactionIndex;
|
||||
// state of the same NFT. This field is not applicable when we are loading
|
||||
// initial NFT state via ledger objects, since we do not have to tiebreak
|
||||
// NFT state for a given ledger in that case.
|
||||
std::optional<std::uint32_t> transactionIndex;
|
||||
ripple::AccountID owner;
|
||||
bool isBurned;
|
||||
// We only set the uri if this is a mint tx, or if we are
|
||||
// loading initial state from NFTokenPage objects. In other words,
|
||||
// uri should only be set if the etl process believes this NFT hasn't
|
||||
// been seen before in our local database. We do this so that we don't
|
||||
// write to the the nf_token_uris table every
|
||||
// time the same NFT changes hands. We also can infer if there is a URI
|
||||
// that we need to write to the issuer_nf_tokens table.
|
||||
std::optional<ripple::Blob> uri;
|
||||
bool isBurned = false;
|
||||
|
||||
// This constructor is used when parsing an NFTokenMint tx.
|
||||
// Unfortunately because of the extreme edge case of being able to
|
||||
// re-mint an NFT with the same ID, we must explicitly record a null
|
||||
// URI. For this reason, we _always_ write this field as a result of
|
||||
// this tx.
|
||||
NFTsData(
|
||||
ripple::uint256 const& tokenID,
|
||||
ripple::AccountID const& owner,
|
||||
ripple::TxMeta const& meta,
|
||||
bool isBurned)
|
||||
ripple::Blob const& uri,
|
||||
ripple::TxMeta const& meta)
|
||||
: tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), owner(owner), uri(uri)
|
||||
{
|
||||
}
|
||||
|
||||
// This constructor is used when parsing an NFTokenBurn or
|
||||
// NFTokenAcceptOffer tx
|
||||
NFTsData(ripple::uint256 const& tokenID, ripple::AccountID const& owner, ripple::TxMeta const& meta, bool isBurned)
|
||||
: tokenID(tokenID)
|
||||
, ledgerSequence(meta.getLgrSeq())
|
||||
, transactionIndex(meta.getIndex())
|
||||
@@ -81,6 +122,21 @@ struct NFTsData
|
||||
, isBurned(isBurned)
|
||||
{
|
||||
}
|
||||
|
||||
// This constructor is used when parsing an NFTokenPage directly from
|
||||
// ledger state.
|
||||
// Unfortunately because of the extreme edge case of being able to
|
||||
// re-mint an NFT with the same ID, we must explicitly record a null
|
||||
// URI. For this reason, we _always_ write this field as a result of
|
||||
// this tx.
|
||||
NFTsData(
|
||||
ripple::uint256 const& tokenID,
|
||||
std::uint32_t const ledgerSequence,
|
||||
ripple::AccountID const& owner,
|
||||
ripple::Blob const& uri)
|
||||
: tokenID(tokenID), ledgerSequence(ledgerSequence), owner(owner), uri(uri)
|
||||
{
|
||||
}
|
||||
};

 template <class T>
@@ -119,8 +175,7 @@ isBookDir(T const& key, R const& object)
     if (!isDirNode(object))
         return false;

-    ripple::STLedgerEntry const sle{
-        ripple::SerialIter{object.data(), object.size()}, key};
+    ripple::STLedgerEntry const sle{ripple::SerialIter{object.data(), object.size()}, key};
     return !sle[~ripple::sfOwner].has_value();
 }

@@ -147,30 +202,6 @@ getBookBase(T const& key)
     return ret;
 }

-inline ripple::LedgerInfo
-deserializeHeader(ripple::Slice data)
-{
-    ripple::SerialIter sit(data.data(), data.size());
-
-    ripple::LedgerInfo info;
-
-    info.seq = sit.get32();
-    info.drops = sit.get64();
-    info.parentHash = sit.get256();
-    info.txHash = sit.get256();
-    info.accountHash = sit.get256();
-    info.parentCloseTime =
-        ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
-    info.closeTime =
-        ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
-    info.closeTimeResolution = ripple::NetClock::duration{sit.get8()};
-    info.closeFlags = sit.get8();
-
-    info.hash = sit.get256();
-
-    return info;
-}
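For reference, the removed deserializeHeader reads fields in the exact order the ledger header is serialized; a mirror-image writer sketch (editor's illustration using rippled's Serializer, not code from this commit):

    ripple::Serializer s;
    s.add32(info.seq);
    s.add64(info.drops.drops());
    s.add256(info.parentHash);
    s.add256(info.txHash);
    s.add256(info.accountHash);
    s.add32(info.parentCloseTime.time_since_epoch().count());
    s.add32(info.closeTime.time_since_epoch().count());
    s.add8(info.closeTimeResolution.count());
    s.add8(info.closeFlags);
    s.add256(info.hash);  // hash is appended after the standard header fields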

 inline std::string
 uint256ToString(ripple::uint256 const& uint)
 {
@@ -178,4 +209,3 @@ uint256ToString(ripple::uint256 const& uint)
 }

 static constexpr std::uint32_t rippleEpochStart = 946684800;
-#endif
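rippleEpochStart above is the Unix timestamp of the Ripple epoch (2000-01-01T00:00:00Z), so converting a ledger close time to Unix time is a single addition (illustration only):

    // e.g. rippleTime 700000000 -> Unix time 1646684800
    std::uint32_t
    rippleTimeToUnixTime(std::uint32_t rippleTime)
    {
        return rippleTime + rippleEpochStart;
    }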

@@ -1,24 +1,41 @@
-#include <backend/SimpleCache.h>
+//------------------------------------------------------------------------------
+/*
+    This file is part of clio: https://github.com/XRPLF/clio
+    Copyright (c) 2022, the clio developers.
+
+    Permission to use, copy, modify, and distribute this software for any
+    purpose with or without fee is hereby granted, provided that the above
+    copyright notice and this permission notice appear in all copies.
+
+    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#include <backend/LedgerCache.h>

 namespace Backend {

 uint32_t
-SimpleCache::latestLedgerSequence() const
+LedgerCache::latestLedgerSequence() const
 {
     std::shared_lock lck{mtx_};
     return latestSeq_;
 }

 void
-SimpleCache::update(
-    std::vector<LedgerObject> const& objs,
-    uint32_t seq,
-    bool isBackground)
+LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground)
 {
     if (disabled_)
         return;

     {
-        std::unique_lock lck{mtx_};
+        std::scoped_lock lck{mtx_};
         if (seq > latestSeq_)
         {
             assert(seq == latestSeq_ + 1 || latestSeq_ == 0);
@@ -48,7 +65,7 @@ SimpleCache::update(
 }

 std::optional<LedgerObject>
-SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
+LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
 {
     if (!full_)
         return {};
@@ -64,7 +81,7 @@ SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
 }

 std::optional<LedgerObject>
-SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
+LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
 {
     if (!full_)
         return {};
@@ -77,12 +94,13 @@ SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
     --e;
     return {{e->first, e->second.blob}};
 }

 std::optional<Blob>
-SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
+LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
 {
+    std::shared_lock lck{mtx_};
     if (seq > latestSeq_)
         return {};
-    std::shared_lock lck{mtx_};
     objectReqCounter_++;
     auto e = map_.find(key);
     if (e == map_.end())
@@ -94,45 +112,49 @@ SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
 }

 void
-SimpleCache::setDisabled()
+LedgerCache::setDisabled()
 {
     disabled_ = true;
 }

 void
-SimpleCache::setFull()
+LedgerCache::setFull()
 {
     if (disabled_)
         return;

     full_ = true;
-    std::unique_lock lck{mtx_};
+    std::scoped_lock lck{mtx_};
     deletes_.clear();
 }

 bool
-SimpleCache::isFull() const
+LedgerCache::isFull() const
 {
     return full_;
 }

 size_t
-SimpleCache::size() const
+LedgerCache::size() const
 {
     std::shared_lock lck{mtx_};
     return map_.size();
 }

 float
-SimpleCache::getObjectHitRate() const
+LedgerCache::getObjectHitRate() const
 {
     if (!objectReqCounter_)
         return 1;
     return ((float)objectHitCounter_) / objectReqCounter_;
 }

 float
-SimpleCache::getSuccessorHitRate() const
+LedgerCache::getSuccessorHitRate() const
 {
     if (!successorReqCounter_)
         return 1;
     return ((float)successorHitCounter_) / successorReqCounter_;
 }

 } // namespace Backend
98 src/backend/LedgerCache.h Normal file
@@ -0,0 +1,98 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
#include <backend/Types.h>
#include <atomic>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <unordered_set>
#include <utility>
#include <vector>

namespace Backend {

class LedgerCache
{
    struct CacheEntry
    {
        uint32_t seq = 0;
        Blob blob;
    };

    // counters for fetchLedgerObject(s) hit rate
    mutable std::atomic_uint32_t objectReqCounter_ = 0;
    mutable std::atomic_uint32_t objectHitCounter_ = 0;

    // counters for fetchSuccessorKey hit rate
    mutable std::atomic_uint32_t successorReqCounter_ = 0;
    mutable std::atomic_uint32_t successorHitCounter_ = 0;

    std::map<ripple::uint256, CacheEntry> map_;

    mutable std::shared_mutex mtx_;
    uint32_t latestSeq_ = 0;
    std::atomic_bool full_ = false;
    std::atomic_bool disabled_ = false;

    // temporary set to prevent background thread from writing already deleted data. not used when cache is full
    std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;

public:
    // Update the cache with new ledger objects; set isBackground to true when writing old data from a background thread
    void
    update(std::vector<LedgerObject> const& blobs, uint32_t seq, bool isBackground = false);

    std::optional<Blob>
    get(ripple::uint256 const& key, uint32_t seq) const;

    // always returns empty optional if isFull() is false
    std::optional<LedgerObject>
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const;

    // always returns empty optional if isFull() is false
    std::optional<LedgerObject>
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const;

    void
    setDisabled();

    void
    setFull();

    uint32_t
    latestLedgerSequence() const;

    // whether the cache has all data for the most recent ledger
    bool
    isFull() const;

    size_t
    size() const;

    float
    getObjectHitRate() const;

    float
    getSuccessorHitRate() const;
};

} // namespace Backend
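A minimal usage sketch of the cache API above (editor's illustration; function and variable names here are hypothetical, not part of the diff):

    // Sketch: foreground update after ingesting a ledger, then a read.
    void
    exampleUse(Backend::LedgerCache& cache, std::vector<Backend::LedgerObject> const& diff, uint32_t seq)
    {
        ripple::uint256 key;  // key of some ledger object

        cache.update(diff, seq);  // isBackground defaults to false

        if (auto blob = cache.get(key, seq))
        {
            // hit: blob holds the object as of ledger `seq`
        }

        // successor/predecessor lookups only return values once the cache is full
        if (cache.isFull())
        {
            auto next = cache.getSuccessor(key, seq);
            (void)next;
        }
    }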
1788 src/backend/Pg.cpp
File diff suppressed because it is too large

564 src/backend/Pg.h
@@ -1,564 +0,0 @@
#ifndef RIPPLE_CORE_PG_H_INCLUDED
#define RIPPLE_CORE_PG_H_INCLUDED

#include <ripple/basics/StringUtilities.h>
#include <ripple/basics/chrono.h>
#include <ripple/ledger/ReadView.h>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/icl/closed_interval.hpp>
#include <boost/json.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/log/trivial.hpp>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <libpq-fe.h>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

// These postgres structs must be freed only by the postgres API.
using pg_result_type = std::unique_ptr<PGresult, void (*)(PGresult*)>;
using pg_connection_type = std::unique_ptr<PGconn, void (*)(PGconn*)>;
using asio_socket_type = std::unique_ptr<
    boost::asio::ip::tcp::socket,
    void (*)(boost::asio::ip::tcp::socket*)>;

/** first: command
 * second: parameter values
 *
 * The 2nd member takes an optional string to
 * distinguish between NULL parameters and empty strings. An empty
 * item corresponds to a NULL parameter.
 *
 * Postgres reads each parameter as a c-string, regardless of actual type.
 * Binary types (bytea) need to be converted to hex and prepended with
 * \x ("\\x").
 */
using pg_params =
    std::pair<char const*, std::vector<std::optional<std::string>>>;

/** Parameter values for pg API. */
using pg_formatted_params = std::vector<char const*>;
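For illustration, a pg_params value distinguishing a NULL from an empty string might look like this (hypothetical statement and values; an empty optional maps to SQL NULL):

    pg_params params{
        "INSERT INTO example_table VALUES ($1::bytea, $2::bigint, $3::text)",
        {std::string{"\\x0a0b0c"}, std::nullopt, std::string{}}};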

/** Parameters for managing postgres connections. */
struct PgConfig
{
    /** Maximum connections allowed to db. */
    std::size_t max_connections{1000};
    /** Close idle connections past this duration. */
    std::chrono::seconds timeout{600};

    /** Index of DB connection parameter names. */
    std::vector<char const*> keywordsIdx;
    /** DB connection parameter names. */
    std::vector<std::string> keywords;
    /** Index of DB connection parameter values. */
    std::vector<char const*> valuesIdx;
    /** DB connection parameter values. */
    std::vector<std::string> values;
};

//-----------------------------------------------------------------------------

/** Class that operates on postgres query results.
 *
 * The functions that return results do not check first whether the
 * expected results are actually there. Therefore, the caller first needs
 * to check whether or not a valid response was returned using the operator
 * bool() overload. If number of tuples or fields are unknown, then check
 * those. Each result field should be checked for null before attempting
 * to return results. Finally, the caller must know the type of the field
 * before calling the corresponding function to return a field. Postgres
 * internally stores each result field as null-terminated strings.
 */
class PgResult
{
    // The result object must be freed using the libpq API PQclear() call.
    pg_result_type result_{nullptr, [](PGresult* result) { PQclear(result); }};
    std::optional<std::pair<ExecStatusType, std::string>> error_;

public:
    /** Constructor for when the process is stopping. */
    PgResult()
    {
    }

    /** Constructor for successful query results.
     *
     * @param result Query result.
     */
    explicit PgResult(pg_result_type&& result) : result_(std::move(result))
    {
    }

    /** Constructor for failed query results.
     *
     * @param result Query result that contains error information.
     * @param conn Postgres connection that contains error information.
     */
    PgResult(PGresult* result, PGconn* conn)
        : error_({PQresultStatus(result), PQerrorMessage(conn)})
    {
    }

    /** Return field as a null-terminated string pointer.
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    char const*
    c_str(int ntuple = 0, int nfield = 0) const
    {
        return PQgetvalue(result_.get(), ntuple, nfield);
    }

    std::vector<unsigned char>
    asUnHexedBlob(int ntuple = 0, int nfield = 0) const
    {
        std::string_view view{c_str(ntuple, nfield) + 2};
        auto res = ripple::strUnHex(view.size(), view.cbegin(), view.cend());
        if (res)
            return *res;
        return {};
    }

    ripple::uint256
    asUInt256(int ntuple = 0, int nfield = 0) const
    {
        ripple::uint256 val;
        if (!val.parseHex(c_str(ntuple, nfield) + 2))
            throw std::runtime_error("Pg - failed to parse hex into uint256");
        return val;
    }

    /** Return field as equivalent to Postgres' INT type (32 bit signed).
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null, or that the type is that requested.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    std::int32_t
    asInt(int ntuple = 0, int nfield = 0) const
    {
        return boost::lexical_cast<std::int32_t>(
            PQgetvalue(result_.get(), ntuple, nfield));
    }

    /** Return field as equivalent to Postgres' BIGINT type (64 bit signed).
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null, or that the type is that requested.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    std::int64_t
    asBigInt(int ntuple = 0, int nfield = 0) const
    {
        return boost::lexical_cast<std::int64_t>(
            PQgetvalue(result_.get(), ntuple, nfield));
    }

    /** Returns whether the field is NULL or not.
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Whether field is NULL.
     */
    bool
    isNull(int ntuple = 0, int nfield = 0) const
    {
        return PQgetisnull(result_.get(), ntuple, nfield);
    }

    /** Check whether a valid response occurred.
     *
     * @return Whether or not the query returned a valid response.
     */
    operator bool() const
    {
        return result_ != nullptr;
    }

    /** Message describing the query results suitable for diagnostics.
     *
     * If error, then the postgres error type and message are returned.
     * Otherwise, "ok".
     *
     * @return Query result message.
     */
    std::string
    msg() const;

    /** Get number of rows in result.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Number of result rows.
     */
    int
    ntuples() const
    {
        return PQntuples(result_.get());
    }

    /** Get number of fields in result.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Number of result fields.
     */
    int
    nfields() const
    {
        return PQnfields(result_.get());
    }

    /** Return result status of the command.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Query result status.
     */
    ExecStatusType
    status() const
    {
        return PQresultStatus(result_.get());
    }
};
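A hypothetical caller following the checking discipline described in the class comment (sketch only):

    void
    readFirstRow(PgResult const& res)
    {
        if (!res)  // operator bool(): no valid response (e.g. process stopping)
            return;
        if (res.status() != PGRES_TUPLES_OK || res.ntuples() == 0)
            return;
        if (!res.isNull(0, 0))
        {
            auto const seq = res.asBigInt(0, 0);  // caller must know the field type
            (void)seq;
        }
    }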

/* Class that contains and operates upon a postgres connection. */
class Pg
{
    friend class PgPool;
    friend class PgQuery;

    PgConfig const& config_;
    boost::asio::io_context::strand strand_;

    asio_socket_type socket_{nullptr, [](boost::asio::ip::tcp::socket*) {}};

    // The connection object must be freed using the libpq API PQfinish() call.
    pg_connection_type conn_{nullptr, [](PGconn* conn) { PQfinish(conn); }};

    inline asio_socket_type
    getSocket(boost::asio::yield_context& strand);

    inline PgResult
    waitForStatus(boost::asio::yield_context& yield, ExecStatusType expected);

    inline void
    flush(boost::asio::yield_context& yield);

    /** Clear results from the connection.
     *
     * Results from previous commands must be cleared before new commands
     * can be processed. This function should be called on connections
     * that weren't processed completely before being reused, such as
     * when being checked-in.
     *
     * @return whether or not connection still exists.
     */
    bool
    clear();

    /** Connect to postgres.
     *
     * Idempotently connects to postgres by first checking whether an
     * existing connection is already present. If connection is not present
     * or in an errored state, reconnects to the database.
     */
    void
    connect(boost::asio::yield_context& yield);

    /** Disconnect from postgres. */
    void
    disconnect()
    {
        conn_.reset();
        socket_.reset();
    }

    /** Execute postgres query.
     *
     * If parameters are included, then the command should contain only a
     * single SQL statement. If no parameters, then multiple SQL statements
     * delimited by semi-colons can be processed. The response is from
     * the last command executed.
     *
     * @param command postgres API command string.
     * @param nParams postgres API number of parameters.
     * @param values postgres API array of parameters.
     * @return Query result object.
     */
    PgResult
    query(
        char const* command,
        std::size_t const nParams,
        char const* const* values,
        boost::asio::yield_context& yield);

    /** Execute postgres query with no parameters.
     *
     * @param command Query string.
     * @return Query result object.
     */
    PgResult
    query(char const* command, boost::asio::yield_context& yield)
    {
        return query(command, 0, nullptr, yield);
    }

    /** Execute postgres query with parameters.
     *
     * @param dbParams Database command and parameter values.
     * @return Query result object.
     */
    PgResult
    query(pg_params const& dbParams, boost::asio::yield_context& yield);

    /** Insert multiple records into a table using Postgres' bulk COPY.
     *
     * Throws upon error.
     *
     * @param table Name of table for import.
     * @param records Records in the COPY IN format.
     */
    void
    bulkInsert(
        char const* table,
        std::string const& records,
        boost::asio::yield_context& yield);

public:
    /** Constructor for Pg class.
     *
     * @param config Config parameters.
     * @param ctx io_context for async operations.
     */
    Pg(PgConfig const& config, boost::asio::io_context& ctx)
        : config_(config), strand_(ctx)
    {
    }
};

//-----------------------------------------------------------------------------

/** Database connection pool.
 *
 * Allow re-use of postgres connections. Postgres connections are created
 * as needed until configurable limit is reached. After use, each connection
 * is placed in a container ordered by time of use. Each request for
 * a connection grabs the most recently used connection from the container.
 * If none are available, a new connection is used (up to configured limit).
 * Idle connections are destroyed periodically after configurable
 * timeout duration.
 *
 * This should be stored as a shared pointer so PgQuery objects can safely
 * outlive it.
 */
class PgPool
{
    friend class PgQuery;

    using clock_type = std::chrono::steady_clock;

    boost::asio::io_context& ioc_;
    PgConfig config_;
    std::mutex mutex_;
    std::condition_variable cond_;
    std::size_t connections_{};
    bool stop_{false};

    /** Idle database connections ordered by timestamp to allow timing out. */
    std::multimap<std::chrono::time_point<clock_type>, std::unique_ptr<Pg>>
        idle_;

    /** Get a postgres connection object.
     *
     * Return the most recent idle connection in the pool, if available.
     * Otherwise, return a new connection unless we're at the threshold.
     * If so, then wait until a connection becomes available.
     *
     * @return Postgres object.
     */
    std::unique_ptr<Pg>
    checkout();

    /** Return a postgres object to the pool for reuse.
     *
     * If connection is healthy, place in pool for reuse. After calling this,
     * the caller no longer has a connection unless checkout() is called.
     *
     * @param pg Pg object.
     */
    void
    checkin(std::unique_ptr<Pg>& pg);

public:
    /** Connection pool constructor.
     *
     * @param ioc io_context for async operations.
     * @param config Postgres config.
     */
    PgPool(boost::asio::io_context& ioc, boost::json::object const& config);

    ~PgPool()
    {
        onStop();
    }

    PgConfig&
    config()
    {
        return config_;
    }

    /** Initiate idle connection timer.
     *
     * The PgPool object needs to be fully constructed to support asynchronous
     * operations.
     */
    void
    setup();

    /** Prepare for process shutdown. (Stoppable) */
    void
    onStop();
};

//-----------------------------------------------------------------------------

/** Class to query postgres.
 *
 * This class should be used by functions outside of this
 * compilation unit for querying postgres. It automatically acquires and
 * relinquishes a database connection to handle each query.
 */
class PgQuery
{
private:
    std::shared_ptr<PgPool> pool_;
    std::unique_ptr<Pg> pg_;

public:
    PgQuery() = delete;

    PgQuery(std::shared_ptr<PgPool> const& pool)
        : pool_(pool), pg_(pool->checkout())
    {
    }

    ~PgQuery()
    {
        pool_->checkin(pg_);
    }

    // TODO. add sendQuery and getResult, for sending the query and getting the
    // result asynchronously. This could be useful for sending a bunch of
    // requests concurrently

    /** Execute postgres query with parameters.
     *
     * @param dbParams Database command with parameters.
     * @return Result of query, including errors.
     */
    PgResult
    operator()(pg_params const& dbParams, boost::asio::yield_context& yield)
    {
        if (!pg_)  // It means we're stopping. Return empty result.
            return PgResult();
        return pg_->query(dbParams, yield);
    }

    /** Execute postgres query with only command statement.
     *
     * @param command Command statement.
     * @return Result of query, including errors.
     */
    PgResult
    operator()(char const* command, boost::asio::yield_context& yield)
    {
        return operator()(pg_params{command, {}}, yield);
    }

    /** Insert multiple records into a table using Postgres' bulk COPY.
     *
     * Throws upon error.
     *
     * @param table Name of table for import.
     * @param records Records in the COPY IN format.
     */
    void
    bulkInsert(
        char const* table,
        std::string const& records,
        boost::asio::yield_context& yield)
    {
        pg_->bulkInsert(table, records, yield);
    }
};
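Typical call pattern, from inside a boost::asio coroutine (sketch only; the table name is taken from queries elsewhere in this codebase):

    void
    exampleQuery(std::shared_ptr<PgPool> const& pool, boost::asio::yield_context yield)
    {
        PgQuery pg(pool);  // checks a connection out of the pool; destructor checks it back in
        auto res = pg("SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1", yield);
        if (res && res.ntuples() > 0)
        {
            auto const latest = res.asBigInt(0, 0);
            (void)latest;
        }
    }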

//-----------------------------------------------------------------------------

/** Create Postgres connection pool manager.
 *
 * @param ioc io_context for async operations.
 * @param pgConfig Configuration for Postgres.
 * @return Postgres connection pool manager.
 */
std::shared_ptr<PgPool>
make_PgPool(boost::asio::io_context& ioc, boost::json::object const& pgConfig);

/** Initialize the Postgres schema.
 *
 * This function ensures that the database is running the latest version
 * of the schema.
 *
 * @param pool Postgres connection pool manager.
 */
void
initSchema(std::shared_ptr<PgPool> const& pool);
void
initAccountTx(std::shared_ptr<PgPool> const& pool);

// Load the ledger info for the specified ledger from the database
// @param whichLedger specifies the ledger to load via ledger sequence, ledger
// hash or std::monostate (which loads the most recent)
// @return the LedgerInfo, if found
std::optional<ripple::LedgerInfo>
getLedger(
    std::variant<std::monostate, ripple::uint256, std::uint32_t> const&
        whichLedger,
    std::shared_ptr<PgPool>& pgPool);

#endif  // RIPPLE_CORE_PG_H_INCLUDED
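Putting the free functions above together (sketch; the config keys are assumptions based on PgConfig, not documented here):

    boost::asio::io_context ioc;
    boost::json::object config;  // e.g. {"max_connections": 100, "timeout": 600}
    auto pool = make_PgPool(ioc, config);
    pool->setup();     // start the idle-connection timeout timer
    initSchema(pool);  // bring the database schema up to date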

@@ -1,895 +0,0 @@
#include <boost/asio.hpp>
#include <boost/format.hpp>
#include <backend/PostgresBackend.h>
#include <thread>

namespace Backend {

// Type alias for async completion handlers
using completion_token = boost::asio::yield_context;
using function_type = void(boost::system::error_code);
using result_type = boost::asio::async_result<completion_token, function_type>;
using handler_type = typename result_type::completion_handler_type;

struct HandlerWrapper
{
    handler_type handler;

    HandlerWrapper(handler_type&& handler_) : handler(std::move(handler_))
    {
    }
};

PostgresBackend::PostgresBackend(
    boost::asio::io_context& ioc,
    boost::json::object const& config)
    : BackendInterface(config)
    , pgPool_(make_PgPool(ioc, config))
    , writeConnection_(pgPool_)
{
    if (config.contains("write_interval"))
    {
        writeInterval_ = config.at("write_interval").as_int64();
    }
}

void
PostgresBackend::writeLedger(
    ripple::LedgerInfo const& ledgerInfo,
    std::string&& ledgerHeader)
{
    synchronous([&](boost::asio::yield_context yield) {
        auto cmd = boost::format(
            R"(INSERT INTO ledgers
               VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");

        auto ledgerInsert = boost::str(
            cmd % ledgerInfo.seq % ripple::strHex(ledgerInfo.hash) %
            ripple::strHex(ledgerInfo.parentHash) % ledgerInfo.drops.drops() %
            ledgerInfo.closeTime.time_since_epoch().count() %
            ledgerInfo.parentCloseTime.time_since_epoch().count() %
            ledgerInfo.closeTimeResolution.count() % ledgerInfo.closeFlags %
            ripple::strHex(ledgerInfo.accountHash) %
            ripple::strHex(ledgerInfo.txHash));

        auto res = writeConnection_(ledgerInsert.data(), yield);
        abortWrite_ = !res;
        inProcessLedger = ledgerInfo.seq;
    });
}

void
PostgresBackend::writeAccountTransactions(
    std::vector<AccountTransactionsData>&& data)
{
    if (abortWrite_)
        return;
    PgQuery pg(pgPool_);
    for (auto const& record : data)
    {
        for (auto const& a : record.accounts)
        {
            std::string acct = ripple::strHex(a);
            accountTxBuffer_ << "\\\\x" << acct << '\t'
                             << std::to_string(record.ledgerSequence) << '\t'
                             << std::to_string(record.transactionIndex) << '\t'
                             << "\\\\x" << ripple::strHex(record.txHash)
                             << '\n';
        }
    }
}
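The rows buffered above are Postgres COPY text-format input: one record per line, tab-separated columns, bytea values hex-encoded behind a \x prefix (hence the escaped \\x in the code). A hypothetical account_transactions row, with tabs shown as arrows, would look roughly like:

    \x4F3C9A...(account)  ->  70000000  ->  42  ->  \xB81D...(tx hash)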

void
PostgresBackend::writeNFTTransactions(std::vector<NFTTransactionsData>&& data)
{
    throw std::runtime_error("Not implemented");
}

void
PostgresBackend::doWriteLedgerObject(
    std::string&& key,
    std::uint32_t const seq,
    std::string&& blob)
{
    synchronous([&](boost::asio::yield_context yield) {
        if (abortWrite_)
            return;
        objectsBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
                       << std::to_string(seq) << '\t' << "\\\\x"
                       << ripple::strHex(blob) << '\n';
        numRowsInObjectsBuffer_++;
        // If the buffer gets too large, the insert fails. Not sure why. So we
        // insert after 1 million records
        if (numRowsInObjectsBuffer_ % writeInterval_ == 0)
        {
            BOOST_LOG_TRIVIAL(info)
                << __func__ << " Flushing large buffer. num objects = "
                << numRowsInObjectsBuffer_;
            writeConnection_.bulkInsert("objects", objectsBuffer_.str(), yield);
            BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
            objectsBuffer_.str("");
        }
    });
}

void
PostgresBackend::writeSuccessor(
    std::string&& key,
    std::uint32_t const seq,
    std::string&& successor)
{
    synchronous([&](boost::asio::yield_context yield) {
        if (range)
        {
            if (successors_.count(key) > 0)
                return;
            successors_.insert(key);
        }
        successorBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
                         << std::to_string(seq) << '\t' << "\\\\x"
                         << ripple::strHex(successor) << '\n';
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << ripple::strHex(key) << " - " << std::to_string(seq);
        numRowsInSuccessorBuffer_++;
        if (numRowsInSuccessorBuffer_ % writeInterval_ == 0)
        {
            BOOST_LOG_TRIVIAL(info)
                << __func__ << " Flushing large buffer. num successors = "
                << numRowsInSuccessorBuffer_;
            writeConnection_.bulkInsert(
                "successor", successorBuffer_.str(), yield);
            BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
            successorBuffer_.str("");
        }
    });
}

void
PostgresBackend::writeTransaction(
    std::string&& hash,
    std::uint32_t const seq,
    std::uint32_t const date,
    std::string&& transaction,
    std::string&& metadata)
{
    if (abortWrite_)
        return;
    transactionsBuffer_ << "\\\\x" << ripple::strHex(hash) << '\t'
                        << std::to_string(seq) << '\t' << std::to_string(date)
                        << '\t' << "\\\\x" << ripple::strHex(transaction)
                        << '\t' << "\\\\x" << ripple::strHex(metadata) << '\n';
}

void
PostgresBackend::writeNFTs(std::vector<NFTsData>&& data)
{
    throw std::runtime_error("Not implemented");
}

std::uint32_t
checkResult(PgResult const& res, std::uint32_t const numFieldsExpected)
{
    if (!res)
    {
        auto msg = res.msg();
        BOOST_LOG_TRIVIAL(error) << __func__ << " - " << msg;
        if (msg.find("statement timeout") != std::string::npos)
            throw DatabaseTimeout();
        assert(false);
        throw DatabaseTimeout();
    }
    if (res.status() != PGRES_TUPLES_OK)
    {
        std::stringstream msg;
        msg << " : Postgres response should have been "
               "PGRES_TUPLES_OK but instead was "
            << res.status() << " - msg = " << res.msg();
        BOOST_LOG_TRIVIAL(error) << __func__ << " - " << msg.str();
        assert(false);
        throw DatabaseTimeout();
    }

    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " Postgres result msg : " << res.msg();
    if (res.isNull() || res.ntuples() == 0)
    {
        return 0;
    }
    else if (res.ntuples() > 0)
    {
        if (res.nfields() != numFieldsExpected)
        {
            std::stringstream msg;
            msg << "Wrong number of fields in Postgres "
                   "response. Expected "
                << numFieldsExpected << ", but got " << res.nfields();
            assert(false);
            throw std::runtime_error(msg.str());
        }
    }
    return res.ntuples();
}

ripple::LedgerInfo
parseLedgerInfo(PgResult const& res)
{
    std::int64_t ledgerSeq = res.asBigInt(0, 0);
    ripple::uint256 hash = res.asUInt256(0, 1);
    ripple::uint256 prevHash = res.asUInt256(0, 2);
    std::int64_t totalCoins = res.asBigInt(0, 3);
    std::int64_t closeTime = res.asBigInt(0, 4);
    std::int64_t parentCloseTime = res.asBigInt(0, 5);
    std::int64_t closeTimeRes = res.asBigInt(0, 6);
    std::int64_t closeFlags = res.asBigInt(0, 7);
    ripple::uint256 accountHash = res.asUInt256(0, 8);
    ripple::uint256 txHash = res.asUInt256(0, 9);

    using time_point = ripple::NetClock::time_point;
    using duration = ripple::NetClock::duration;

    ripple::LedgerInfo info;
    info.seq = ledgerSeq;
    info.hash = hash;
    info.parentHash = prevHash;
    info.drops = totalCoins;
    info.closeTime = time_point{duration{closeTime}};
    info.parentCloseTime = time_point{duration{parentCloseTime}};
    info.closeFlags = closeFlags;
    info.closeTimeResolution = duration{closeTimeRes};
    info.accountHash = accountHash;
    info.txHash = txHash;
    info.validated = true;
    return info;
}

std::optional<std::uint32_t>
PostgresBackend::fetchLatestLedgerSequence(
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    auto const query =
        "SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1";

    if (auto res = pgQuery(query, yield); checkResult(res, 1))
        return res.asBigInt(0, 0);

    return {};
}

std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerBySequence(
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT * FROM ledgers WHERE ledger_seq = "
        << std::to_string(sequence);

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 10))
        return parseLedgerInfo(res);

    return {};
}

std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerByHash(
    ripple::uint256 const& hash,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT * FROM ledgers WHERE ledger_hash = \'\\x"
        << ripple::to_string(hash) << "\'";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 10))
        return parseLedgerInfo(res);

    return {};
}

std::optional<LedgerRange>
PostgresBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
{
    auto range = PgQuery(pgPool_)("SELECT complete_ledgers()", yield);
    if (!range)
        return {};

    std::string res{range.c_str()};
    BOOST_LOG_TRIVIAL(debug) << "range is = " << res;
    try
    {
        size_t minVal = 0;
        size_t maxVal = 0;
        if (res == "empty" || res == "error" || res.empty())
            return {};
        else if (size_t delim = res.find('-'); delim != std::string::npos)
        {
            minVal = std::stol(res.substr(0, delim));
            maxVal = std::stol(res.substr(delim + 1));
        }
        else
        {
            minVal = maxVal = std::stol(res);
        }
        return LedgerRange{minVal, maxVal};
    }
    catch (std::exception&)
    {
        BOOST_LOG_TRIVIAL(error)
            << __func__ << " : "
            << "Error parsing result of getCompleteLedgers()";
    }
    return {};
}

std::optional<Blob>
PostgresBackend::doFetchLedgerObject(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT object FROM objects WHERE key = "
        << "\'\\x" << ripple::strHex(key) << "\'"
        << " AND ledger_seq <= " << std::to_string(sequence)
        << " ORDER BY ledger_seq DESC LIMIT 1";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 1))
    {
        auto blob = res.asUnHexedBlob(0, 0);
        if (blob.size())
            return blob;
    }

    return {};
}

// returns a transaction, metadata pair
std::optional<TransactionAndMetadata>
PostgresBackend::fetchTransaction(
    ripple::uint256 const& hash,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT transaction,metadata,ledger_seq,date FROM transactions "
           "WHERE hash = "
        << "\'\\x" << ripple::strHex(hash) << "\'";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 4))
    {
        return {
            {res.asUnHexedBlob(0, 0),
             res.asUnHexedBlob(0, 1),
             res.asBigInt(0, 2),
             res.asBigInt(0, 3)}};
    }

    return {};
}

std::vector<TransactionAndMetadata>
PostgresBackend::fetchAllTransactionsInLedger(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT transaction, metadata, ledger_seq,date FROM transactions "
           "WHERE "
        << "ledger_seq = " << std::to_string(ledgerSequence);

    auto res = pgQuery(sql.str().data(), yield);
    if (size_t numRows = checkResult(res, 4))
    {
        std::vector<TransactionAndMetadata> txns;
        for (size_t i = 0; i < numRows; ++i)
        {
            txns.push_back(
                {res.asUnHexedBlob(i, 0),
                 res.asUnHexedBlob(i, 1),
                 res.asBigInt(i, 2),
                 res.asBigInt(i, 3)});
        }
        return txns;
    }
    return {};
}

std::vector<ripple::uint256>
PostgresBackend::fetchAllTransactionHashesInLedger(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT hash FROM transactions WHERE "
        << "ledger_seq = " << std::to_string(ledgerSequence);

    auto res = pgQuery(sql.str().data(), yield);
    if (size_t numRows = checkResult(res, 1))
    {
        std::vector<ripple::uint256> hashes;
        for (size_t i = 0; i < numRows; ++i)
        {
            hashes.push_back(res.asUInt256(i, 0));
        }
        return hashes;
    }

    return {};
}

std::optional<NFT>
PostgresBackend::fetchNFT(
    ripple::uint256 const& tokenID,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    throw std::runtime_error("Not implemented");
}

std::optional<ripple::uint256>
PostgresBackend::doFetchSuccessorKey(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT next FROM successor WHERE key = "
        << "\'\\x" << ripple::strHex(key) << "\'"
        << " AND ledger_seq <= " << std::to_string(ledgerSequence)
        << " ORDER BY ledger_seq DESC LIMIT 1";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 1))
    {
        auto next = res.asUInt256(0, 0);
        if (next == lastKey)
            return {};
        return next;
    }

    return {};
}

std::vector<TransactionAndMetadata>
PostgresBackend::fetchTransactions(
    std::vector<ripple::uint256> const& hashes,
    boost::asio::yield_context& yield) const
{
    if (!hashes.size())
        return {};

    std::vector<TransactionAndMetadata> results;
    results.resize(hashes.size());

    handler_type handler(std::forward<decltype(yield)>(yield));
    result_type result(handler);

    auto hw = new HandlerWrapper(std::move(handler));

    auto start = std::chrono::system_clock::now();

    std::atomic_uint numRemaining = hashes.size();
    std::atomic_bool errored = false;

    for (size_t i = 0; i < hashes.size(); ++i)
    {
        auto const& hash = hashes[i];
        boost::asio::spawn(
            get_associated_executor(yield),
            [this, &hash, &results, hw, &numRemaining, &errored, i](
                boost::asio::yield_context yield) {
                BOOST_LOG_TRIVIAL(trace) << __func__ << " getting txn = " << i;

                PgQuery pgQuery(pgPool_);

                std::stringstream sql;
                sql << "SELECT transaction,metadata,ledger_seq,date FROM "
                       "transactions "
                       "WHERE HASH = \'\\x"
                    << ripple::strHex(hash) << "\'";

                try
                {
                    if (auto const res = pgQuery(sql.str().data(), yield);
                        checkResult(res, 4))
                    {
                        results[i] = {
                            res.asUnHexedBlob(0, 0),
                            res.asUnHexedBlob(0, 1),
                            res.asBigInt(0, 2),
                            res.asBigInt(0, 3)};
                    }
                }
                catch (DatabaseTimeout const&)
                {
                    errored = true;
                }

                if (--numRemaining == 0)
                {
                    handler_type h(std::move(hw->handler));
                    h(boost::system::error_code{});
                }
            });
    }

    // Yields the worker to the io_context until handler is called.
    result.get();

    delete hw;

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start);

    BOOST_LOG_TRIVIAL(info)
        << __func__ << " fetched " << std::to_string(hashes.size())
        << " transactions asynchronously. took "
        << std::to_string(duration.count());
    if (errored)
    {
        BOOST_LOG_TRIVIAL(error) << __func__ << " Database fetch timed out";
        throw DatabaseTimeout();
    }

    return results;
}
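The fan-out pattern above (spawn one coroutine per item, count down an atomic, and let the last finisher fire the saved completion handler to resume the parent) can be distilled into a sketch reusing the aliases defined at the top of this file (the helper name is hypothetical):

    template <class Work>
    void
    forEachAsync(boost::asio::yield_context yield, std::size_t n, Work work)
    {
        handler_type handler(std::forward<decltype(yield)>(yield));
        result_type result(handler);
        auto hw = new HandlerWrapper(std::move(handler));
        std::atomic_uint numRemaining = n;

        for (std::size_t i = 0; i < n; ++i)
            boost::asio::spawn(
                boost::asio::get_associated_executor(yield),
                [&work, &numRemaining, hw, i](boost::asio::yield_context inner) {
                    work(i, inner);           // one unit of work per coroutine
                    if (--numRemaining == 0)  // last finisher wakes the parent
                    {
                        handler_type h(std::move(hw->handler));
                        h(boost::system::error_code{});
                    }
                });

        result.get();  // suspend until the handler fires
        delete hw;
    }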
|
||||
|
||||
std::vector<Blob>
|
||||
PostgresBackend::doFetchLedgerObjects(
|
||||
std::vector<ripple::uint256> const& keys,
|
||||
std::uint32_t const sequence,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
if (!keys.size())
|
||||
return {};
|
||||
|
||||
PgQuery pgQuery(pgPool_);
|
||||
pgQuery(set_timeout, yield);
|
||||
|
||||
std::vector<Blob> results;
|
||||
results.resize(keys.size());
|
||||
|
||||
handler_type handler(std::forward<decltype(yield)>(yield));
|
||||
result_type result(handler);
|
||||
|
||||
auto hw = new HandlerWrapper(std::move(handler));
|
||||
|
||||
std::atomic_uint numRemaining = keys.size();
|
||||
std::atomic_bool errored = false;
|
||||
auto start = std::chrono::system_clock::now();
|
||||
for (size_t i = 0; i < keys.size(); ++i)
|
||||
{
|
||||
auto const& key = keys[i];
|
||||
boost::asio::spawn(
|
||||
boost::asio::get_associated_executor(yield),
|
||||
[this, &key, &results, &numRemaining, &errored, hw, i, sequence](
|
||||
boost::asio::yield_context yield) {
|
||||
PgQuery pgQuery(pgPool_);
|
||||
|
||||
std::stringstream sql;
|
||||
sql << "SELECT object FROM "
|
||||
"objects "
|
||||
"WHERE key = \'\\x"
|
||||
<< ripple::strHex(key) << "\'"
|
||||
<< " AND ledger_seq <= " << std::to_string(sequence)
|
||||
<< " ORDER BY ledger_seq DESC LIMIT 1";
|
||||
|
||||
try
|
||||
{
|
||||
if (auto const res = pgQuery(sql.str().data(), yield);
|
||||
checkResult(res, 1))
|
||||
results[i] = res.asUnHexedBlob();
|
||||
}
|
||||
catch (DatabaseTimeout const& ex)
|
||||
{
|
||||
errored = true;
|
||||
}
|
||||
|
||||
if (--numRemaining == 0)
|
||||
{
|
||||
handler_type h(std::move(hw->handler));
|
||||
h(boost::system::error_code{});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Yields the worker to the io_context until handler is called.
|
||||
result.get();
|
||||
|
||||
delete hw;
|
||||
|
||||
auto end = std::chrono::system_clock::now();
|
||||
auto duration =
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||||
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__ << " fetched " << std::to_string(keys.size())
|
||||
<< " objects asynchronously. ms = " << std::to_string(duration.count());
|
||||
if (errored)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(error) << __func__ << " Database fetch timed out";
|
||||
throw DatabaseTimeout();
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
std::vector<LedgerObject>
|
||||
PostgresBackend::fetchLedgerDiff(
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
PgQuery pgQuery(pgPool_);
|
||||
pgQuery(set_timeout, yield);
|
||||
|
||||
std::stringstream sql;
|
||||
sql << "SELECT key,object FROM objects "
|
||||
"WHERE "
|
||||
<< "ledger_seq = " << std::to_string(ledgerSequence);
|
||||
|
||||
auto res = pgQuery(sql.str().data(), yield);
|
||||
if (size_t numRows = checkResult(res, 2))
|
||||
{
|
||||
std::vector<LedgerObject> objects;
|
||||
for (size_t i = 0; i < numRows; ++i)
|
||||
{
|
||||
objects.push_back({res.asUInt256(i, 0), res.asUnHexedBlob(i, 1)});
|
||||
}
|
||||
return objects;
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
// TODO this implementation and fetchAccountTransactions should be
|
||||
// generalized
|
||||
TransactionsAndCursor
|
||||
PostgresBackend::fetchNFTTransactions(
|
||||
ripple::uint256 const& tokenID,
|
||||
std::uint32_t const limit,
|
||||
bool forward,
|
||||
std::optional<TransactionsCursor> const& cursor,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
throw std::runtime_error("Not implemented");
|
||||
}
|
||||
|
||||
TransactionsAndCursor
|
||||
PostgresBackend::fetchAccountTransactions(
|
||||
ripple::AccountID const& account,
|
||||
std::uint32_t const limit,
|
||||
bool forward,
|
||||
std::optional<TransactionsCursor> const& cursor,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
PgQuery pgQuery(pgPool_);
|
||||
pgQuery(set_timeout, yield);
|
||||
pg_params dbParams;
|
||||
|
||||
char const*& command = dbParams.first;
|
||||
std::vector<std::optional<std::string>>& values = dbParams.second;
|
||||
command =
|
||||
"SELECT account_tx($1::bytea, $2::bigint, $3::bool, "
|
||||
"$4::bigint, $5::bigint)";
|
||||
values.resize(5);
|
||||
values[0] = "\\x" + strHex(account);
|
||||
|
||||
values[1] = std::to_string(limit);
|
||||
|
||||
values[2] = std::to_string(forward);
|
||||
|
||||
if (cursor)
|
||||
{
|
||||
values[3] = std::to_string(cursor->ledgerSequence);
|
||||
values[4] = std::to_string(cursor->transactionIndex);
|
||||
}
|
||||
for (size_t i = 0; i < values.size(); ++i)
|
||||
{
|
||||
BOOST_LOG_TRIVIAL(debug) << "value " << std::to_string(i) << " = "
|
||||
<< (values[i] ? values[i].value() : "null");
|
||||
}
|
||||
|
||||
auto start = std::chrono::system_clock::now();
|
||||
auto res = pgQuery(dbParams, yield);
|
||||
auto end = std::chrono::system_clock::now();
|
||||
|
||||
auto duration = ((end - start).count()) / 1000000000.0;
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< __func__ << " : executed stored_procedure in "
|
||||
<< std::to_string(duration)
|
||||
<< " num records = " << std::to_string(checkResult(res, 1));
|
||||
|
||||
checkResult(res, 1);
|
||||
|
||||
char const* resultStr = res.c_str();
|
||||
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
|
||||
<< "postgres result = " << resultStr
|
||||
<< " : account = " << strHex(account);
|
||||
|
||||
boost::json::value raw = boost::json::parse(resultStr);
|
||||
boost::json::object responseObj = raw.as_object();
|
||||
BOOST_LOG_TRIVIAL(debug) << " parsed = " << responseObj;
|
||||
if (responseObj.contains("transactions"))
|
||||
{
|
||||
auto txns = responseObj.at("transactions").as_array();
|
||||
std::vector<ripple::uint256> hashes;
|
||||
for (auto& hashHex : txns)
|
||||
{
|
||||
ripple::uint256 hash;
|
||||
if (hash.parseHex(hashHex.at("hash").as_string().c_str() + 2))
|
||||
hashes.push_back(hash);
|
||||
}
|
||||
if (responseObj.contains("cursor"))
|
||||
{
|
||||
return {
|
||||
fetchTransactions(hashes, yield),
|
||||
{{responseObj.at("cursor").at("ledger_sequence").as_int64(),
|
||||
responseObj.at("cursor")
|
||||
.at("transaction_index")
|
||||
.as_int64()}}};
|
||||
}
|
||||
return {fetchTransactions(hashes, yield), {}};
|
||||
}
|
||||
return {{}, {}};
|
||||
} // namespace Backend
|
||||
|
||||
void
|
||||
PostgresBackend::open(bool readOnly)
|
||||
{
|
||||
initSchema(pgPool_);
|
||||
initAccountTx(pgPool_);
|
||||
}
|
||||
|
||||
void
|
||||
PostgresBackend::close()
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
PostgresBackend::startWrites() const
|
||||
{
|
||||
synchronous([&](boost::asio::yield_context yield) {
|
||||
numRowsInObjectsBuffer_ = 0;
|
||||
abortWrite_ = false;
|
||||
auto res = writeConnection_("BEGIN", yield);
|
||||
if (!res || res.status() != PGRES_COMMAND_OK)
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << "Postgres error creating transaction: " << res.msg();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
bool
|
||||
PostgresBackend::doFinishWrites()
|
||||
{
|
||||
synchronous([&](boost::asio::yield_context yield) {
|
||||
if (!abortWrite_)
|
||||
{
|
||||
std::string txStr = transactionsBuffer_.str();
|
||||
writeConnection_.bulkInsert("transactions", txStr, yield);
|
||||
writeConnection_.bulkInsert(
|
||||
"account_transactions", accountTxBuffer_.str(), yield);
|
||||
std::string objectsStr = objectsBuffer_.str();
|
||||
if (objectsStr.size())
|
||||
writeConnection_.bulkInsert("objects", objectsStr, yield);
|
||||
BOOST_LOG_TRIVIAL(debug)
|
||||
<< __func__ << " objects size = " << objectsStr.size()
|
||||
<< " txns size = " << txStr.size();
|
||||
std::string successorStr = successorBuffer_.str();
|
||||
if (successorStr.size())
|
||||
writeConnection_.bulkInsert("successor", successorStr, yield);
|
||||
if (!range)
|
||||
{
|
||||
std::stringstream indexCreate;
|
||||
indexCreate
|
||||
<< "CREATE INDEX diff ON objects USING hash(ledger_seq) "
|
||||
"WHERE NOT "
|
||||
"ledger_seq = "
|
||||
<< std::to_string(inProcessLedger);
|
||||
writeConnection_(indexCreate.str().data(), yield);
|
||||
}
|
||||
}
|
||||
auto res = writeConnection_("COMMIT", yield);
|
||||
if (!res || res.status() != PGRES_COMMAND_OK)
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << "Postgres error committing transaction: " << res.msg();
|
||||
throw std::runtime_error(msg.str());
|
||||
}
|
||||
transactionsBuffer_.str("");
|
||||
transactionsBuffer_.clear();
|
||||
objectsBuffer_.str("");
|
||||
objectsBuffer_.clear();
|
||||
successorBuffer_.str("");
|
||||
successorBuffer_.clear();
|
||||
successors_.clear();
|
||||
accountTxBuffer_.str("");
|
||||
accountTxBuffer_.clear();
|
||||
numRowsInObjectsBuffer_ = 0;
|
||||
});
|
||||
|
||||
return !abortWrite_;
|
||||
}
|
||||
|
||||
bool
|
||||
PostgresBackend::doOnlineDelete(
|
||||
std::uint32_t const numLedgersToKeep,
|
||||
boost::asio::yield_context& yield) const
|
||||
{
|
||||
auto rng = fetchLedgerRange();
|
||||
if (!rng)
|
||||
return false;
|
||||
std::uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
|
||||
if (minLedger <= rng->minSequence)
|
||||
return false;
|
||||
PgQuery pgQuery(pgPool_);
|
||||
pgQuery("SET statement_timeout TO 0", yield);
|
||||
std::optional<ripple::uint256> cursor;
|
||||
while (true)
|
||||
{
|
||||
auto [objects, curCursor] = retryOnTimeout([&]() {
|
||||
return fetchLedgerPage(cursor, minLedger, 256, false, yield);
|
||||
});
|
||||
BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
|
||||
std::stringstream objectsBuffer;
|
||||
|
||||
for (auto& obj : objects)
|
||||
{
|
||||
objectsBuffer << "\\\\x" << ripple::strHex(obj.key) << '\t'
|
||||
<< std::to_string(minLedger) << '\t' << "\\\\x"
|
||||
<< ripple::strHex(obj.blob) << '\n';
|
||||
}
|
||||
pgQuery.bulkInsert("objects", objectsBuffer.str(), yield);
|
||||
cursor = curCursor;
|
||||
if (!cursor)
|
||||
break;
|
||||
}
|
||||
BOOST_LOG_TRIVIAL(info) << __func__ << " finished inserting into objects";
|
||||
{
|
||||
std::stringstream sql;
|
||||
sql << "DELETE FROM ledgers WHERE ledger_seq < "
|
||||
<< std::to_string(minLedger);
|
||||
auto res = pgQuery(sql.str().data(), yield);
|
||||
if (res.msg() != "ok")
|
||||
throw std::runtime_error("Error deleting from ledgers table");
|
||||
}
|
||||
{
|
||||
std::stringstream sql;
|
||||
sql << "DELETE FROM keys WHERE ledger_seq < "
|
||||
<< std::to_string(minLedger);
|
||||
auto res = pgQuery(sql.str().data(), yield);
|
||||
if (res.msg() != "ok")
|
||||
throw std::runtime_error("Error deleting from keys table");
|
||||
}
|
||||
{
|
||||
std::stringstream sql;
|
||||
sql << "DELETE FROM books WHERE ledger_seq < "
|
||||
<< std::to_string(minLedger);
|
||||
auto res = pgQuery(sql.str().data(), yield);
|
||||
if (res.msg() != "ok")
|
||||
throw std::runtime_error("Error deleting from books table");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace Backend
|
||||
@@ -1,165 +0,0 @@
#ifndef RIPPLE_APP_REPORTING_POSTGRESBACKEND_H_INCLUDED
#define RIPPLE_APP_REPORTING_POSTGRESBACKEND_H_INCLUDED
#include <boost/json.hpp>
#include <backend/BackendInterface.h>

namespace Backend {
class PostgresBackend : public BackendInterface
{
private:
    mutable size_t numRowsInObjectsBuffer_ = 0;
    mutable std::stringstream objectsBuffer_;
    mutable size_t numRowsInSuccessorBuffer_ = 0;
    mutable std::stringstream successorBuffer_;
    mutable std::stringstream transactionsBuffer_;
    mutable std::stringstream accountTxBuffer_;
    std::shared_ptr<PgPool> pgPool_;
    mutable PgQuery writeConnection_;
    mutable bool abortWrite_ = false;
    std::uint32_t writeInterval_ = 1000000;
    std::uint32_t inProcessLedger = 0;
    mutable std::unordered_set<std::string> successors_;

    const char* const set_timeout = "SET statement_timeout TO 10000";

public:
    PostgresBackend(
        boost::asio::io_context& ioc,
        boost::json::object const& config);

    std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override;

    std::optional<ripple::LedgerInfo>
    fetchLedgerBySequence(
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    std::optional<ripple::LedgerInfo>
    fetchLedgerByHash(
        ripple::uint256 const& hash,
        boost::asio::yield_context& yield) const override;

    std::optional<Blob>
    doFetchLedgerObject(
        ripple::uint256 const& key,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    // returns a transaction, metadata pair
    std::optional<TransactionAndMetadata>
    fetchTransaction(
        ripple::uint256 const& hash,
        boost::asio::yield_context& yield) const override;

    std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::optional<NFT>
    fetchNFT(
        ripple::uint256 const& tokenID,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    TransactionsAndCursor
    fetchNFTTransactions(
        ripple::uint256 const& tokenID,
        std::uint32_t const limit,
        bool const forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context& yield) const override;

    std::vector<LedgerObject>
    fetchLedgerDiff(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context& yield) const override;

    std::optional<ripple::uint256>
    doFetchSuccessorKey(
        ripple::uint256 key,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::vector<TransactionAndMetadata>
    fetchTransactions(
        std::vector<ripple::uint256> const& hashes,
        boost::asio::yield_context& yield) const override;

    std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursor,
        boost::asio::yield_context& yield) const override;

    void
    writeLedger(
        ripple::LedgerInfo const& ledgerInfo,
        std::string&& ledgerHeader) override;

    void
    doWriteLedgerObject(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& blob) override;

    void
    writeSuccessor(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& successor) override;

    void
    writeTransaction(
        std::string&& hash,
        std::uint32_t const seq,
        std::uint32_t const date,
        std::string&& transaction,
        std::string&& metadata) override;

    void
    writeNFTs(std::vector<NFTsData>&& data) override;

    void
    writeAccountTransactions(
        std::vector<AccountTransactionsData>&& data) override;

    void
    writeNFTTransactions(std::vector<NFTTransactionsData>&& data) override;

    void
    open(bool readOnly) override;

    void
    close() override;

    void
    startWrites() const override;

    bool
    doFinishWrites() override;

    bool
    doOnlineDelete(
        std::uint32_t const numLedgersToKeep,
        boost::asio::yield_context& yield) const override;
};
} // namespace Backend
#endif
@@ -1,6 +1,6 @@
# Clio Backend
## Background
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. As of right now, Cassandra is the only supported database that is production-ready. However, support for more databases like PostgreSQL and DynamoDB may be added in future versions. Support for database types can be easily extended by creating new implementations that implement the virtual methods of `BackendInterface.h`. Then, use the Factory Object Design Pattern to simply add logic statements to `BackendFactory.h` that return the new database interface for a specific `type` in Clio's configuration file.
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. As of right now, Cassandra and ScyllaDB are the only supported databases that are production-ready. Support for database types can be easily extended by creating new implementations that implement the virtual methods of `BackendInterface.h`. Then, use the Factory Object Design Pattern to simply add logic statements to `BackendFactory.h` that return the new database interface for a specific `type` in Clio's configuration file.

## Data Model
The data model used by Clio to read and write ledger data is different from what Rippled uses. Rippled uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the leaf vertices of the tree. Thus, looking up a record located at a leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record. Rippled nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other Rippled nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized Rippled nodes.
@@ -129,4 +129,92 @@ In each new ledger version with sequence `n`, a ledger object `v` can either be
2. If `v` is...
   1. Being **created**, add two new records of `seq=n`, with one being `e` pointing to `v`, and `v` pointing to `w` (Linked List insertion operation).
   2. Being **modified**, do nothing.
   3. Being **deleted**, add a record of `seq=n` with `e` pointing to `v`'s `next` value (Linked List deletion operation); both operations are sketched below.
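For illustration, here is a minimal C++ sketch of these two linked-list operations against an in-memory stand-in for the successor table. The stand-in map and the helper names `onCreated`/`onDeleted` are assumptions for this example; clio's real writes go through `writeSuccessor` on the backend.

```
#include <cstdint>
#include <map>
#include <string>
#include <utility>

// Stand-in for the on-disk successor table, keyed by (key, seq) -> next.
std::map<std::pair<std::string, std::uint32_t>, std::string> successorTable;

void
writeSuccessor(std::string key, std::uint32_t seq, std::string next)
{
    successorTable[{std::move(key), seq}] = std::move(next);
}

// Object v is created in ledger n between predecessor e and successor w:
void
onCreated(std::string const& e, std::string const& v, std::string const& w, std::uint32_t n)
{
    writeSuccessor(e, n, v);  // e now points to v (linked-list insertion)
    writeSuccessor(v, n, w);  // v points to what e used to point to
}

// Object v is deleted in ledger n; e inherits v's old `next` value:
void
onDeleted(std::string const& e, std::string const& vNext, std::uint32_t n)
{
    writeSuccessor(e, n, vNext);  // linked-list deletion
}
// A modification writes nothing: the set of keys, and hence the list, is unchanged.
```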
### NFT data model
In `rippled` NFTs are stored in NFTokenPage ledger objects. This object is implemented to save ledger space and has the property that it gives us O(1) lookup time for an NFT, assuming we know who owns the NFT at a particular ledger. However, if we do not know who owns the NFT at a specific ledger height, we have no alternative in rippled other than scanning the entire ledger. Because of this tradeoff, clio implements a special NFT indexing data structure that allows clio users to query NFTs quickly, while keeping rippled's space-saving optimizations.

#### `nf_tokens`
```
CREATE TABLE clio.nf_tokens (
  token_id blob,     # The NFT's ID
  sequence bigint,   # Sequence of ledger version
  owner blob,        # The account ID of the owner of this NFT at this ledger
  is_burned boolean, # True if token was burned in this ledger
  PRIMARY KEY (token_id, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table indexes NFT IDs with their owner at a given ledger. So
```
SELECT * FROM nf_tokens
WHERE token_id = N AND sequence <= Y
ORDER BY sequence DESC LIMIT 1;
```
will give you the owner of token N at ledger Y and whether it was burned. If the token is burned, the owner field indicates the account that owned the token at the time it was burned; it does not necessarily indicate the account that burned the token. If you need to determine who burned the token, you can use the `nft_history` API, which will give you the NFTokenBurn transaction that burned this token, along with the account that submitted that transaction.

#### `issuer_nf_tokens_v2`
```
CREATE TABLE clio.issuer_nf_tokens_v2 (
  issuer blob,   # The NFT issuer's account ID
  taxon bigint,  # The NFT's token taxon
  token_id blob, # The NFT's ID
  PRIMARY KEY (issuer, taxon, token_id)
) WITH CLUSTERING ORDER BY (taxon ASC, token_id ASC) ...
```
This table indexes token IDs against their issuer and issuer/taxon combination. This is useful for determining all the NFTs a specific account issued, or all the NFTs a specific account issued with a specific taxon. It is not useful to know all the NFTs with a given taxon while excluding issuer, since the meaning of a taxon is left to an issuer.

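For example, a sketch of the two lookups this table serves (`I` and `T` are placeholders for an issuer account ID and a taxon):
```
-- all NFTs ever issued by I
SELECT token_id FROM issuer_nf_tokens_v2 WHERE issuer = I;

-- all NFTs issued by I under taxon T
SELECT token_id FROM issuer_nf_tokens_v2 WHERE issuer = I AND taxon = T;
```
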
#### `nf_token_uris`
```
CREATE TABLE clio.nf_token_uris (
  token_id blob,   # The NFT's ID
  sequence bigint, # Sequence of ledger version
  uri blob,        # The NFT's URI
  PRIMARY KEY (token_id, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table is used to store an NFT's URI. Without storing this here, we would need to traverse the NFT owner's entire set of NFTs to find the URI, again due to the way that NFTs are stored in rippled. Furthermore, instead of storing this in the `nf_tokens` table, we store it here to save space. A given NFT will have only one entry in this table (see caveat below), written to this table as soon as clio sees the NFTokenMint transaction, or when clio loads an NFTokenPage from the initial ledger it downloaded. However, the `nf_tokens` table is written to every time an NFT changes ownership, or if it is burned.

Given this, why do we have to store the sequence? Unfortunately there is an extreme edge case where a given NFT ID can be burned, and then re-minted with a different URI. This is extremely unlikely, and might be fixed in a future version of rippled, but just in case we handle that edge case by allowing a given NFT ID to be assigned a new URI without removing the prior one.

#### `nf_token_transactions`
```
CREATE TABLE clio.nf_token_transactions (
  token_id blob,                 # The NFT's ID
  seq_idx tuple<bigint, bigint>, # Tuple of (ledger_index, transaction_index)
  hash blob,                     # Hash of the transaction
  PRIMARY KEY (token_id, seq_idx)
) WITH CLUSTERING ORDER BY (seq_idx DESC) ...
```
This table is the NFT equivalent of `account_tx`. It's motivated by the exact same reasons and serves the analogous purpose here. It drives the `nft_history` API.

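A sketch of the paging query `nft_history` runs against it, mirroring the `selectNFTTx` prepared statement shown later in this changeset (`N` and the `(Y, I)` ledger/transaction-index bound are placeholders):
```
SELECT hash, seq_idx FROM nf_token_transactions
WHERE token_id = N AND seq_idx < (Y, I)
ORDER BY seq_idx DESC LIMIT 20;
```
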
@@ -1,81 +0,0 @@
#ifndef CLIO_SIMPLECACHE_H_INCLUDED
#define CLIO_SIMPLECACHE_H_INCLUDED

#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
#include <backend/Types.h>
#include <map>
#include <mutex>
#include <shared_mutex>
#include <utility>
#include <vector>
namespace Backend {
class SimpleCache
{
    struct CacheEntry
    {
        uint32_t seq = 0;
        Blob blob;
    };

    // counters for fetchLedgerObject(s) hit rate
    mutable std::atomic_uint32_t objectReqCounter_;
    mutable std::atomic_uint32_t objectHitCounter_;
    // counters for fetchSuccessorKey hit rate
    mutable std::atomic_uint32_t successorReqCounter_;
    mutable std::atomic_uint32_t successorHitCounter_;

    std::map<ripple::uint256, CacheEntry> map_;
    mutable std::shared_mutex mtx_;
    uint32_t latestSeq_ = 0;
    std::atomic_bool full_ = false;
    std::atomic_bool disabled_ = false;
    // temporary set to prevent background thread from writing already deleted
    // data. not used when cache is full
    std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;

public:
    // Update the cache with new ledger objects
    // set isBackground to true when writing old data from a background thread
    void
    update(
        std::vector<LedgerObject> const& blobs,
        uint32_t seq,
        bool isBackground = false);

    std::optional<Blob>
    get(ripple::uint256 const& key, uint32_t seq) const;

    // always returns empty optional if isFull() is false
    std::optional<LedgerObject>
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const;

    // always returns empty optional if isFull() is false
    std::optional<LedgerObject>
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const;

    void
    setDisabled();

    void
    setFull();

    uint32_t
    latestLedgerSequence() const;

    // whether the cache has all data for the most recent ledger
    bool
    isFull() const;

    size_t
    size() const;

    float
    getObjectHitRate() const;

    float
    getSuccessorHitRate() const;
};

} // namespace Backend
#endif
@@ -1,5 +1,24 @@
#ifndef CLIO_TYPES_H_INCLUDED
#define CLIO_TYPES_H_INCLUDED
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <optional>
@@ -37,8 +56,27 @@ struct TransactionAndMetadata
{
    Blob transaction;
    Blob metadata;
    std::uint32_t ledgerSequence;
    std::uint32_t date;
    std::uint32_t ledgerSequence = 0;
    std::uint32_t date = 0;

    TransactionAndMetadata() = default;
    TransactionAndMetadata(
        Blob const& transaction,
        Blob const& metadata,
        std::uint32_t ledgerSequence,
        std::uint32_t date)
        : transaction{transaction}, metadata{metadata}, ledgerSequence{ledgerSequence}, date{date}
    {
    }

    TransactionAndMetadata(std::tuple<Blob, Blob, std::uint32_t, std::uint32_t> data)
        : transaction{std::get<0>(data)}
        , metadata{std::get<1>(data)}
        , ledgerSequence{std::get<2>(data)}
        , date{std::get<3>(data)}
    {
    }

    bool
    operator==(const TransactionAndMetadata& other) const
    {
@@ -51,6 +89,29 @@ struct TransactionsCursor
{
    std::uint32_t ledgerSequence;
    std::uint32_t transactionIndex;

    TransactionsCursor() = default;
    TransactionsCursor(std::uint32_t ledgerSequence, std::uint32_t transactionIndex)
        : ledgerSequence{ledgerSequence}, transactionIndex{transactionIndex}
    {
    }

    TransactionsCursor(std::tuple<std::uint32_t, std::uint32_t> data)
        : ledgerSequence{std::get<0>(data)}, transactionIndex{std::get<1>(data)}
    {
    }

    TransactionsCursor&
    operator=(TransactionsCursor const&) = default;

    bool
    operator==(TransactionsCursor const& other) const = default;

    [[nodiscard]] std::tuple<std::uint32_t, std::uint32_t>
    asTuple() const
    {
        return std::make_tuple(ledgerSequence, transactionIndex);
    }
};

struct TransactionsAndCursor
@@ -64,16 +125,31 @@ struct NFT
    ripple::uint256 tokenID;
    std::uint32_t ledgerSequence;
    ripple::AccountID owner;
    Blob uri;
    bool isBurned;

    NFT() = default;
    NFT(ripple::uint256 const& tokenID,
        std::uint32_t ledgerSequence,
        ripple::AccountID const& owner,
        Blob const& uri,
        bool isBurned)
        : tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{uri}, isBurned{isBurned}
    {
    }

    NFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, ripple::AccountID const& owner, bool isBurned)
        : NFT(tokenID, ledgerSequence, owner, {}, isBurned)
    {
    }

    // clearly two tokens are the same if they have the same ID, but this
    // struct stores the state of a given token at a given ledger sequence, so
    // we also need to compare with ledgerSequence
    bool
    operator==(NFT const& other) const
    {
        return tokenID == other.tokenID &&
            ledgerSequence == other.ledgerSequence;
        return tokenID == other.tokenID && ledgerSequence == other.ledgerSequence;
    }
};

@@ -82,11 +158,7 @@ struct LedgerRange
    std::uint32_t minSequence;
    std::uint32_t maxSequence;
};
constexpr ripple::uint256 firstKey{
    "0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 lastKey{
    "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 hi192{
    "0000000000000000000000000000000000000000000000001111111111111111"};
constexpr ripple::uint256 firstKey{"0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 lastKey{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 hi192{"0000000000000000000000000000000000000000000000001111111111111111"};
} // namespace Backend
#endif

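These sentinel keys bound the successor walk over a ledger's key space. A hedged sketch of a full scan, assuming a `BackendInterface`-style `fetchSuccessorKey(key, seq, yield)` lookup with the same shape as the `doFetchSuccessorKey` override above (`process` is a placeholder for per-key work):
```
#include <backend/BackendInterface.h>

#include <ripple/basics/base_uint.h>

// Hypothetical full scan of ledger `seq`, following successor links from
// firstKey until the walk reaches lastKey (the end-of-list sentinel).
template <typename OnKey>
void
scanLedger(
    Backend::BackendInterface const& backend,
    std::uint32_t seq,
    boost::asio::yield_context& yield,
    OnKey&& process)
{
    auto cur = Backend::firstKey;
    while (auto next = backend.fetchSuccessorKey(cur, seq, yield))
    {
        if (*next == Backend::lastKey)
            break;        // upper sentinel: no more objects
        process(*next);   // caller-supplied per-key work
        cur = *next;
    }
}
```
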
src/backend/cassandra/Concepts.h (new file, 79 lines)
@@ -0,0 +1,79 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Types.h>

#include <boost/asio/spawn.hpp>

#include <chrono>
#include <concepts>
#include <optional>
#include <string>

namespace Backend::Cassandra {

// clang-format off
template <typename T>
concept SomeSettingsProvider = requires(T a) {
    { a.getSettings() } -> std::same_as<Settings>;
    { a.getKeyspace() } -> std::same_as<std::string>;
    { a.getTablePrefix() } -> std::same_as<std::optional<std::string>>;
    { a.getReplicationFactor() } -> std::same_as<uint16_t>;
    { a.getTtl() } -> std::same_as<uint16_t>;
};
// clang-format on

// clang-format off
template <typename T>
concept SomeExecutionStrategy = requires(
    T a,
    Settings settings,
    Handle handle,
    Statement statement,
    std::vector<Statement> statements,
    PreparedStatement prepared,
    boost::asio::yield_context token
) {
    { T(settings, handle) };
    { a.sync() } -> std::same_as<void>;
    { a.isTooBusy() } -> std::same_as<bool>;
    { a.writeSync(statement) } -> std::same_as<ResultOrError>;
    { a.writeSync(prepared) } -> std::same_as<ResultOrError>;
    { a.write(prepared) } -> std::same_as<void>;
    { a.write(std::move(statements)) } -> std::same_as<void>;
    { a.read(token, prepared) } -> std::same_as<ResultOrError>;
    { a.read(token, statement) } -> std::same_as<ResultOrError>;
    { a.read(token, statements) } -> std::same_as<ResultOrError>;
    { a.readEach(token, statements) } -> std::same_as<std::vector<Result>>;
};
// clang-format on

// clang-format off
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
    { T(ioc) };
    { a.shouldRetry(err) } -> std::same_as<bool>;
    { a.retry([](){}) } -> std::same_as<void>;
    { a.calculateDelay(attempt) } -> std::same_as<std::chrono::milliseconds>;
};
// clang-format on

} // namespace Backend::Cassandra
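To make these concepts concrete, here is a minimal sketch of a type that would satisfy `SomeRetryPolicy`. The class name, the timeout-only retry condition, and the backoff choice are illustrative assumptions, not clio's actual retry policy:
```
#include <backend/cassandra/Error.h>

#include <boost/asio/io_context.hpp>

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <functional>

// Hypothetical policy: retries transient errors with capped exponential backoff.
class NaiveRetryPolicy
{
    boost::asio::io_context& ioc_;

public:
    explicit NaiveRetryPolicy(boost::asio::io_context& ioc) : ioc_{ioc}
    {
    }

    bool
    shouldRetry(Backend::Cassandra::CassandraError const& err) const
    {
        return err.isTimeout();  // only transient errors are worth retrying
    }

    void
    retry(std::function<void()> fn)
    {
        fn();  // a real policy would re-post onto ioc_ after calculateDelay()
    }

    std::chrono::milliseconds
    calculateDelay(uint32_t attempt) const
    {
        auto const shift = std::min(attempt, 7u);  // clamp to avoid overflow
        // 10ms, 20ms, 40ms, ... capped at one second
        return std::chrono::milliseconds{std::min(1000u, 10u << shift)};
    }
};
```
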
src/backend/cassandra/Error.h (new file, 99 lines)
@@ -0,0 +1,99 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <cassandra.h>

#include <string>

namespace Backend::Cassandra {

/**
 * @brief A simple container for both error message and error code
 */
class CassandraError
{
    std::string message_;
    uint32_t code_;

public:
    CassandraError() = default; // default constructible required by Expected
    CassandraError(std::string message, uint32_t code) : message_{message}, code_{code}
    {
    }

    template <typename T>
    friend std::string
    operator+(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
    {
        return lhs + rhs.message();
    }

    template <typename T>
    friend bool
    operator==(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
    {
        return lhs == rhs.message();
    }

    template <std::integral T>
    friend bool
    operator==(T const& lhs, CassandraError const& rhs)
    {
        return lhs == rhs.code();
    }

    friend std::ostream&
    operator<<(std::ostream& os, CassandraError const& err)
    {
        os << err.message();
        return os;
    }

    std::string
    message() const
    {
        return message_;
    }

    uint32_t
    code() const
    {
        return code_;
    }

    bool
    isTimeout() const
    {
        if (code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
            code_ == CASS_ERROR_SERVER_UNAVAILABLE or code_ == CASS_ERROR_SERVER_OVERLOADED or
            code_ == CASS_ERROR_SERVER_READ_TIMEOUT)
            return true;
        return false;
    }

    bool
    isInvalidQuery() const
    {
        return code_ == CASS_ERROR_SERVER_INVALID_QUERY;
    }
};

} // namespace Backend::Cassandra
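As a usage sketch (the handler function itself is hypothetical), calling code can branch on these helpers and use the friend operators directly:
```
#include <backend/cassandra/Error.h>

#include <iostream>
#include <stdexcept>

// Hypothetical: decide what to do after a statement fails.
void
handleError(Backend::Cassandra::CassandraError const& err)
{
    if (err.isTimeout())
        std::cout << "transient error: " << err << ", safe to retry\n";  // operator<<
    else if (err.isInvalidQuery())
        throw std::runtime_error("bad query: " + err);  // friend operator+ above
    else
        std::cout << "unhandled cassandra error, code " << err.code() << '\n';
}
```
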
src/backend/cassandra/Handle.cpp (new file, 155 lines)
@@ -0,0 +1,155 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/cassandra/Handle.h>

namespace Backend::Cassandra {

Handle::Handle(Settings clusterSettings) : cluster_{clusterSettings}
{
}

Handle::Handle(std::string_view contactPoints) : Handle{Settings::defaultSettings().withContactPoints(contactPoints)}
{
}

Handle::~Handle()
{
    [[maybe_unused]] auto _ = disconnect(); // attempt to disconnect
}

Handle::FutureType
Handle::asyncConnect() const
{
    return cass_session_connect(session_, cluster_);
}

Handle::MaybeErrorType
Handle::connect() const
{
    return asyncConnect().await();
}

Handle::FutureType
Handle::asyncConnect(std::string_view keyspace) const
{
    return cass_session_connect_keyspace(session_, cluster_, keyspace.data());
}

Handle::MaybeErrorType
Handle::connect(std::string_view keyspace) const
{
    return asyncConnect(keyspace).await();
}

Handle::FutureType
Handle::asyncDisconnect() const
{
    return cass_session_close(session_);
}

Handle::MaybeErrorType
Handle::disconnect() const
{
    return asyncDisconnect().await();
}

Handle::FutureType
Handle::asyncReconnect(std::string_view keyspace) const
{
    if (auto rc = asyncDisconnect().await(); not rc) // sync
        throw std::logic_error("Reconnect to keyspace '" + std::string{keyspace} + "' failed: " + rc.error());
    return asyncConnect(keyspace);
}

Handle::MaybeErrorType
Handle::reconnect(std::string_view keyspace) const
{
    return asyncReconnect(keyspace).await();
}

std::vector<Handle::FutureType>
Handle::asyncExecuteEach(std::vector<Statement> const& statements) const
{
    std::vector<Handle::FutureType> futures;
    for (auto const& statement : statements)
        futures.push_back(cass_session_execute(session_, statement));
    return futures;
}

Handle::MaybeErrorType
Handle::executeEach(std::vector<Statement> const& statements) const
{
    for (auto futures = asyncExecuteEach(statements); auto const& future : futures)
    {
        if (auto const rc = future.await(); not rc)
            return rc;
    }

    return {};
}

Handle::FutureType
Handle::asyncExecute(Statement const& statement) const
{
    return cass_session_execute(session_, statement);
}

Handle::FutureWithCallbackType
Handle::asyncExecute(Statement const& statement, std::function<void(Handle::ResultOrErrorType)>&& cb) const
{
    return Handle::FutureWithCallbackType{cass_session_execute(session_, statement), std::move(cb)};
}

Handle::ResultOrErrorType
Handle::execute(Statement const& statement) const
{
    return asyncExecute(statement).get();
}

Handle::FutureType
Handle::asyncExecute(std::vector<Statement> const& statements) const
{
    return cass_session_execute_batch(session_, Batch{statements});
}

Handle::MaybeErrorType
Handle::execute(std::vector<Statement> const& statements) const
{
    return asyncExecute(statements).await();
}

Handle::FutureWithCallbackType
Handle::asyncExecute(std::vector<Statement> const& statements, std::function<void(Handle::ResultOrErrorType)>&& cb)
    const
{
    return Handle::FutureWithCallbackType{cass_session_execute_batch(session_, Batch{statements}), std::move(cb)};
}

Handle::PreparedStatementType
Handle::prepare(std::string_view query) const
{
    Handle::FutureType future = cass_session_prepare(session_, query.data());
    if (auto const rc = future.await(); rc)
        return cass_future_get_prepared(future);
    else
        throw std::runtime_error(rc.error().message());
}

} // namespace Backend::Cassandra
src/backend/cassandra/Handle.h (new file, 295 lines)
@@ -0,0 +1,295 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Error.h>
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/Batch.h>
#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/Future.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Result.h>
#include <backend/cassandra/impl/Session.h>
#include <backend/cassandra/impl/Statement.h>
#include <util/Expected.h>

#include <cassandra.h>

#include <chrono>
#include <compare>
#include <iterator>
#include <vector>

namespace Backend::Cassandra {

/**
 * @brief Represents a handle to the cassandra database cluster
 */
class Handle
{
    detail::Cluster cluster_;
    detail::Session session_;

public:
    using ResultOrErrorType = ResultOrError;
    using MaybeErrorType = MaybeError;
    using FutureWithCallbackType = FutureWithCallback;
    using FutureType = Future;
    using StatementType = Statement;
    using PreparedStatementType = PreparedStatement;
    using ResultType = Result;

    /**
     * @brief Construct a new handle from a @ref Settings object
     */
    explicit Handle(Settings clusterSettings = Settings::defaultSettings());

    /**
     * @brief Construct a new handle with default settings and only by setting
     * the contact points
     */
    explicit Handle(std::string_view contactPoints);

    /**
     * @brief Disconnects gracefully if possible
     */
    ~Handle();

    /**
     * @brief Move is supported
     */
    Handle(Handle&&) = default;

    /**
     * @brief Connect to the cluster asynchronously
     *
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncConnect() const;

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncConnect() const for how this works.
     */
    [[nodiscard]] MaybeErrorType
    connect() const;

    /**
     * @brief Connect to the specified keyspace asynchronously
     *
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncConnect(std::string_view keyspace) const;

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncConnect(std::string_view) const for how this works.
     */
    [[nodiscard]] MaybeErrorType
    connect(std::string_view keyspace) const;

    /**
     * @brief Disconnect from the cluster asynchronously
     *
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncDisconnect() const;

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncDisconnect() const for how this works.
     */
    [[maybe_unused]] MaybeErrorType
    disconnect() const;

    /**
     * @brief Reconnect to the specified keyspace asynchronously
     *
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncReconnect(std::string_view keyspace) const;

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncReconnect(std::string_view) const for how this works.
     */
    [[nodiscard]] MaybeErrorType
    reconnect(std::string_view keyspace) const;

    /**
     * @brief Execute a simple query with optional args asynchronously
     *
     * @return A future
     */
    template <typename... Args>
    [[nodiscard]] FutureType
    asyncExecute(std::string_view query, Args&&... args) const
    {
        auto statement = StatementType{query, std::forward<Args>(args)...};
        return cass_session_execute(session_, statement);
    }

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncExecute(std::string_view, Args&&...) const for how this
     * works.
     */
    template <typename... Args>
    [[maybe_unused]] ResultOrErrorType
    execute(std::string_view query, Args&&... args) const
    {
        return asyncExecute<Args...>(query, std::forward<Args>(args)...).get();
    }

    /**
     * @brief Execute each of the statements asynchronously
     *
     * The batched version is not always the right option, especially since it
     * only supports INSERT, UPDATE and DELETE statements. This can be used as
     * an alternative when statements need to execute in bulk.
     *
     * @return A vector of future objects
     */
    [[nodiscard]] std::vector<FutureType>
    asyncExecuteEach(std::vector<StatementType> const& statements) const;

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncExecuteEach(std::vector<StatementType> const&) const for
     * how this works.
     */
    [[maybe_unused]] MaybeErrorType
    executeEach(std::vector<StatementType> const& statements) const;

    /**
     * @brief Execute a prepared statement with optional args asynchronously
     *
     * @return A future
     */
    template <typename... Args>
    [[nodiscard]] FutureType
    asyncExecute(PreparedStatementType const& statement, Args&&... args) const
    {
        auto bound = statement.bind<Args...>(std::forward<Args>(args)...);
        return cass_session_execute(session_, bound);
    }

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncExecute(PreparedStatementType const&, Args&&...) const
     * for how this works.
     */
    template <typename... Args>
    [[maybe_unused]] ResultOrErrorType
    execute(PreparedStatementType const& statement, Args&&... args) const
    {
        return asyncExecute<Args...>(statement, std::forward<Args>(args)...).get();
    }

    /**
     * @brief Execute one (bound or simple) statement asynchronously
     *
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncExecute(StatementType const& statement) const;

    /**
     * @brief Execute one (bound or simple) statement asynchronously with a
     * callback
     *
     * @return A future that holds onto the callback provided
     */
    [[nodiscard]] FutureWithCallbackType
    asyncExecute(StatementType const& statement, std::function<void(ResultOrErrorType)>&& cb) const;

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncExecute(StatementType const&) const for how this
     * works.
     */
    [[maybe_unused]] ResultOrErrorType
    execute(StatementType const& statement) const;

    /**
     * @brief Execute a batch of (bound or simple) statements asynchronously
     *
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncExecute(std::vector<StatementType> const& statements) const;

    /**
     * @brief Synchronous version of the above
     *
     * See @ref asyncExecute(std::vector<StatementType> const&) const for how
     * this works.
     */
    [[maybe_unused]] MaybeErrorType
    execute(std::vector<StatementType> const& statements) const;

    /**
     * @brief Execute a batch of (bound or simple) statements asynchronously
     * with a completion callback
     *
     * @return A future that holds onto the callback provided
     */
    [[nodiscard]] FutureWithCallbackType
    asyncExecute(std::vector<StatementType> const& statements, std::function<void(ResultOrErrorType)>&& cb) const;

    /**
     * @brief Prepare a statement
     *
     * @return A @ref PreparedStatementType
     * @throws std::runtime_error with underlying error description on failure
     */
    [[nodiscard]] PreparedStatementType
    prepare(std::string_view query) const;
};

/**
 * @brief Extracts the results into series of std::tuple<Types...> by creating a
 * simple wrapper with an STL input iterator inside.
 *
 * You can call .begin() and .end() in order to iterate as usual.
 * This also means that you can use it in a range-based for or with some
 * algorithms.
 */
template <typename... Types>
[[nodiscard]] detail::ResultExtractor<Types...>
extract(Handle::ResultType const& result)
{
    return {result};
}

} // namespace Backend::Cassandra
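A short usage sketch tying the handle and `extract()` together. The contact point, the keyspace name, and the assumption that the `Expected`-style result exposes `value()` alongside the `error()` seen above are all illustrative:
```
#include <backend/cassandra/Handle.h>

#include <cstdint>
#include <iostream>

using namespace Backend::Cassandra;

int
main()
{
    Handle handle{"127.0.0.1"};  // placeholder contact point

    if (auto rc = handle.connect("clio"); not rc)
    {
        std::cerr << "connect failed: " << rc.error() << '\n';
        return 1;
    }

    // Synchronous execute of a simple query; iterate rows as tuples.
    if (auto res = handle.execute("SELECT sequence FROM ledger_range"); res)
    {
        for (auto [seq] : extract<int64_t>(res.value()))
            std::cout << "sequence: " << seq << '\n';
    }
}
```
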
src/backend/cassandra/Schema.h (new file, 669 lines)
@@ -0,0 +1,669 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2023, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <backend/cassandra/Concepts.h>
|
||||
#include <backend/cassandra/Handle.h>
|
||||
#include <backend/cassandra/SettingsProvider.h>
|
||||
#include <backend/cassandra/Types.h>
|
||||
#include <config/Config.h>
|
||||
#include <log/Logger.h>
|
||||
#include <util/Expected.h>
|
||||
|
||||
#include <fmt/compile.h>
|
||||
|
||||
namespace Backend::Cassandra {
|
||||
|
||||
template <SomeSettingsProvider SettingsProviderType>
|
||||
[[nodiscard]] std::string inline qualifiedTableName(SettingsProviderType const& provider, std::string_view name)
|
||||
{
|
||||
return fmt::format("{}.{}{}", provider.getKeyspace(), provider.getTablePrefix().value_or(""), name);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Manages the DB schema and provides access to prepared statements
|
||||
*/
|
||||
template <SomeSettingsProvider SettingsProviderType>
|
||||
class Schema
|
||||
{
|
||||
// Current schema version.
|
||||
// Update this everytime you update the schema.
|
||||
// Migrations will be ran automatically based on this value.
|
||||
static constexpr uint16_t version = 1u;
|
||||
|
||||
clio::Logger log_{"Backend"};
|
||||
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
|
||||
|
||||
public:
|
||||
explicit Schema(SettingsProviderType const& settingsProvider) : settingsProvider_{std::cref(settingsProvider)}
|
||||
{
|
||||
}
|
||||
|
||||
std::string createKeyspace = [this]() {
|
||||
return fmt::format(
|
||||
R"(
|
||||
CREATE KEYSPACE IF NOT EXISTS {}
|
||||
WITH replication = {{
|
||||
'class': 'SimpleStrategy',
|
||||
'replication_factor': '{}'
|
||||
}}
|
||||
AND durable_writes = true
|
||||
)",
|
||||
settingsProvider_.get().getKeyspace(),
|
||||
settingsProvider_.get().getReplicationFactor());
|
||||
}();
|
||||
|
||||
// =======================
|
||||
// Schema creation queries
|
||||
// =======================
|
||||
|
||||
std::vector<Statement> createSchema = [this]() {
|
||||
std::vector<Statement> statements;
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
key blob,
|
||||
sequence bigint,
|
||||
object blob,
|
||||
PRIMARY KEY (key, sequence)
|
||||
)
|
||||
WITH CLUSTERING ORDER BY (sequence DESC)
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
hash blob PRIMARY KEY,
|
||||
ledger_sequence bigint,
|
||||
date bigint,
|
||||
transaction blob,
|
||||
metadata blob
|
||||
)
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "transactions"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
ledger_sequence bigint,
|
||||
hash blob,
|
||||
PRIMARY KEY (ledger_sequence, hash)
|
||||
)
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_transactions"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
key blob,
|
||||
seq bigint,
|
||||
next blob,
|
||||
PRIMARY KEY (key, seq)
|
||||
)
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "successor"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
seq bigint,
|
||||
key blob,
|
||||
PRIMARY KEY (seq, key)
|
||||
)
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "diff"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
account blob,
|
||||
seq_idx tuple<bigint, bigint>,
|
||||
hash blob,
|
||||
PRIMARY KEY (account, seq_idx)
|
||||
)
|
||||
WITH CLUSTERING ORDER BY (seq_idx DESC)
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
sequence bigint PRIMARY KEY,
|
||||
header blob
|
||||
)
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledgers"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
hash blob PRIMARY KEY,
|
||||
sequence bigint
|
||||
)
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_hashes"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
is_latest boolean PRIMARY KEY,
|
||||
sequence bigint
|
||||
)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
token_id blob,
|
||||
sequence bigint,
|
||||
owner blob,
|
||||
is_burned boolean,
|
||||
PRIMARY KEY (token_id, sequence)
|
||||
)
|
||||
WITH CLUSTERING ORDER BY (sequence DESC)
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_tokens"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
issuer blob,
|
||||
taxon bigint,
|
||||
token_id blob,
|
||||
PRIMARY KEY (issuer, taxon, token_id)
|
||||
)
|
||||
WITH CLUSTERING ORDER BY (taxon ASC, token_id ASC)
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
token_id blob,
|
||||
sequence bigint,
|
||||
uri blob,
|
||||
PRIMARY KEY (token_id, sequence)
|
||||
)
|
||||
WITH CLUSTERING ORDER BY (sequence DESC)
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_uris"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
token_id blob,
|
||||
seq_idx tuple<bigint, bigint>,
|
||||
hash blob,
|
||||
PRIMARY KEY (token_id, seq_idx)
|
||||
)
|
||||
WITH CLUSTERING ORDER BY (seq_idx DESC)
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
|
||||
return statements;
|
||||
}();
|
||||
|
||||
/**
|
||||
* @brief Prepared statements holder
|
||||
*/
|
||||
class Statements
|
||||
{
|
||||
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
|
||||
std::reference_wrapper<Handle const> handle_;
|
||||
|
||||
public:
|
||||
Statements(SettingsProviderType const& settingsProvider, Handle const& handle)
|
||||
: settingsProvider_{settingsProvider}, handle_{std::cref(handle)}
|
||||
{
|
||||
}
|
||||
|
||||
//
|
||||
// Insert queries
|
||||
//
|
||||
|
||||
PreparedStatement insertObject = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(key, sequence, object)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertTransaction = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(hash, ledger_sequence, date, transaction, metadata)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "transactions")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertLedgerTransaction = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(ledger_sequence, hash)
|
||||
VALUES (?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertSuccessor = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(key, seq, next)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "successor")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertDiff = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(seq, key)
|
||||
VALUES (?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "diff")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertAccountTx = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(account, seq_idx, hash)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertNFT = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(token_id, sequence, owner, is_burned)
|
||||
VALUES (?, ?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertIssuerNFT = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(issuer, taxon, token_id)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertNFTURI = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(token_id, sequence, uri)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertNFTTx = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(token_id, seq_idx, hash)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertLedgerHeader = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(sequence, header)
|
||||
VALUES (?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledgers")));
|
||||
}();
|
||||
|
||||
PreparedStatement insertLedgerHash = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(hash, sequence)
|
||||
VALUES (?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
|
||||
}();
|
||||
|
||||
//
|
||||
// Update (and "delete") queries
|
||||
//
|
||||
|
||||
PreparedStatement updateLedgerRange = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
UPDATE {}
|
||||
SET sequence = ?
|
||||
WHERE is_latest = ?
|
||||
IF sequence IN (?, null)
|
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_range")));
    }();

    PreparedStatement deleteLedgerRange = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            UPDATE {}
            SET sequence = ?
            WHERE is_latest = false
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_range")));
    }();

    //
    // Select queries
    //

    PreparedStatement selectSuccessor = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT next
            FROM {}
            WHERE key = ?
            AND seq <= ?
            ORDER BY seq DESC
            LIMIT 1
            )",
            qualifiedTableName(settingsProvider_.get(), "successor")));
    }();

    PreparedStatement selectDiff = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT key
            FROM {}
            WHERE seq = ?
            )",
            qualifiedTableName(settingsProvider_.get(), "diff")));
    }();

    PreparedStatement selectObject = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT object, sequence
            FROM {}
            WHERE key = ?
            AND sequence <= ?
            ORDER BY sequence DESC
            LIMIT 1
            )",
            qualifiedTableName(settingsProvider_.get(), "objects")));
    }();

    PreparedStatement selectTransaction = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT transaction, metadata, ledger_sequence, date
            FROM {}
            WHERE hash = ?
            )",
            qualifiedTableName(settingsProvider_.get(), "transactions")));
    }();

    PreparedStatement selectAllTransactionHashesInLedger = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT hash
            FROM {}
            WHERE ledger_sequence = ?
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
    }();

    PreparedStatement selectLedgerPageKeys = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT key
            FROM {}
            WHERE TOKEN(key) >= ?
            AND sequence <= ?
            PER PARTITION LIMIT 1
            LIMIT ?
            ALLOW FILTERING
            )",
            qualifiedTableName(settingsProvider_.get(), "objects")));
    }();

    PreparedStatement selectLedgerPage = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT object, key
            FROM {}
            WHERE TOKEN(key) >= ?
            AND sequence <= ?
            PER PARTITION LIMIT 1
            LIMIT ?
            ALLOW FILTERING
            )",
            qualifiedTableName(settingsProvider_.get(), "objects")));
    }();

    PreparedStatement getToken = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT TOKEN(key)
            FROM {}
            WHERE key = ?
            LIMIT 1
            )",
            qualifiedTableName(settingsProvider_.get(), "objects")));
    }();

    PreparedStatement selectAccountTx = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT hash, seq_idx
            FROM {}
            WHERE account = ?
            AND seq_idx <= ?
            LIMIT ?
            )",
            qualifiedTableName(settingsProvider_.get(), "account_tx")));
    }();

    // The smallest transaction index is 0 and the index is stored as an
    // unsigned integer, so ">=" is used here to include the transaction with
    // index 0 in the result.
    PreparedStatement selectAccountTxForward = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT hash, seq_idx
            FROM {}
            WHERE account = ?
            AND seq_idx >= ?
            ORDER BY seq_idx ASC
            LIMIT ?
            )",
            qualifiedTableName(settingsProvider_.get(), "account_tx")));
    }();

    PreparedStatement selectNFT = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT sequence, owner, is_burned
            FROM {}
            WHERE token_id = ?
            AND sequence <= ?
            ORDER BY sequence DESC
            LIMIT 1
            )",
            qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
    }();

    PreparedStatement selectNFTURI = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT uri
            FROM {}
            WHERE token_id = ?
            AND sequence <= ?
            ORDER BY sequence DESC
            LIMIT 1
            )",
            qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
    }();

    PreparedStatement selectNFTTx = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT hash, seq_idx
            FROM {}
            WHERE token_id = ?
            AND seq_idx < ?
            ORDER BY seq_idx DESC
            LIMIT ?
            )",
            qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
    }();

    PreparedStatement selectNFTTxForward = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT hash, seq_idx
            FROM {}
            WHERE token_id = ?
            AND seq_idx >= ?
            ORDER BY seq_idx ASC
            LIMIT ?
            )",
            qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
    }();

    PreparedStatement selectLedgerByHash = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT sequence
            FROM {}
            WHERE hash = ?
            LIMIT 1
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
    }();

    PreparedStatement selectLedgerBySeq = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT header
            FROM {}
            WHERE sequence = ?
            )",
            qualifiedTableName(settingsProvider_.get(), "ledgers")));
    }();

    PreparedStatement selectLatestLedger = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT sequence
            FROM {}
            WHERE is_latest = true
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_range")));
    }();

    PreparedStatement selectLedgerRange = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
            SELECT sequence
            FROM {}
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_range")));
    }();
};

/**
 * @brief Recreates the prepared statements
 */
void
prepareStatements(Handle const& handle)
{
    log_.info() << "Preparing cassandra statements";
    statements_ = std::make_unique<Statements>(settingsProvider_, handle);
    log_.info() << "Finished preparing statements";
}

/**
 * @brief Provides access to statements
 */
std::unique_ptr<Statements> const&
operator->() const
{
    return statements_;
}

private:
    std::unique_ptr<Statements> statements_{nullptr};
};

} // namespace Backend::Cassandra
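Usage note: the statements above are reached through the provider's operator-> and bound before execution. A minimal sketch under those assumptions (bind() and execute() are the PreparedStatement/Handle calls used throughout this diff; `statements`, `handle`, `key` and `seq` are hypothetical locals, not clio's actual call site):

    // Hedged usage sketch.
    auto const statement = statements->selectObject.bind(key, seq);
    if (auto const res = handle.execute(statement); res)
    {
        // on success, res holds a Result; rows are drained with
        // ResultIterator (see impl/Result.h later in this diff)
    }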
src/backend/cassandra/SettingsProvider.cpp (new file, 125 lines)
@@ -0,0 +1,125 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/cassandra/SettingsProvider.h>
#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/Statement.h>
#include <config/Config.h>

#include <boost/json.hpp>

#include <string>
#include <thread>

namespace Backend::Cassandra {

namespace detail {
inline Settings::ContactPoints
tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::value const& value)
{
    if (not value.is_object())
        throw std::runtime_error(
            "Feed entire Cassandra section to parse "
            "Settings::ContactPoints instead");

    clio::Config obj{value};
    Settings::ContactPoints out;

    out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
    out.port = obj.maybeValue<uint16_t>("port");

    return out;
}

inline Settings::SecureConnectionBundle
tag_invoke(boost::json::value_to_tag<Settings::SecureConnectionBundle>, boost::json::value const& value)
{
    if (not value.is_string())
        throw std::runtime_error("`secure_connect_bundle` must be a string");
    return Settings::SecureConnectionBundle{value.as_string().data()};
}
} // namespace detail

SettingsProvider::SettingsProvider(clio::Config const& cfg, uint16_t ttl)
    : config_{cfg}
    , keyspace_{cfg.valueOr<std::string>("keyspace", "clio")}
    , tablePrefix_{cfg.maybeValue<std::string>("table_prefix")}
    , replicationFactor_{cfg.valueOr<uint16_t>("replication_factor", 3)}
    , ttl_{ttl}
    , settings_{parseSettings()}
{
}

Settings
SettingsProvider::getSettings() const
{
    return settings_;
}

std::optional<std::string>
SettingsProvider::parseOptionalCertificate() const
{
    if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath)
    {
        auto const path = std::filesystem::path(*certPath);
        std::ifstream fileStream(path.string(), std::ios::in);
        if (!fileStream)
        {
            throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string());
        }

        std::string contents(std::istreambuf_iterator<char>{fileStream}, std::istreambuf_iterator<char>{});
        if (fileStream.bad())
        {
            throw std::system_error(errno, std::generic_category(), "Reading certificate " + path.string());
        }

        return contents;
    }

    return std::nullopt;
}

Settings
SettingsProvider::parseSettings() const
{
    auto settings = Settings::defaultSettings();
    if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle)
    {
        settings.connectionInfo = *bundle;
    }
    else
    {
        settings.connectionInfo =
            config_.valueOrThrow<Settings::ContactPoints>("Missing contact_points in Cassandra config");
    }

    settings.threads = config_.valueOr<uint32_t>("threads", settings.threads);
    settings.maxWriteRequestsOutstanding =
        config_.valueOr<uint32_t>("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding);
    settings.maxReadRequestsOutstanding =
        config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
    settings.certificate = parseOptionalCertificate();
    settings.username = config_.maybeValue<std::string>("username");
    settings.password = config_.maybeValue<std::string>("password");

    return settings;
}

} // namespace Backend::Cassandra
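For reference, a config fragment that would satisfy the parsing above; the key names match what parseSettings() and the tag_invoke overloads read, while the values are illustrative only. A minimal sketch, assuming clio::Config wraps a parsed boost::json::value (as it does in the tag_invoke overloads above):

    // Hedged sketch: construct a provider from a JSON blob.
    auto const json = boost::json::parse(R"json({
        "contact_points": "127.0.0.1",
        "port": 9042,
        "keyspace": "clio",
        "replication_factor": 3
    })json");
    auto const provider = Backend::Cassandra::SettingsProvider{clio::Config{json}};
    auto const settings = provider.getSettings();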
src/backend/cassandra/SettingsProvider.h (new file, 86 lines)
@@ -0,0 +1,86 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <util/Expected.h>

namespace Backend::Cassandra {

/**
 * @brief Provides settings for @ref CassandraBackend
 */
class SettingsProvider
{
    clio::Config config_;

    std::string keyspace_;
    std::optional<std::string> tablePrefix_;
    uint16_t replicationFactor_;
    uint16_t ttl_;
    Settings settings_;

public:
    explicit SettingsProvider(clio::Config const& cfg, uint16_t ttl = 0);

    /*! Get the cluster settings */
    [[nodiscard]] Settings
    getSettings() const;

    /*! Get the specified keyspace */
    [[nodiscard]] inline std::string
    getKeyspace() const
    {
        return keyspace_;
    }

    /*! Get an optional table prefix to use in all queries */
    [[nodiscard]] inline std::optional<std::string>
    getTablePrefix() const
    {
        return tablePrefix_;
    }

    /*! Get the replication factor */
    [[nodiscard]] inline uint16_t
    getReplicationFactor() const
    {
        return replicationFactor_;
    }

    /*! Get the default time to live to use in all `create` queries */
    [[nodiscard]] inline uint16_t
    getTtl() const
    {
        return ttl_;
    }

private:
    [[nodiscard]] std::optional<std::string>
    parseOptionalCertificate() const;

    [[nodiscard]] Settings
    parseSettings() const;
};

} // namespace Backend::Cassandra
src/backend/cassandra/Types.h (new file, 67 lines)
@@ -0,0 +1,67 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <util/Expected.h>

#include <string>

namespace Backend::Cassandra {

namespace detail {
struct Settings;
class Session;
class Cluster;
struct Future;
class FutureWithCallback;
struct Result;
class Statement;
class PreparedStatement;
struct Batch;
} // namespace detail

using Settings = detail::Settings;
using Future = detail::Future;
using FutureWithCallback = detail::FutureWithCallback;
using Result = detail::Result;
using Statement = detail::Statement;
using PreparedStatement = detail::PreparedStatement;
using Batch = detail::Batch;

/**
 * @brief A strong type wrapper for int32_t
 *
 * This is unfortunately needed right now to support uint32_t properly
 * because clio uses bigint (int64) everywhere except for when one needs
 * to specify LIMIT, which needs an int32 :-/
 */
struct Limit
{
    int32_t limit;
};

class Handle;
class CassandraError;

using MaybeError = util::Expected<void, CassandraError>;
using ResultOrError = util::Expected<Result, CassandraError>;
using Error = util::Unexpected<CassandraError>;

} // namespace Backend::Cassandra
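The Limit wrapper exists so the statement layer can tell a CQL LIMIT apart from an ordinary integer column when serializing bound values. A one-line sketch of the intended call shape (the statement name and the other arguments are hypothetical):

    // Hedged sketch: Limit{} marks the value bound into "LIMIT ?" as int32.
    auto statement = statements->selectAccountTx.bind(account, seqIdx, Limit{200});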
src/backend/cassandra/impl/AsyncExecutor.h (new file, 119 lines)
@@ -0,0 +1,119 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Concepts.h>
#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/RetryPolicy.h>
#include <log/Logger.h>
#include <util/Expected.h>

#include <boost/asio.hpp>

#include <functional>
#include <memory>

namespace Backend::Cassandra::detail {

/**
 * @brief A query executor with a changeable retry policy
 *
 * Note: this is a bit of an anti-pattern and should be done differently
 * eventually.
 *
 * Currently it's basically a saner implementation of the previous design that
 * was used in production without much issue but was using raw new/delete and
 * could leak easily. This version is slightly better but the overall design is
 * flawed and should be reworked.
 */
template <
    typename StatementType,
    typename HandleType = Handle,
    SomeRetryPolicy RetryPolicyType = ExponentialBackoffRetryPolicy>
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>>
{
    using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
    using CallbackType = std::function<void(typename HandleType::ResultOrErrorType)>;

    clio::Logger log_{"Backend"};

    StatementType data_;
    RetryPolicyType retryPolicy_;
    CallbackType onComplete_;

    // does not exist during initial construction, hence optional
    std::optional<FutureWithCallbackType> future_;
    std::mutex mtx_;

public:
    /**
     * @brief Create a new instance of the AsyncExecutor and execute it.
     */
    static void
    run(boost::asio::io_context& ioc, HandleType const& handle, StatementType&& data, CallbackType&& onComplete)
    {
        // this is a helper that allows us to use std::make_shared below
        struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType>
        {
            EnableMakeShared(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
                : AsyncExecutor(ioc, std::move(data), std::move(onComplete))
            {
            }
        };

        auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete));
        ptr->execute(handle);
    }

private:
    AsyncExecutor(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
        : data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}
    {
    }

    void
    execute(HandleType const& handle)
    {
        auto self = this->shared_from_this();

        // lifetime is extended by capturing self ptr
        auto handler = [this, &handle, self](auto&& res) mutable {
            if (res)
            {
                onComplete_(std::move(res));
            }
            else
            {
                if (retryPolicy_.shouldRetry(res.error()))
                    retryPolicy_.retry([self, &handle]() { self->execute(handle); });
                else
                    onComplete_(std::move(res)); // report error
            }

            self = nullptr; // explicitly decrement refcount
        };

        std::scoped_lock lck{mtx_};
        future_.emplace(handle.asyncExecute(data_, std::move(handler)));
    }
};

} // namespace Backend::Cassandra::detail
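A sketch of how the executor above is kicked off: run() constructs the shared instance, and the object keeps itself alive through shared_from_this until onComplete_ fires exactly once, either with a result or with the final error once the retry policy gives up (the `ioc`, `handle` and `statement` inputs here are hypothetical):

    // Hedged usage sketch.
    AsyncExecutor<Statement>::run(
        ioc, handle, std::move(statement), [](auto resultOrError) {
            // invoked once per run(); inspect resultOrError here
        });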
src/backend/cassandra/impl/Batch.cpp (new file, 56 lines)
@@ -0,0 +1,56 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/cassandra/Error.h>
#include <backend/cassandra/impl/Batch.h>
#include <backend/cassandra/impl/Statement.h>
#include <util/Expected.h>

#include <exception>
#include <vector>

namespace {
static constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {

// todo: use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for
// different use cases
Batch::Batch(std::vector<Statement> const& statements)
    : ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), batchDeleter}
{
    cass_batch_set_is_idempotent(*this, cass_true);

    for (auto const& statement : statements)
        if (auto const res = add(statement); not res)
            throw std::runtime_error("Failed to add statement to batch: " + res.error());
}

MaybeError
Batch::add(Statement const& statement)
{
    if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK)
    {
        return Error{CassandraError{cass_error_desc(rc), rc}};
    }
    return {};
}

} // namespace Backend::Cassandra::detail
src/backend/cassandra/impl/Batch.h (new file, 37 lines)
@@ -0,0 +1,37 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

namespace Backend::Cassandra::detail {

struct Batch : public ManagedObject<CassBatch>
{
    Batch(std::vector<Statement> const& statements);

    MaybeError
    add(Statement const& statement);
};

} // namespace Backend::Cassandra::detail
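A sketch of the intended use: bind a set of statements, then hand them to the Batch constructor, which adds each one and throws if any add fails (the insert statements and their bound values are hypothetical):

    // Hedged usage sketch.
    std::vector<Statement> statements;
    statements.push_back(insertObject.bind(/* key, seq, blob */));
    statements.push_back(insertSuccessor.bind(/* key, seq, next */));
    Batch batch{statements};  // logged, idempotent batch ready for execution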
src/backend/cassandra/impl/Cluster.cpp (new file, 154 lines)
@@ -0,0 +1,154 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/SslContext.h>
#include <backend/cassandra/impl/Statement.h>
#include <util/Expected.h>

#include <exception>
#include <vector>

namespace {
static constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };

template <class... Ts>
struct overloadSet : Ts...
{
    using Ts::operator()...;
};

// explicit deduction guide (not needed as of C++20, but clang be clang)
template <class... Ts>
overloadSet(Ts...) -> overloadSet<Ts...>;
} // namespace

namespace Backend::Cassandra::detail {

Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), clusterDeleter}
{
    using std::to_string;

    cass_cluster_set_token_aware_routing(*this, cass_true);
    if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK)
    {
        throw std::runtime_error(std::string{"Error setting cassandra protocol version to v4: "} + cass_error_desc(rc));
    }

    if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK)
    {
        throw std::runtime_error(
            std::string{"Error setting cassandra io threads to "} + to_string(settings.threads) + ": " +
            cass_error_desc(rc));
    }

    cass_log_set_level(settings.enableLog ? CASS_LOG_TRACE : CASS_LOG_DISABLED);
    cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
    cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

    // TODO: other options to experiment with and consider later:
    // cass_cluster_set_max_concurrent_requests_threshold(*this, 10000);
    // cass_cluster_set_queue_size_event(*this, 100000);
    // cass_cluster_set_queue_size_io(*this, 100000);
    // cass_cluster_set_write_bytes_high_water_mark(*this, 16 * 1024 * 1024);  // 16mb
    // cass_cluster_set_write_bytes_low_water_mark(*this, 8 * 1024 * 1024);    // half of allowance
    // cass_cluster_set_pending_requests_high_water_mark(*this, 5000);
    // cass_cluster_set_pending_requests_low_water_mark(*this, 2500);  // half
    // cass_cluster_set_max_requests_per_flush(*this, 1000);
    // cass_cluster_set_max_concurrent_creation(*this, 8);
    // cass_cluster_set_max_connections_per_host(*this, 6);
    // cass_cluster_set_core_connections_per_host(*this, 4);
    // cass_cluster_set_constant_speculative_execution_policy(*this, 1000, 1024);

    if (auto const rc = cass_cluster_set_queue_size_io(
            *this, settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
        rc != CASS_OK)
    {
        throw std::runtime_error(std::string{"Could not set queue size for IO per host: "} + cass_error_desc(rc));
    }

    setupConnection(settings);
    setupCertificate(settings);
    setupCredentials(settings);
}

void
Cluster::setupConnection(Settings const& settings)
{
    std::visit(
        overloadSet{
            [this](Settings::ContactPoints const& points) { setupContactPoints(points); },
            [this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }},
        settings.connectionInfo);
}

void
Cluster::setupContactPoints(Settings::ContactPoints const& points)
{
    using std::to_string;
    auto throwErrorIfNeeded = [](CassError rc, std::string const& label, std::string const& value) {
        if (rc != CASS_OK)
            throw std::runtime_error("Cassandra: Error setting " + label + " [" + value + "]: " + cass_error_desc(rc));
    };

    {
        log_.debug() << "Attempt connection using contact points: " << points.contactPoints;
        auto const rc = cass_cluster_set_contact_points(*this, points.contactPoints.data());
        throwErrorIfNeeded(rc, "contact_points", points.contactPoints);
    }

    if (points.port)
    {
        auto const rc = cass_cluster_set_port(*this, points.port.value());
        throwErrorIfNeeded(rc, "port", to_string(points.port.value()));
    }
}

void
Cluster::setupSecureBundle(Settings::SecureConnectionBundle const& bundle)
{
    log_.debug() << "Attempt connection using secure bundle";
    if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK)
    {
        throw std::runtime_error("Failed to connect using secure connection bundle: " + bundle.bundle);
    }
}

void
Cluster::setupCertificate(Settings const& settings)
{
    if (not settings.certificate)
        return;

    log_.debug() << "Configure SSL context";
    SslContext context = SslContext(*settings.certificate);
    cass_cluster_set_ssl(*this, context);
}

void
Cluster::setupCredentials(Settings const& settings)
{
    if (not settings.username || not settings.password)
        return;

    log_.debug() << "Set credentials; username: " << settings.username.value();
    cass_cluster_set_credentials(*this, settings.username.value().c_str(), settings.password.value().c_str());
}

} // namespace Backend::Cassandra::detail
src/backend/cassandra/impl/Cluster.h (new file, 99 lines)
@@ -0,0 +1,99 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <log/Logger.h>

#include <cassandra.h>

#include <chrono>
#include <optional>
#include <string>
#include <string_view>
#include <thread>
#include <variant>

namespace Backend::Cassandra::detail {

struct Settings
{
    struct ContactPoints
    {
        std::string contactPoints = "127.0.0.1"; // defaults to localhost
        std::optional<uint16_t> port;
    };

    struct SecureConnectionBundle
    {
        std::string bundle; // no meaningful default
    };

    bool enableLog = false;
    std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{10000};
    std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
    std::variant<ContactPoints, SecureConnectionBundle> connectionInfo = ContactPoints{};
    uint32_t threads = std::thread::hardware_concurrency();
    uint32_t maxWriteRequestsOutstanding = 10'000;
    uint32_t maxReadRequestsOutstanding = 100'000;
    std::optional<std::string> certificate; // ssl context
    std::optional<std::string> username;
    std::optional<std::string> password;

    Settings
    withContactPoints(std::string_view contactPoints)
    {
        auto tmp = *this;
        tmp.connectionInfo = ContactPoints{std::string{contactPoints}};
        return tmp;
    }

    static Settings
    defaultSettings()
    {
        return Settings();
    }
};

class Cluster : public ManagedObject<CassCluster>
{
    clio::Logger log_{"Backend"};

public:
    Cluster(Settings const& settings);

private:
    void
    setupConnection(Settings const& settings);

    void
    setupContactPoints(Settings::ContactPoints const& points);

    void
    setupSecureBundle(Settings::SecureConnectionBundle const& bundle);

    void
    setupCertificate(Settings const& settings);

    void
    setupCredentials(Settings const& settings);
};

} // namespace Backend::Cassandra::detail
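Construction is then a two-liner: withContactPoints() returns a modified copy, so defaults can be overridden inline, and the Cluster constructor throws std::runtime_error if the driver rejects any option:

    // Minimal sketch using only the API shown above.
    auto const settings = Settings::defaultSettings().withContactPoints("127.0.0.1");
    Cluster cluster{settings};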
src/backend/cassandra/impl/ExecutionStrategy.h (new file, 443 lines)
@@ -0,0 +1,443 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/AsyncExecutor.h>
#include <log/Logger.h>
#include <util/Expected.h>

#include <boost/asio/async_result.hpp>
#include <boost/asio/spawn.hpp>

#include <atomic>
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <thread>

namespace Backend::Cassandra::detail {

/**
 * @brief Implements async and sync querying against the cassandra DB with
 * support for throttling.
 *
 * Note: A lot of the code that uses yield is repeated below. This is ok for now
 * because we are hopefully going to be getting rid of it entirely later on.
 */
template <typename HandleType = Handle>
class DefaultExecutionStrategy
{
    clio::Logger log_{"Backend"};

    std::uint32_t maxWriteRequestsOutstanding_;
    std::atomic_uint32_t numWriteRequestsOutstanding_ = 0;

    std::uint32_t maxReadRequestsOutstanding_;
    std::atomic_uint32_t numReadRequestsOutstanding_ = 0;

    std::mutex throttleMutex_;
    std::condition_variable throttleCv_;

    std::mutex syncMutex_;
    std::condition_variable syncCv_;

    boost::asio::io_context ioc_;
    std::optional<boost::asio::io_service::work> work_;

    std::reference_wrapper<HandleType const> handle_;
    std::thread thread_;

public:
    using ResultOrErrorType = typename HandleType::ResultOrErrorType;
    using StatementType = typename HandleType::StatementType;
    using PreparedStatementType = typename HandleType::PreparedStatementType;
    using FutureType = typename HandleType::FutureType;
    using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
    using ResultType = typename HandleType::ResultType;

    using CompletionTokenType = boost::asio::yield_context;
    using FunctionType = void(boost::system::error_code);
    using AsyncResultType = boost::asio::async_result<CompletionTokenType, FunctionType>;
    using HandlerType = typename AsyncResultType::completion_handler_type;

    DefaultExecutionStrategy(Settings settings, HandleType const& handle)
        : maxWriteRequestsOutstanding_{settings.maxWriteRequestsOutstanding}
        , maxReadRequestsOutstanding_{settings.maxReadRequestsOutstanding}
        , work_{ioc_}
        , handle_{std::cref(handle)}
        , thread_{[this]() { ioc_.run(); }}
    {
        log_.info() << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
                    << "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
    }

    ~DefaultExecutionStrategy()
    {
        work_.reset();
        ioc_.stop();
        thread_.join();
    }

    /**
     * @brief Wait for all async writes to finish before unblocking
     */
    void
    sync()
    {
        log_.debug() << "Waiting to sync all writes...";
        std::unique_lock<std::mutex> lck(syncMutex_);
        syncCv_.wait(lck, [this]() { return finishedAllWriteRequests(); });
        log_.debug() << "Sync done.";
    }

    bool
    isTooBusy() const
    {
        return numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
    }

    /**
     * @brief Blocking query execution used for writing data
     *
     * Retries forever sleeping for 5 milliseconds between attempts.
     */
    ResultOrErrorType
    writeSync(StatementType const& statement)
    {
        while (true)
        {
            if (auto res = handle_.get().execute(statement); res)
            {
                return res;
            }
            else
            {
                log_.warn() << "Cassandra sync write error, retrying: " << res.error();
                std::this_thread::sleep_for(std::chrono::milliseconds(5));
            }
        }
    }

    /**
     * @brief Blocking query execution used for writing data
     *
     * Retries forever sleeping for 5 milliseconds between attempts.
     */
    template <typename... Args>
    ResultOrErrorType
    writeSync(PreparedStatementType const& preparedStatement, Args&&... args)
    {
        return writeSync(preparedStatement.bind(std::forward<Args>(args)...));
    }

    /**
     * @brief Non-blocking query execution used for writing data
     *
     * Retries forever with retry policy specified by @ref AsyncExecutor
     *
     * @param preparedStatement Statement to prepare and execute
     * @param args Args to bind to the prepared statement
     * @throw DatabaseTimeout on timeout
     */
    template <typename... Args>
    void
    write(PreparedStatementType const& preparedStatement, Args&&... args)
    {
        auto statement = preparedStatement.bind(std::forward<Args>(args)...);
        incrementOutstandingRequestCount();

        // Note: lifetime is controlled by std::enable_shared_from_this internally
        AsyncExecutor<std::decay_t<decltype(statement)>, HandleType>::run(
            ioc_, handle_, std::move(statement), [this](auto const&) { decrementOutstandingRequestCount(); });
    }

    /**
     * @brief Non-blocking batched query execution used for writing data
     *
     * Retries forever with retry policy specified by @ref AsyncExecutor.
     *
     * @param statements Vector of statements to execute as a batch
     * @throw DatabaseTimeout on timeout
     */
    void
    write(std::vector<StatementType>&& statements)
    {
        if (statements.empty())
            return;

        incrementOutstandingRequestCount();

        // Note: lifetime is controlled by std::enable_shared_from_this internally
        AsyncExecutor<std::decay_t<decltype(statements)>, HandleType>::run(
            ioc_, handle_, std::move(statements), [this](auto const&) { decrementOutstandingRequestCount(); });
    }

    /**
     * @brief Coroutine-based query execution used for reading data.
     *
     * Retries forever until successful or throws an exception on timeout.
     *
     * @param token Completion token (yield_context)
     * @param preparedStatement Statement to prepare and execute
     * @param args Args to bind to the prepared statement
     * @throw DatabaseTimeout on timeout
     * @return ResultType or error wrapped in Expected
     */
    template <typename... Args>
    [[maybe_unused]] ResultOrErrorType
    read(CompletionTokenType token, PreparedStatementType const& preparedStatement, Args&&... args)
    {
        return read(token, preparedStatement.bind(std::forward<Args>(args)...));
    }

    /**
     * @brief Coroutine-based query execution used for reading data.
     *
     * Retries forever until successful or throws an exception on timeout.
     *
     * @param token Completion token (yield_context)
     * @param statements Statements to execute in a batch
     * @throw DatabaseTimeout on timeout
     * @return ResultType or error wrapped in Expected
     */
    [[maybe_unused]] ResultOrErrorType
    read(CompletionTokenType token, std::vector<StatementType> const& statements)
    {
        auto handler = HandlerType{token};
        auto result = AsyncResultType{handler};
        auto const numStatements = statements.size();

        // todo: perhaps use policy instead
        while (true)
        {
            numReadRequestsOutstanding_ += numStatements;

            auto const future = handle_.get().asyncExecute(statements, [handler](auto&&) mutable {
                boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
                    handler(boost::system::error_code{});
                });
            });

            // suspend coroutine until completion handler is called
            result.get();

            numReadRequestsOutstanding_ -= numStatements;

            // it's safe to call blocking get on future here as we already
            // waited for the coroutine to resume above.
            if (auto res = future.get(); res)
            {
                return res;
            }
            else
            {
                log_.error() << "Failed batch read in coroutine: " << res.error();
                throwErrorIfNeeded(res.error());
            }
        }
    }

    /**
     * @brief Coroutine-based query execution used for reading data.
     *
     * Retries forever until successful or throws an exception on timeout.
     *
     * @param token Completion token (yield_context)
     * @param statement Statement to execute
     * @throw DatabaseTimeout on timeout
     * @return ResultType or error wrapped in Expected
     */
    [[maybe_unused]] ResultOrErrorType
    read(CompletionTokenType token, StatementType const& statement)
    {
        auto handler = HandlerType{token};
        auto result = AsyncResultType{handler};

        // todo: perhaps use policy instead
        while (true)
        {
            ++numReadRequestsOutstanding_;

            auto const future = handle_.get().asyncExecute(statement, [handler](auto const&) mutable {
                boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
                    handler(boost::system::error_code{});
                });
            });

            // suspend coroutine until completion handler is called
            result.get();

            --numReadRequestsOutstanding_;

            // it's safe to call blocking get on future here as we already
            // waited for the coroutine to resume above.
            if (auto res = future.get(); res)
            {
                return res;
            }
            else
            {
                log_.error() << "Failed read in coroutine: " << res.error();
                throwErrorIfNeeded(res.error());
            }
        }
    }

    /**
     * @brief Coroutine-based query execution used for reading data.
     *
     * Attempts to execute each statement. On any error the whole vector will be
     * discarded and an exception will be thrown.
     *
     * @param token Completion token (yield_context)
     * @param statements Statements to execute
     * @throw DatabaseTimeout on db error
     * @return Vector of results
     */
    std::vector<ResultType>
    readEach(CompletionTokenType token, std::vector<StatementType> const& statements)
    {
        auto handler = HandlerType{token};
        auto result = AsyncResultType{handler};

        std::atomic_bool hadError = false;
        std::atomic_int numOutstanding = statements.size();
        numReadRequestsOutstanding_ += statements.size();

        auto futures = std::vector<FutureWithCallbackType>{};
        futures.reserve(numOutstanding);

        // used as the handler for each async statement individually
        auto executionHandler = [handler, &hadError, &numOutstanding](auto const& res) mutable {
            if (not res)
                hadError = true;

            // when all async operations complete unblock the result
            if (--numOutstanding == 0)
                boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
                    handler(boost::system::error_code{});
                });
        };

        std::transform(
            std::cbegin(statements),
            std::cend(statements),
            std::back_inserter(futures),
            [this, &executionHandler](auto const& statement) {
                return handle_.get().asyncExecute(statement, executionHandler);
            });

        // suspend coroutine until completion handler is called
        result.get();

        numReadRequestsOutstanding_ -= statements.size();

        if (hadError)
            throw DatabaseTimeout{};

        std::vector<ResultType> results;
        results.reserve(futures.size());

        // it's safe to call blocking get on futures here as we already
        // waited for the coroutine to resume above.
        std::transform(
            std::make_move_iterator(std::begin(futures)),
            std::make_move_iterator(std::end(futures)),
            std::back_inserter(results),
            [](auto&& future) {
                auto entry = future.get();
                auto&& res = entry.value();
                return std::move(res);
            });

        assert(futures.size() == statements.size());
        assert(results.size() == statements.size());
        return results;
    }

private:
    void
    incrementOutstandingRequestCount()
    {
        {
            std::unique_lock<std::mutex> lck(throttleMutex_);
            if (!canAddWriteRequest())
            {
                log_.trace() << "Max outstanding requests reached. "
                             << "Waiting for other requests to finish";
                throttleCv_.wait(lck, [this]() { return canAddWriteRequest(); });
            }
        }
        ++numWriteRequestsOutstanding_;
    }

    void
    decrementOutstandingRequestCount()
    {
        // sanity check
        if (numWriteRequestsOutstanding_ == 0)
        {
            assert(false);
            throw std::runtime_error("decrementing num outstanding below 0");
        }
        size_t cur = (--numWriteRequestsOutstanding_);
        {
            // mutex lock required to prevent race condition around spurious
            // wakeup
            std::lock_guard lck(throttleMutex_);
            throttleCv_.notify_one();
        }
        if (cur == 0)
        {
            // mutex lock required to prevent race condition around spurious
            // wakeup
            std::lock_guard lck(syncMutex_);
            syncCv_.notify_one();
        }
    }

    bool
    canAddWriteRequest() const
    {
        return numWriteRequestsOutstanding_ < maxWriteRequestsOutstanding_;
    }

    bool
    finishedAllWriteRequests() const
    {
        return numWriteRequestsOutstanding_ == 0;
    }

    void
    throwErrorIfNeeded(CassandraError err) const
    {
        if (err.isTimeout())
            throw DatabaseTimeout();

        if (err.isInvalidQuery())
            throw std::runtime_error("Invalid query");
    }
};

} // namespace Backend::Cassandra::detail
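A sketch of the strategy in use from a boost::asio stackful coroutine: reads pass the yield_context as the completion token and suspend until the query lands, while writes are queued asynchronously and only block when maxWriteRequestsOutstanding is reached (the insertLedgerHeader statement name and the locals are hypothetical):

    // Hedged usage sketch.
    boost::asio::spawn(ioc, [&](boost::asio::yield_context yield) {
        // suspends; throws DatabaseTimeout if the read times out
        auto const res = strategy.read(yield, statements->selectLedgerBySeq, seq);

        // fire-and-forget write, throttled internally
        strategy.write(statements->insertLedgerHeader, seq, std::move(header));

        strategy.sync();  // block until every queued write has completed
    });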
src/backend/cassandra/impl/Future.cpp (new file, 102 lines)
@@ -0,0 +1,102 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/cassandra/Error.h>
#include <backend/cassandra/impl/Future.h>
#include <backend/cassandra/impl/Result.h>

#include <exception>
#include <vector>

namespace {
static constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {

/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, futureDeleter}
{
}

MaybeError
Future::await() const
{
    if (auto const rc = cass_future_error_code(*this); rc)
    {
        auto errMsg = [this](std::string const& label) {
            char const* message;
            std::size_t len;
            cass_future_error_message(*this, &message, &len);
            return label + ": " + std::string{message, len};
        }(cass_error_desc(rc));
        return Error{CassandraError{errMsg, rc}};
    }
    return {};
}

ResultOrError
Future::get() const
{
    if (auto const rc = cass_future_error_code(*this); rc)
    {
        auto const errMsg = [this](std::string const& label) {
            char const* message;
            std::size_t len;
            cass_future_error_message(*this, &message, &len);
            return label + ": " + std::string{message, len};
        }("future::get()");
        return Error{CassandraError{errMsg, rc}};
    }
    else
    {
        return Result{cass_future_get_result(*this)};
    }
}

void
invokeHelper(CassFuture* ptr, void* cbPtr)
{
    // Note: can't use Future{ptr}.get() because double free will occur :/
    auto* cb = static_cast<FutureWithCallback::fn_t*>(cbPtr);
    if (auto const rc = cass_future_error_code(ptr); rc)
    {
        auto const errMsg = [&ptr](std::string const& label) {
            char const* message;
            std::size_t len;
            cass_future_error_message(ptr, &message, &len);
            return label + ": " + std::string{message, len};
        }("invokeHelper");
        (*cb)(Error{CassandraError{errMsg, rc}});
    }
    else
    {
        (*cb)(Result{cass_future_get_result(ptr)});
    }
}

/* implicit */ FutureWithCallback::FutureWithCallback(CassFuture* ptr, fn_t&& cb)
    : Future{ptr}, cb_{std::make_unique<fn_t>(std::move(cb))}
{
    // Instead of passing `this` as the userdata void*, we pass the address of
    // the callback itself which will survive std::move of the
    // FutureWithCallback parent. Not ideal but I have no better solution atm.
    cass_future_set_callback(*this, &invokeHelper, cb_.get());
}

} // namespace Backend::Cassandra::detail
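The two blocking accessors above split along intent: await() when only success or failure matters (e.g. schema statements), get() when rows are needed. A hedged sketch, assuming `future` came from one of the Handle's async calls and `log` is a clio::Logger:

    // Hedged usage sketch.
    if (auto const rc = future.await(); not rc)
        log.error() << "statement failed: " << rc.error();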
src/backend/cassandra/impl/Future.h (new file, 58 lines)
@@ -0,0 +1,58 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

namespace Backend::Cassandra::detail {

struct Future : public ManagedObject<CassFuture>
{
    /* implicit */ Future(CassFuture* ptr);

    MaybeError
    await() const;

    ResultOrError
    get() const;
};

void
invokeHelper(CassFuture* ptr, void* self);

class FutureWithCallback : public Future
{
public:
    using fn_t = std::function<void(ResultOrError)>;
    using fn_ptr_t = std::unique_ptr<fn_t>;

    /* implicit */ FutureWithCallback(CassFuture* ptr, fn_t&& cb);
    FutureWithCallback(FutureWithCallback const&) = delete;
    FutureWithCallback(FutureWithCallback&&) = default;

private:
    /*! Wrapped in a unique_ptr so it can survive std::move :/ */
    fn_ptr_t cb_;
};

} // namespace Backend::Cassandra::detail
src/backend/cassandra/impl/ManagedObject.h (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2023, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
|
||||
namespace Backend::Cassandra::detail {
|
||||
|
||||
template <typename Managed>
|
||||
class ManagedObject
|
||||
{
|
||||
protected:
|
||||
std::unique_ptr<Managed, void (*)(Managed*)> ptr_;
|
||||
|
||||
public:
|
||||
template <typename deleterCallable>
|
||||
ManagedObject(Managed* rawPtr, deleterCallable deleter) : ptr_{rawPtr, deleter}
|
||||
{
|
||||
if (rawPtr == nullptr)
|
||||
throw std::runtime_error("Could not create DB object - got nullptr");
|
||||
}
|
||||
ManagedObject(ManagedObject&&) = default;
|
||||
|
||||
operator Managed* const() const
|
||||
{
|
||||
return ptr_.get();
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Backend::Cassandra::detail
|
||||
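A minimal sketch of how this RAII wrapper is used (illustration only; the deleter lambda mirrors the pattern the other wrappers in this diff follow):

// Hypothetical: wrap a driver-owned object so it is freed exactly once.
struct Cluster : public ManagedObject<CassCluster>
{
    static constexpr auto deleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };

    Cluster() : ManagedObject{cass_cluster_new(), deleter}
    {
    }
};
// The implicit conversion operator lets a Cluster be passed anywhere a
// CassCluster* is expected, e.g. cass_session_connect(session, cluster).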
src/backend/cassandra/impl/Result.cpp (new file, 69 lines)
@@ -0,0 +1,69 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/cassandra/impl/Result.h>

namespace {
static constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
static constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {

/* implicit */ Result::Result(CassResult const* ptr) : ManagedObject{ptr, resultDeleter}
{
}

[[nodiscard]] std::size_t
Result::numRows() const
{
    return cass_result_row_count(*this);
}

[[nodiscard]] bool
Result::hasRows() const
{
    return numRows() > 0;
}

/* implicit */ ResultIterator::ResultIterator(CassIterator* ptr)
    : ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr)}
{
}

[[nodiscard]] ResultIterator
ResultIterator::fromResult(Result const& result)
{
    return {cass_iterator_from_result(result)};
}

[[maybe_unused]] bool
ResultIterator::moveForward()
{
    hasMore_ = cass_iterator_next(*this);
    return hasMore_;
}

[[nodiscard]] bool
ResultIterator::hasMore() const
{
    return hasMore_;
}

} // namespace Backend::Cassandra::detail
src/backend/cassandra/impl/Result.h (new file, 257 lines)
@@ -0,0 +1,257 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Tuple.h>
#include <util/Expected.h>

#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <cassandra.h>

#include <compare>
#include <functional>
#include <iterator>
#include <optional>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>

namespace Backend::Cassandra::detail {

template <typename>
static constexpr bool unsupported_v = false;

template <typename Type>
inline Type
extractColumn(CassRow const* row, std::size_t idx)
{
    using std::to_string;
    Type output;

    auto throwErrorIfNeeded = [](CassError rc, std::string_view label) {
        if (rc != CASS_OK)
        {
            auto const tag = '[' + std::string{label} + ']';
            throw std::logic_error(tag + ": " + cass_error_desc(rc));
        }
    };

    using decayed_t = std::decay_t<Type>;
    using uint_tuple_t = std::tuple<uint32_t, uint32_t>;
    using uchar_vector_t = std::vector<unsigned char>;

    if constexpr (std::is_same_v<decayed_t, ripple::uint256>)
    {
        cass_byte_t const* buf;
        std::size_t bufSize;
        auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
        throwErrorIfNeeded(rc, "Extract ripple::uint256");
        output = ripple::uint256::fromVoid(buf);
    }
    else if constexpr (std::is_same_v<decayed_t, ripple::AccountID>)
    {
        cass_byte_t const* buf;
        std::size_t bufSize;
        auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
        throwErrorIfNeeded(rc, "Extract ripple::AccountID");
        output = ripple::AccountID::fromVoid(buf);
    }
    else if constexpr (std::is_same_v<decayed_t, uchar_vector_t>)
    {
        cass_byte_t const* buf;
        std::size_t bufSize;
        auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
        throwErrorIfNeeded(rc, "Extract vector<unsigned char>");
        output = uchar_vector_t{buf, buf + bufSize};
    }
    else if constexpr (std::is_same_v<decayed_t, uint_tuple_t>)
    {
        auto const* tuple = cass_row_get_column(row, idx);
        output = TupleIterator::fromTuple(tuple).extract<uint32_t, uint32_t>();
    }
    else if constexpr (std::is_convertible_v<decayed_t, std::string>)
    {
        char const* value;
        std::size_t len;
        auto const rc = cass_value_get_string(cass_row_get_column(row, idx), &value, &len);
        throwErrorIfNeeded(rc, "Extract string");
        output = std::string{value, len};
    }
    else if constexpr (std::is_same_v<decayed_t, bool>)
    {
        cass_bool_t flag;
        auto const rc = cass_value_get_bool(cass_row_get_column(row, idx), &flag);
        throwErrorIfNeeded(rc, "Extract bool");
        output = flag ? true : false;
    }
    // clio only uses bigint (int64_t) so we convert any incoming type
    else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
    {
        int64_t out;
        auto const rc = cass_value_get_int64(cass_row_get_column(row, idx), &out);
        throwErrorIfNeeded(rc, "Extract int64");
        output = static_cast<decayed_t>(out);
    }
    else
    {
        // type not supported for extraction
        static_assert(unsupported_v<decayed_t>);
    }

    return output;
}

struct Result : public ManagedObject<CassResult const>
{
    /* implicit */ Result(CassResult const* ptr);

    [[nodiscard]] std::size_t
    numRows() const;

    [[nodiscard]] bool
    hasRows() const;

    template <typename... RowTypes>
    std::optional<std::tuple<RowTypes...>>
    get() const requires(std::tuple_size<std::tuple<RowTypes...>>{} > 1)
    {
        // row managed internally by cassandra driver, hence no ManagedObject.
        auto const* row = cass_result_first_row(*this);
        if (row == nullptr)
            return std::nullopt;

        std::size_t idx = 0;
        auto advanceId = [&idx]() { return idx++; };

        return std::make_optional<std::tuple<RowTypes...>>({extractColumn<RowTypes>(row, advanceId())...});
    }

    template <typename RowType>
    std::optional<RowType>
    get() const
    {
        // row managed internally by cassandra driver, hence no ManagedObject.
        auto const* row = cass_result_first_row(*this);
        if (row == nullptr)
            return std::nullopt;
        return std::make_optional<RowType>(extractColumn<RowType>(row, 0));
    }
};

class ResultIterator : public ManagedObject<CassIterator>
{
    bool hasMore_ = false;

public:
    /* implicit */ ResultIterator(CassIterator* ptr);

    [[nodiscard]] static ResultIterator
    fromResult(Result const& result);

    [[maybe_unused]] bool
    moveForward();

    [[nodiscard]] bool
    hasMore() const;

    template <typename... RowTypes>
    std::tuple<RowTypes...>
    extractCurrentRow() const
    {
        // note: row is invalidated on each iteration.
        // managed internally by cassandra driver, hence no ManagedObject.
        auto const* row = cass_iterator_get_row(*this);

        std::size_t idx = 0;
        auto advanceId = [&idx]() { return idx++; };

        return {extractColumn<RowTypes>(row, advanceId())...};
    }
};

template <typename... Types>
class ResultExtractor
{
    std::reference_wrapper<Result const> ref_;

public:
    struct Sentinel
    {
    };

    struct Iterator
    {
        using iterator_category = std::input_iterator_tag;
        using difference_type = std::size_t;  // rows count
        using value_type = std::tuple<Types...>;

        /* implicit */ Iterator(ResultIterator iterator) : iterator_{std::move(iterator)}
        {
        }

        Iterator(Iterator const&) = delete;
        Iterator&
        operator=(Iterator const&) = delete;

        value_type
        operator*() const
        {
            return iterator_.extractCurrentRow<Types...>();
        }

        value_type
        operator->()
        {
            return iterator_.extractCurrentRow<Types...>();
        }

        Iterator&
        operator++()
        {
            iterator_.moveForward();
            return *this;
        }

        bool
        operator==(Sentinel const&) const
        {
            return not iterator_.hasMore();
        }

    private:
        ResultIterator iterator_;
    };

    ResultExtractor(Result const& result) : ref_{std::cref(result)}
    {
    }

    Iterator
    begin()
    {
        return ResultIterator::fromResult(ref_);
    }

    Sentinel
    end()
    {
        return {};
    }
};

} // namespace Backend::Cassandra::detail
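The Iterator/Sentinel pair above is what makes a Result consumable with a range-based for loop. A sketch, assuming a result whose rows hold (bigint, blob) columns:

// Illustration: stream typed rows out of a query result `res`.
for (auto [seq, blob] : ResultExtractor<int64_t, std::vector<unsigned char>>{res})
{
    // each iteration re-extracts the current row; the underlying driver
    // row pointer is invalidated as soon as the iterator advances
}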
src/backend/cassandra/impl/RetryPolicy.h (new file, 94 lines)
@@ -0,0 +1,94 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <log/Logger.h>
#include <util/Expected.h>

#include <boost/asio.hpp>

#include <algorithm>
#include <chrono>
#include <cmath>

namespace Backend::Cassandra::detail {

/**
 * @brief A retry policy that employs exponential backoff
 */
class ExponentialBackoffRetryPolicy
{
    clio::Logger log_{"Backend"};

    boost::asio::steady_timer timer_;
    uint32_t attempt_ = 0u;

public:
    /**
     * @brief Create a new retry policy instance with the io_context provided
     */
    ExponentialBackoffRetryPolicy(boost::asio::io_context& ioc) : timer_{ioc}
    {
    }

    /**
     * @brief Computes next retry delay and returns true unconditionally
     *
     * @param err The cassandra error that triggered the retry
     */
    [[nodiscard]] bool
    shouldRetry([[maybe_unused]] CassandraError err)
    {
        auto const delay = calculateDelay(attempt_);
        log_.error() << "Cassandra write error: " << err << ", current retries " << attempt_ << ", retrying in "
                     << delay.count() << " milliseconds";

        return true;  // keep retrying forever
    }

    /**
     * @brief Schedules next retry
     *
     * @param fn The callable to execute
     */
    template <typename Fn>
    void
    retry(Fn&& fn)
    {
        timer_.expires_after(calculateDelay(attempt_++));
        timer_.async_wait([fn = std::forward<Fn>(fn)]([[maybe_unused]] const auto& err) {
            // todo: deal with cancellation (thru err)
            fn();
        });
    }

    /**
     * @brief Calculates the wait time before attempting another retry
     */
    std::chrono::milliseconds
    calculateDelay(uint32_t attempt)
    {
        return std::chrono::milliseconds{lround(std::pow(2, std::min(10u, attempt)))};
    }
};

} // namespace Backend::Cassandra::detail
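Since calculateDelay returns 2^min(10, attempt) milliseconds, successive retries back off 1, 2, 4, 8, ... ms and plateau at 1024 ms from the eleventh attempt onward. A quick check of that arithmetic (illustration only, not part of the diff):

// attempt_: 0  1  2  3  ... 10    11    ...
// delay   : 1  2  4  8  ... 1024  1024  ... (milliseconds)
static_assert((1u << 10) == 1024);  // the cap reached once std::min(10u, attempt) saturates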
src/backend/cassandra/impl/Session.h (new file, 38 lines)
@@ -0,0 +1,38 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

namespace Backend::Cassandra::detail {

class Session : public ManagedObject<CassSession>
{
    static constexpr auto deleter = [](CassSession* ptr) { cass_session_free(ptr); };

public:
    Session() : ManagedObject{cass_session_new(), deleter}
    {
    }
};

} // namespace Backend::Cassandra::detail
src/backend/cassandra/impl/SslContext.cpp (new file, 37 lines)
@@ -0,0 +1,37 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/cassandra/impl/SslContext.h>

#include <stdexcept>

namespace {
static constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {

SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), contextDeleter}
{
    cass_ssl_set_verify_flags(*this, CASS_SSL_VERIFY_NONE);
    if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK)
    {
        throw std::runtime_error(std::string{"Error setting Cassandra SSL Context: "} + cass_error_desc(rc));
    }
}

} // namespace Backend::Cassandra::detail
src/backend/cassandra/impl/SslContext.h (new file, 35 lines)
@@ -0,0 +1,35 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

#include <string>

namespace Backend::Cassandra::detail {

struct SslContext : public ManagedObject<CassSsl>
{
    explicit SslContext(std::string const& certificate);
};

} // namespace Backend::Cassandra::detail
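A sketch of plugging the context into a cluster (illustration; `cluster` and the PEM string are assumed, and note the constructor above sets CASS_SSL_VERIFY_NONE, i.e. peer verification is disabled):

SslContext ssl{pemCertificate};      // throws std::runtime_error if the driver rejects the cert
cass_cluster_set_ssl(cluster, ssl);  // implicit conversion hands the driver a CassSsl*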
src/backend/cassandra/impl/Statement.h (new file, 164 lines)
@@ -0,0 +1,164 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Tuple.h>
#include <util/Expected.h>

#include <ripple/basics/base_uint.h>
#include <ripple/protocol/STAccount.h>
#include <cassandra.h>
#include <fmt/core.h>

#include <chrono>
#include <compare>
#include <iterator>
#include <stdexcept>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <vector>

namespace Backend::Cassandra::detail {

class Statement : public ManagedObject<CassStatement>
{
    static constexpr auto deleter = [](CassStatement* ptr) { cass_statement_free(ptr); };

    template <typename>
    static constexpr bool unsupported_v = false;

public:
    /**
     * @brief Construct a new statement with optionally provided arguments
     *
     * Note: it's up to the user to make sure the bound parameters match
     * the format of the query (e.g. amount of '?' matches count of args).
     */
    template <typename... Args>
    explicit Statement(std::string_view query, Args&&... args)
        : ManagedObject{cass_statement_new(query.data(), sizeof...(args)), deleter}
    {
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
        cass_statement_set_is_idempotent(*this, cass_true);
        bind<Args...>(std::forward<Args>(args)...);
    }

    /* implicit */ Statement(CassStatement* ptr) : ManagedObject{ptr, deleter}
    {
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
        cass_statement_set_is_idempotent(*this, cass_true);
    }

    Statement(Statement&&) = default;

    template <typename... Args>
    void
    bind(Args&&... args) const
    {
        std::size_t idx = 0;
        (this->bindAt<Args>(idx++, std::forward<Args>(args)), ...);
    }

    template <typename Type>
    void
    bindAt(std::size_t const idx, Type&& value) const
    {
        using std::to_string;
        auto throwErrorIfNeeded = [idx](CassError rc, std::string_view label) {
            if (rc != CASS_OK)
                throw std::logic_error(fmt::format("[{}] at idx {}: {}", label, idx, cass_error_desc(rc)));
        };

        auto bindBytes = [this, idx](auto const* data, size_t size) {
            return cass_statement_bind_bytes(*this, idx, static_cast<cass_byte_t const*>(data), size);
        };

        using decayed_t = std::decay_t<Type>;
        using uchar_vec_t = std::vector<unsigned char>;
        using uint_tuple_t = std::tuple<uint32_t, uint32_t>;

        if constexpr (std::is_same_v<decayed_t, ripple::uint256>)
        {
            auto const rc = bindBytes(value.data(), value.size());
            throwErrorIfNeeded(rc, "Bind ripple::uint256");
        }
        else if constexpr (std::is_same_v<decayed_t, ripple::AccountID>)
        {
            auto const rc = bindBytes(value.data(), value.size());
            throwErrorIfNeeded(rc, "Bind ripple::AccountID");
        }
        else if constexpr (std::is_same_v<decayed_t, uchar_vec_t>)
        {
            auto const rc = bindBytes(value.data(), value.size());
            throwErrorIfNeeded(rc, "Bind vector<unsigned char>");
        }
        else if constexpr (std::is_convertible_v<decayed_t, std::string>)
        {
            // reinterpret_cast is needed here :'(
            auto const rc = bindBytes(reinterpret_cast<unsigned char const*>(value.data()), value.size());
            throwErrorIfNeeded(rc, "Bind string (as bytes)");
        }
        else if constexpr (std::is_same_v<decayed_t, uint_tuple_t>)
        {
            auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::move(value)});
            throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32>");
        }
        else if constexpr (std::is_same_v<decayed_t, bool>)
        {
            auto const rc = cass_statement_bind_bool(*this, idx, value ? cass_true : cass_false);
            throwErrorIfNeeded(rc, "Bind bool");
        }
        else if constexpr (std::is_same_v<decayed_t, Limit>)
        {
            auto const rc = cass_statement_bind_int32(*this, idx, value.limit);
            throwErrorIfNeeded(rc, "Bind limit (int32)");
        }
        // clio only uses bigint (int64_t) so we convert any incoming type
        else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
        {
            auto const rc = cass_statement_bind_int64(*this, idx, value);
            throwErrorIfNeeded(rc, "Bind int64");
        }
        else
        {
            // type not supported for binding
            static_assert(unsupported_v<decayed_t>);
        }
    }
};

class PreparedStatement : public ManagedObject<CassPrepared const>
{
    static constexpr auto deleter = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };

public:
    /* implicit */ PreparedStatement(CassPrepared const* ptr) : ManagedObject{ptr, deleter}
    {
    }

    template <typename... Args>
    Statement
    bind(Args&&... args) const
    {
        Statement statement = cass_prepared_bind(*this);
        statement.bind<Args...>(std::forward<Args>(args)...);
        return statement;
    }
};

} // namespace Backend::Cassandra::detail
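Per the note about matching '?' placeholders, a typical construction looks like this (sketch; the query text and table are assumptions, not from this diff):

// Two placeholders, two bound values; bindAt() dispatches on the
// decayed argument type via the if-constexpr chain above.
Statement stmt{
    "INSERT INTO objects (key, sequence) VALUES (?, ?)",
    ripple::uint256{},            // bound as raw bytes
    static_cast<int64_t>(1234)};  // bound as bigint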
src/backend/cassandra/impl/Tuple.cpp (new file, 43 lines)
@@ -0,0 +1,43 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/cassandra/impl/Tuple.h>

namespace {
static constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
static constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {

/* implicit */ Tuple::Tuple(CassTuple* ptr) : ManagedObject{ptr, tupleDeleter}
{
}

/* implicit */ TupleIterator::TupleIterator(CassIterator* ptr) : ManagedObject{ptr, tupleIteratorDeleter}
{
}

[[nodiscard]] TupleIterator
TupleIterator::fromTuple(CassValue const* value)
{
    return {cass_iterator_from_tuple(value)};
}

} // namespace Backend::Cassandra::detail
src/backend/cassandra/impl/Tuple.h (new file, 149 lines)
@@ -0,0 +1,149 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

#include <functional>
#include <stdexcept>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>

namespace Backend::Cassandra::detail {

class Tuple : public ManagedObject<CassTuple>
{
    static constexpr auto deleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };

    template <typename>
    static constexpr bool unsupported_v = false;

public:
    /* implicit */ Tuple(CassTuple* ptr);

    template <typename... Types>
    explicit Tuple(std::tuple<Types...>&& value)
        : ManagedObject{cass_tuple_new(std::tuple_size<std::tuple<Types...>>{}), deleter}
    {
        std::apply(std::bind_front(&Tuple::bind<Types...>, this), std::move(value));
    }

    template <typename... Args>
    void
    bind(Args&&... args) const
    {
        std::size_t idx = 0;
        (this->bindAt<Args>(idx++, std::forward<Args>(args)), ...);
    }

    template <typename Type>
    void
    bindAt(std::size_t const idx, Type&& value) const
    {
        using std::to_string;
        auto throwErrorIfNeeded = [idx](CassError rc, std::string_view label) {
            if (rc != CASS_OK)
            {
                auto const tag = '[' + std::string{label} + ']';
                throw std::logic_error(tag + " at idx " + to_string(idx) + ": " + cass_error_desc(rc));
            }
        };

        using decayed_t = std::decay_t<Type>;

        if constexpr (std::is_same_v<decayed_t, bool>)
        {
            auto const rc = cass_tuple_set_bool(*this, idx, value ? cass_true : cass_false);
            throwErrorIfNeeded(rc, "Bind bool");
        }
        // clio only uses bigint (int64_t) so we convert any incoming type
        else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
        {
            auto const rc = cass_tuple_set_int64(*this, idx, value);
            throwErrorIfNeeded(rc, "Bind int64");
        }
        else
        {
            // type not supported for binding
            static_assert(unsupported_v<decayed_t>);
        }
    }
};

class TupleIterator : public ManagedObject<CassIterator>
{
    template <typename>
    static constexpr bool unsupported_v = false;

public:
    /* implicit */ TupleIterator(CassIterator* ptr);

    [[nodiscard]] static TupleIterator
    fromTuple(CassValue const* value);

    template <typename... Types>
    [[nodiscard]] std::tuple<Types...>
    extract() const
    {
        return {extractNext<Types>()...};
    }

private:
    template <typename Type>
    Type
    extractNext() const
    {
        using std::to_string;
        Type output;

        if (not cass_iterator_next(*this))
            throw std::logic_error("Could not extract next value from tuple iterator");

        auto throwErrorIfNeeded = [](CassError rc, std::string_view label) {
            if (rc != CASS_OK)
            {
                auto const tag = '[' + std::string{label} + ']';
                throw std::logic_error(tag + ": " + cass_error_desc(rc));
            }
        };

        using decayed_t = std::decay_t<Type>;

        // clio only uses bigint (int64_t) so we convert any incoming type
        if constexpr (std::is_convertible_v<decayed_t, int64_t>)
        {
            int64_t out;
            auto const rc = cass_value_get_int64(cass_iterator_get_value(*this), &out);
            throwErrorIfNeeded(rc, "Extract int64 from tuple");
            output = static_cast<decayed_t>(out);
        }
        else
        {
            // type not supported for extraction
            static_assert(unsupported_v<decayed_t>);
        }

        return output;
    }
};

} // namespace Backend::Cassandra::detail
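Reading such a tuple back out mirrors the binding side; TupleIterator::extract consumes the values in declaration order, as Result.h does for its (uint32, uint32) columns. A sketch, where `column` is an assumed CassValue* holding a tuple<uint32, uint32>:

// Illustration: unpack a two-field tuple column into native integers.
auto [lo, hi] = TupleIterator::fromTuple(column).extract<uint32_t, uint32_t>();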
src/config/Config.cpp (new file, 183 lines)
@@ -0,0 +1,183 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <config/Config.h>
#include <log/Logger.h>

#include <algorithm>
#include <fstream>
#include <sstream>

namespace clio {

// Note: `store_(store)` MUST use `()` instead of `{}` otherwise gcc
// picks `initializer_list` constructor and anything passed becomes an
// array :-D
Config::Config(boost::json::value store) : store_(std::move(store))
{
}

Config::operator bool() const noexcept
{
    return not store_.is_null();
}

bool
Config::contains(key_type key) const
{
    return lookup(key).has_value();
}

std::optional<boost::json::value>
Config::lookup(key_type key) const
{
    if (store_.is_null())
        return std::nullopt;

    std::reference_wrapper<boost::json::value const> cur = std::cref(store_);
    auto hasBrokenPath = false;
    auto tokenized = detail::Tokenizer<key_type, Separator>{key};
    std::string subkey{};

    auto maybeSection = tokenized.next();
    while (maybeSection.has_value())
    {
        auto section = maybeSection.value();
        subkey += section;

        if (not hasBrokenPath)
        {
            if (not cur.get().is_object())
                throw detail::StoreException("Not an object at '" + subkey + "'");
            if (not cur.get().as_object().contains(section))
                hasBrokenPath = true;
            else
                cur = std::cref(cur.get().as_object().at(section));
        }

        subkey += Separator;
        maybeSection = tokenized.next();
    }

    if (hasBrokenPath)
        return std::nullopt;
    return std::make_optional(cur);
}

std::optional<Config::array_type>
Config::maybeArray(key_type key) const
{
    try
    {
        auto maybe_arr = lookup(key);
        if (maybe_arr && maybe_arr->is_array())
        {
            auto& arr = maybe_arr->as_array();
            array_type out;
            out.reserve(arr.size());

            std::transform(std::begin(arr), std::end(arr), std::back_inserter(out), [](auto&& element) {
                return Config{std::move(element)};
            });
            return std::make_optional<array_type>(std::move(out));
        }
    }
    catch (detail::StoreException const&)
    {
        // ignore store error, but rethrow key errors
    }

    return std::nullopt;
}

Config::array_type
Config::array(key_type key) const
{
    if (auto maybe_arr = maybeArray(key); maybe_arr)
        return maybe_arr.value();
    throw std::logic_error("No array found at '" + key + "'");
}

Config::array_type
Config::arrayOr(key_type key, array_type fallback) const
{
    if (auto maybe_arr = maybeArray(key); maybe_arr)
        return maybe_arr.value();
    return fallback;
}

Config::array_type
Config::arrayOrThrow(key_type key, std::string_view err) const
{
    try
    {
        return maybeArray(key).value();
    }
    catch (std::exception const&)
    {
        throw std::runtime_error(err.data());
    }
}

Config
Config::section(key_type key) const
{
    auto maybe_element = lookup(key);
    if (maybe_element && maybe_element->is_object())
        return Config{std::move(*maybe_element)};
    throw std::logic_error("No section found at '" + key + "'");
}

Config::array_type
Config::array() const
{
    if (not store_.is_array())
        throw std::logic_error("_self_ is not an array");

    array_type out;
    auto const& arr = store_.as_array();
    out.reserve(arr.size());

    std::transform(
        std::cbegin(arr), std::cend(arr), std::back_inserter(out), [](auto const& element) { return Config{element}; });
    return out;
}

Config
ConfigReader::open(std::filesystem::path path)
{
    try
    {
        std::ifstream in(path, std::ios::in | std::ios::binary);
        if (in)
        {
            std::stringstream contents;
            contents << in.rdbuf();
            auto opts = boost::json::parse_options{};
            opts.allow_comments = true;
            return Config{boost::json::parse(contents.str(), {}, opts)};
        }
    }
    catch (std::exception const& e)
    {
        LogService::error() << "Could not read configuration file from '" << path.string() << "': " << e.what();
    }

    return Config{};
}

} // namespace clio
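The traversal rules of lookup are easiest to see on a concrete store (illustration only, not from the diff):

// given store_ = { "database": { "type": "cassandra" } }
// lookup("database.type")    -> json value "cassandra"
// lookup("database.missing") -> std::nullopt            (path breaks, no throw)
// lookup("database.type.x")  -> throws StoreException   ("Not an object at 'database.type.x'")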
src/config/Config.h (new file, 401 lines)
@@ -0,0 +1,401 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <config/detail/Helpers.h>

#include <boost/json.hpp>
#include <filesystem>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

namespace clio {

/**
 * @brief Convenience wrapper to query a JSON configuration file.
 *
 * Any custom data type can be supported by implementing the right `tag_invoke`
 * for `boost::json::value_to`.
 */
class Config final
{
    boost::json::value store_;
    static constexpr char Separator = '.';

public:
    using key_type = std::string;           /*! The type of key used */
    using array_type = std::vector<Config>; /*! The type of array used */
    using write_cursor_type = std::pair<std::optional<std::reference_wrapper<boost::json::value>>, key_type>;

    /**
     * @brief Construct a new Config object.
     * @param store boost::json::value that backs this instance
     */
    explicit Config(boost::json::value store = {});

    //
    // Querying the store
    //

    /**
     * @brief Checks whether underlying store is not null.
     *
     * @return true If the store is not null
     * @return false If the store is null
     */
    operator bool() const noexcept;

    /**
     * @brief Checks whether something exists under given key.
     *
     * @param key The key to check
     * @return true If something exists under key
     * @return false If nothing exists under key
     * @throws std::logic_error If the key is of invalid format
     */
    [[nodiscard]] bool
    contains(key_type key) const;

    //
    // Key value access
    //

    /**
     * @brief Interface for fetching values by key that returns std::optional.
     *
     * Will attempt to fetch the value under the desired key. If the value
     * exists and can be represented by the desired type Result then it will be
     * returned wrapped in an optional. If the value exists but the conversion
     * to Result is not possible - a runtime_error will be thrown. If the value
     * does not exist under the specified key - std::nullopt is returned.
     *
     * @tparam Result The desired return type
     * @param key The key to check
     * @return std::optional<Result> Optional value of desired type
     * @throws std::logic_error Thrown if conversion to Result is not possible
     * or key is of invalid format
     */
    template <typename Result>
    [[nodiscard]] std::optional<Result>
    maybeValue(key_type key) const
    {
        auto maybe_element = lookup(key);
        if (maybe_element)
            return std::make_optional<Result>(checkedAs<Result>(key, *maybe_element));
        return std::nullopt;
    }

    /**
     * @brief Interface for fetching values by key.
     *
     * Will attempt to fetch the value under the desired key. If the value
     * exists and can be represented by the desired type Result then it will be
     * returned. If the value exists but the conversion
     * to Result is not possible OR the value does not exist - a logic_error
     * will be thrown.
     *
     * @tparam Result The desired return type
     * @param key The key to check
     * @return Result Value of desired type
     * @throws std::logic_error Thrown if conversion to Result is not
     * possible, value does not exist under specified key path or the key is of
     * invalid format
     */
    template <typename Result>
    [[nodiscard]] Result
    value(key_type key) const
    {
        return maybeValue<Result>(key).value();
    }

    /**
     * @brief Interface for fetching values by key with fallback.
     *
     * Will attempt to fetch the value under the desired key. If the value
     * exists and can be represented by the desired type Result then it will be
     * returned. If the value exists but the conversion
     * to Result is not possible - a logic_error will be thrown. If the value
     * does not exist under the specified key - user specified fallback is
     * returned.
     *
     * @tparam Result The desired return type
     * @param key The key to check
     * @param fallback The fallback value
     * @return Result Value of desired type
     * @throws std::logic_error Thrown if conversion to Result is not possible
     * or the key is of invalid format
     */
    template <typename Result>
    [[nodiscard]] Result
    valueOr(key_type key, Result fallback) const
    {
        try
        {
            return maybeValue<Result>(key).value_or(fallback);
        }
        catch (detail::StoreException const&)
        {
            return fallback;
        }
    }

    /**
     * @brief Interface for fetching values by key with custom error handling.
     *
     * Will attempt to fetch the value under the desired key. If the value
     * exists and can be represented by the desired type Result then it will be
     * returned. If the value exists but the conversion
     * to Result is not possible OR the value does not exist - a runtime_error
     * will be thrown with the user specified message.
     *
     * @tparam Result The desired return type
     * @param key The key to check
     * @param err The custom error message
     * @return Result Value of desired type
     * @throws std::runtime_error Thrown if conversion to Result is not possible
     * or value does not exist under key
     */
    template <typename Result>
    [[nodiscard]] Result
    valueOrThrow(key_type key, std::string_view err) const
    {
        try
        {
            return maybeValue<Result>(key).value();
        }
        catch (std::exception const&)
        {
            throw std::runtime_error(err.data());
        }
    }

    /**
     * @brief Interface for fetching an array by key that returns std::optional.
     *
     * Will attempt to fetch an array under the desired key. If the array
     * exists then it will be
     * returned wrapped in an optional. If the array does not exist under the
     * specified key - std::nullopt is returned.
     *
     * @param key The key to check
     * @return std::optional<array_type> Optional array
     * @throws std::logic_error Thrown if the key is of invalid format
     */
    [[nodiscard]] std::optional<array_type>
    maybeArray(key_type key) const;

    /**
     * @brief Interface for fetching an array by key.
     *
     * Will attempt to fetch an array under the desired key. If the array
     * exists then it will be
     * returned. If the array does not exist under the
     * specified key an std::logic_error is thrown.
     *
     * @param key The key to check
     * @return array_type The array
     * @throws std::logic_error Thrown if there is no array under the desired
     * key or the key is of invalid format
     */
    [[nodiscard]] array_type
    array(key_type key) const;

    /**
     * @brief Interface for fetching an array by key with fallback.
     *
     * Will attempt to fetch an array under the desired key. If the array
     * exists then it will be returned.
     * If the array does not exist or another type is stored under the desired
     * key - user specified fallback is returned.
     *
     * @param key The key to check
     * @param fallback The fallback array
     * @return array_type The array
     * @throws std::logic_error Thrown if the key is of invalid format
     */
    [[nodiscard]] array_type
    arrayOr(key_type key, array_type fallback) const;

    /**
     * @brief Interface for fetching an array by key with custom error handling.
     *
     * Will attempt to fetch an array under the desired key. If the array
     * exists then it will be returned.
     * If the array does not exist or another type is stored under the desired
     * key - std::runtime_error is thrown with the user specified error message.
     *
     * @param key The key to check
     * @param err The custom error message
     * @return array_type The array
     * @throws std::runtime_error Thrown if there is no array under the desired
     * key
     */
    [[nodiscard]] array_type
    arrayOrThrow(key_type key, std::string_view err) const;

    /**
     * @brief Interface for fetching a sub section by key.
     *
     * Will attempt to fetch an entire section under the desired key and return
     * it as a Config instance. If the section does not exist or another type is
     * stored under the desired key - std::logic_error is thrown.
     *
     * @param key The key to check
     * @return Config Section represented as a separate instance of Config
     * @throws std::logic_error Thrown if there is no section under the
     * desired key or the key is of invalid format
     */
    [[nodiscard]] Config
    section(key_type key) const;

    //
    // Direct self-value access
    //

    /**
     * @brief Interface for reading the value directly referred to by the
     * instance. Wraps as std::optional.
     *
     * See @ref maybeValue(key_type) const for how this works.
     */
    template <typename Result>
    [[nodiscard]] std::optional<Result>
    maybeValue() const
    {
        if (store_.is_null())
            return std::nullopt;
        return std::make_optional<Result>(checkedAs<Result>("_self_", store_));
    }

    /**
     * @brief Interface for reading the value directly referred to by the
     * instance.
     *
     * See @ref value(key_type) const for how this works.
     */
    template <typename Result>
    [[nodiscard]] Result
    value() const
    {
        return maybeValue<Result>().value();
    }

    /**
     * @brief Interface for reading the value directly referred to by the
     * instance with user-specified fallback.
     *
     * See @ref valueOr(key_type, Result) const for how this works.
     */
    template <typename Result>
    [[nodiscard]] Result
    valueOr(Result fallback) const
    {
        return maybeValue<Result>().value_or(fallback);
    }

    /**
     * @brief Interface for reading the value directly referred to by the
     * instance with user-specified error message.
     *
     * See @ref valueOrThrow(key_type, std::string_view) const for how this
     * works.
     */
    template <typename Result>
    [[nodiscard]] Result
    valueOrThrow(std::string_view err) const
    {
        try
        {
            return maybeValue<Result>().value();
        }
        catch (std::exception const&)
        {
            throw std::runtime_error(err.data());
        }
    }

    /**
     * @brief Interface for reading the array directly referred to by the
     * instance.
     *
     * See @ref array(key_type) const for how this works.
     */
    [[nodiscard]] array_type
    array() const;

private:
    template <typename Return>
    [[nodiscard]] Return
    checkedAs(key_type key, boost::json::value const& value) const
    {
        using boost::json::value_to;

        auto has_error = false;
        if constexpr (std::is_same_v<Return, bool>)
        {
            if (not value.is_bool())
                has_error = true;
        }
        else if constexpr (std::is_same_v<Return, std::string>)
        {
            if (not value.is_string())
                has_error = true;
        }
        else if constexpr (std::is_same_v<Return, double>)
        {
            if (not value.is_number())
                has_error = true;
        }
        else if constexpr (std::is_convertible_v<Return, uint64_t> || std::is_convertible_v<Return, int64_t>)
        {
            if (not value.is_int64() && not value.is_uint64())
                has_error = true;
        }

        if (has_error)
            throw std::runtime_error(
                "Type for key '" + key + "' is '" + std::string{to_string(value.kind())} + "' in JSON but requested '" +
                detail::typeName<Return>() + "'");

        return value_to<Return>(value);
    }

    std::optional<boost::json::value>
    lookup(key_type key) const;

    write_cursor_type
    lookupForWrite(key_type key);
};

/**
 * @brief Simple configuration file reader.
 *
 * Reads the JSON file under specified path and creates a @ref Config object
 * from its contents.
 */
class ConfigReader final
{
public:
    static Config
    open(std::filesystem::path path);
};

} // namespace clio
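A sketch of the intended call sites (keys, path, and fallbacks here are assumptions, not values from this diff):

// Illustration: dot-separated keys traverse nested JSON objects.
auto cfg = ConfigReader::open("/etc/opt/clio/config.json");
auto port = cfg.valueOr<uint32_t>("server.port", 51233u);  // fallback if key absent
auto db = cfg.section("database");                         // throws std::logic_error if absent
auto sources = cfg.arrayOr("etl_sources", {});             // empty fallback array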
164
src/config/detail/Helpers.h
Normal file
164
src/config/detail/Helpers.h
Normal file
@@ -0,0 +1,164 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <optional>
|
||||
#include <queue>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
|
||||
namespace clio::detail {
|
||||
|
||||
/**
|
||||
* @brief Thrown when a KeyPath related error occurs
|
||||
*/
|
||||
struct KeyException : public ::std::logic_error
|
||||
{
|
||||
KeyException(::std::string msg) : ::std::logic_error{msg}
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Thrown when a Store (config's storage) related error occurs.
|
||||
*/
|
||||
struct StoreException : public ::std::logic_error
|
||||
{
|
||||
StoreException(::std::string msg) : ::std::logic_error{msg}
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Simple string tokenizer. Used by @ref Config.
|
||||
*
|
||||
* @tparam KeyType The type of key to use
|
||||
* @tparam Separator The separator character
|
||||
*/
|
||||
template <typename KeyType, char Separator>
|
||||
class Tokenizer final
|
||||
{
|
||||
using opt_key_t = std::optional<KeyType>;
|
||||
KeyType key_;
|
||||
KeyType token_{};
|
||||
std::queue<KeyType> tokens_{};
|
||||
|
||||
public:
|
||||
explicit Tokenizer(KeyType key) : key_{key}
|
||||
{
|
||||
if (key.empty())
|
||||
throw KeyException("Empty key");
|
||||
|
||||
for (auto const& c : key)
|
||||
{
|
||||
if (c == Separator)
|
||||
saveToken();
|
||||
else
|
||||
token_ += c;
|
||||
}
|
||||
|
||||
saveToken();
|
||||
}
|
||||
|
||||
[[nodiscard]] opt_key_t
|
||||
next()
|
||||
{
|
||||
if (tokens_.empty())
|
||||
return std::nullopt;
|
||||
auto token = tokens_.front();
|
||||
tokens_.pop();
|
||||
return std::make_optional(std::move(token));
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
saveToken()
|
||||
{
|
||||
if (token_.empty())
|
||||
throw KeyException("Empty token in key '" + key_ + "'.");
|
||||
tokens_.push(std::move(token_));
|
||||
token_ = {};
|
||||
}
|
||||
};
|
||||
template <typename T>
static constexpr const char*
typeName()
{
    return typeid(T).name();
}

template <>
constexpr const char*
typeName<uint64_t>()
{
    return "uint64_t";
}

template <>
constexpr const char*
typeName<int64_t>()
{
    return "int64_t";
}

template <>
constexpr const char*
typeName<uint32_t>()
{
    return "uint32_t";
}

template <>
constexpr const char*
typeName<int32_t>()
{
    return "int32_t";
}

template <>
constexpr const char*
typeName<bool>()
{
    return "bool";
}

template <>
constexpr const char*
typeName<std::string>()
{
    return "std::string";
}

template <>
constexpr const char*
typeName<const char*>()
{
    return "const char*";
}

template <>
constexpr const char*
typeName<double>()
{
    return "double";
}

}; // namespace clio::detail
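These `typeName` specializations give stable, human-readable names for the handful of types the config layer deals with; any other type falls back to the implementation-defined `typeid` name. A small illustrative check (the `main` scaffold is mine, not from the diff):

#include <cstdint>
#include <iostream>
#include <string>

// Assumes the clio::detail::typeName definitions from the header above.
int
main()
{
    std::cout << clio::detail::typeName<uint64_t>() << '\n';     // prints "uint64_t"
    std::cout << clio::detail::typeName<std::string>() << '\n';  // prints "std::string"
}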
@@ -1,5 +1,24 @@
#ifndef RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
#define RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/basics/base_uint.h>
#include <condition_variable>
#include <mutex>
@@ -7,20 +26,20 @@
#include <queue>
#include <sstream>

/// This datastructure is used to keep track of the sequence of the most recent
/// ledger validated by the network. There are two methods that will wait until
/// certain conditions are met. This datastructure is able to be "stopped". When
/// the datastructure is stopped, any threads currently waiting are unblocked.
/// Any later calls to methods of this datastructure will not wait. Once the
/// datastructure is stopped, the datastructure remains stopped for the rest of
/// its lifetime.
/**
 * @brief This datastructure is used to keep track of the sequence of the most recent ledger validated by the network.
 *
 * There are two methods that will wait until certain conditions are met. This datastructure is able to be "stopped".
 * When the datastructure is stopped, any threads currently waiting are unblocked.
 * Any later calls to methods of this datastructure will not wait. Once the datastructure is stopped, the datastructure
 * remains stopped for the rest of its lifetime.
 */
class NetworkValidatedLedgers
{
    // max sequence validated by network
    std::optional<uint32_t> max_;

    mutable std::mutex m_;

    std::condition_variable cv_;

public:
@@ -30,8 +49,11 @@ public:
        return std::make_shared<NetworkValidatedLedgers>();
    }

    /// Notify the datastructure that idx has been validated by the network
    /// @param idx sequence validated by network
    /**
     * @brief Notify the datastructure that idx has been validated by the network
     *
     * @param idx sequence validated by network
     */
    void
    push(uint32_t idx)
    {
@@ -41,10 +63,13 @@ public:
        cv_.notify_all();
    }

    /// Get most recently validated sequence. If no ledgers are known to have
    /// been validated, this function waits until the next ledger is validated
    /// @return sequence of most recently validated ledger. empty optional if
    /// the datastructure has been stopped
    /**
     * @brief Get most recently validated sequence.
     *
     * If no ledgers are known to have been validated, this function waits until the next ledger is validated
     *
     * @return sequence of most recently validated ledger. empty optional if the datastructure has been stopped
     */
    std::optional<uint32_t>
    getMostRecent()
    {
@@ -53,19 +78,18 @@ public:
        return max_;
    }

    /// Waits for the sequence to be validated by the network
    /// @param sequence to wait for
    /// @return true if sequence was validated, false otherwise
    /// a return value of false means the datastructure has been stopped
    /**
     * @brief Waits for the sequence to be validated by the network
     *
     * @param sequence to wait for
     * @return true if sequence was validated, false otherwise; a return value of false means the datastructure has
     * been stopped
     */
    bool
    waitUntilValidatedByNetwork(
        uint32_t sequence,
        std::optional<uint32_t> maxWaitMs = {})
    waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs = {})
    {
        std::unique_lock lck(m_);
        auto pred = [sequence, this]() -> bool {
            return (max_ && sequence <= *max_);
        };
        auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
        if (maxWaitMs)
            cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
        else
@@ -74,10 +98,13 @@ public:
    }
};
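For orientation, a minimal self-contained sketch of the intended producer/consumer interaction; the thread scaffold and the sequence numbers are illustrative, not part of the diff:

#include <chrono>
#include <cstdint>
#include <iostream>
#include <memory>
#include <thread>

// Assumes the NetworkValidatedLedgers definition from the diff above.
int
main()
{
    auto ledgers = std::make_shared<NetworkValidatedLedgers>();

    // Producer: simulates the network validating ledgers 1..3.
    std::thread producer([ledgers] {
        for (uint32_t seq = 1; seq <= 3; ++seq)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
            ledgers->push(seq);
        }
    });

    // Consumer: blocks until ledger 3 is validated or roughly 1 second passes.
    if (ledgers->waitUntilValidatedByNetwork(3, 1000))
        std::cout << "ledger 3 validated\n";

    producer.join();
}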
/// Generic thread-safe queue with an optional maximum size
/// Note, we can't use a lockfree queue here, since we need the ability to wait
/// for an element to be added or removed from the queue. These waits are
/// blocking calls.
// TODO: does the note make sense? lockfree queues provide the same blocking behaviour just without mutex, don't they?
/**
 * @brief Generic thread-safe queue with a max capacity
 *
 * @note (original note) We can't use a lockfree queue here, since we need the ability to wait for an element to be
 * added or removed from the queue. These waits are blocking calls.
 */
template <class T>
class ThreadSafeQueue
{
@@ -85,75 +112,96 @@ class ThreadSafeQueue

    mutable std::mutex m_;
    std::condition_variable cv_;
    std::optional<uint32_t> maxSize_;
    uint32_t maxSize_;

public:
    /// @param maxSize maximum size of the queue. Calls that would cause the
    /// queue to exceed this size will block until free space is available
    /**
     * @brief Create an instance of the queue
     *
     * @param maxSize maximum size of the queue. Calls that would cause the queue to exceed this size will block until
     * free space is available
     */
    ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize)
    {
    }

    /// Create a queue with no maximum size
    ThreadSafeQueue() = default;

    /// @param elt element to push onto queue
    /// if maxSize is set, this method will block until free space is available
    /**
     * @brief Push element onto the queue
     *
     * Note: This method will block until free space is available
     *
     * @param elt element to push onto queue
     */
    void
    push(T const& elt)
    {
        std::unique_lock lck(m_);
        // if queue has a max size, wait until not full
        if (maxSize_)
            cv_.wait(lck, [this]() { return queue_.size() <= *maxSize_; });
        cv_.wait(lck, [this]() { return queue_.size() <= maxSize_; });
        queue_.push(elt);
        cv_.notify_all();
    }

    /// @param elt element to push onto queue. elt is moved from
    /// if maxSize is set, this method will block until free space is available
    /**
     * @brief Push element onto the queue
     *
     * Note: This method will block until free space is available
     *
     * @param elt element to push onto queue. elt is moved from
     */
    void
    push(T&& elt)
    {
        std::unique_lock lck(m_);
        // if queue has a max size, wait until not full
        if (maxSize_)
            cv_.wait(lck, [this]() { return queue_.size() <= *maxSize_; });
        cv_.wait(lck, [this]() { return queue_.size() <= maxSize_; });
        queue_.push(std::move(elt));
        cv_.notify_all();
    }

    /// @return element popped from queue. Will block until queue is non-empty
    /**
     * @brief Pop element from the queue
     *
     * Note: Will block until queue is non-empty
     *
     * @return element popped from queue
     */
    T
    pop()
    {
        std::unique_lock lck(m_);
        cv_.wait(lck, [this]() { return !queue_.empty(); });

        T ret = std::move(queue_.front());
        queue_.pop();
        // if queue has a max size, unblock any possible pushers
        if (maxSize_)
            cv_.notify_all();

        cv_.notify_all();
        return ret;
    }
    /// @return element popped from queue. Will block until queue is non-empty

    /**
     * @brief Attempt to pop an element
     *
     * @return element popped from queue or empty optional if queue was empty
     */
    std::optional<T>
    tryPop()
    {
        std::unique_lock lck(m_);
        std::scoped_lock lck(m_);
        if (queue_.empty())
            return {};

        T ret = std::move(queue_.front());
        queue_.pop();
        // if queue has a max size, unblock any possible pushers
        if (maxSize_)
            cv_.notify_all();

        cv_.notify_all();
        return ret;
    }
};
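A small self-contained sketch of the bounded-queue behaviour described above; the capacity of 2 and the integer payload are illustrative only:

#include <iostream>
#include <thread>

// Assumes the ThreadSafeQueue definition from the diff above.
int
main()
{
    ThreadSafeQueue<int> queue{2};  // pushes block while the queue is at capacity

    std::thread producer([&queue] {
        for (int i = 0; i < 5; ++i)
            queue.push(i);  // blocks until the consumer makes room
    });

    for (int i = 0; i < 5; ++i)
        std::cout << queue.pop() << '\n';  // blocks until an element arrives

    producer.join();

    // tryPop() never blocks; on an empty queue it returns an empty optional.
    if (!queue.tryPop())
        std::cout << "queue drained\n";
}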
/// Partitions the uint256 keyspace into numMarkers partitions, each of equal
/// size.
/**
 * @brief Partitions the uint256 keyspace into numMarkers partitions, each of equal size.
 *
 * @param numMarkers total markers to partition for
 */
inline std::vector<ripple::uint256>
getMarkers(size_t numMarkers)
{
@@ -171,5 +219,3 @@ getMarkers(size_t numMarkers)
    }
    return markers;
}

#endif // RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
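The body of getMarkers() is mostly elided by the hunk above. Purely as an illustration of the equal-size partitioning idea (my sketch over a 64-bit keyspace, not the clio uint256 implementation):

#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative analogue of getMarkers() over uint64_t instead of uint256:
// divide the keyspace into numMarkers contiguous ranges of equal width and
// return the first key of each range.
std::vector<uint64_t>
getMarkers64(size_t numMarkers)
{
    std::vector<uint64_t> markers;
    markers.reserve(numMarkers);
    uint64_t const step = ~uint64_t{0} / numMarkers;  // width of each partition
    for (size_t i = 0; i < numMarkers; ++i)
        markers.push_back(i * step);
    return markers;
}

int
main()
{
    // Four markers carve the keyspace into four equal quarters.
    for (auto m : getMarkers64(4))
        std::cout << std::hex << m << '\n';  // 0, 3fff..., 7fff..., bfff...
}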
259
src/etl/ETLService.cpp
Normal file
@@ -0,0 +1,259 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <etl/ETLService.h>

using namespace clio;

// Database must be populated when this starts
std::optional<uint32_t>
ETLService::runETLPipeline(uint32_t startSequence, int numExtractors)
{
    if (finishSequence_ && startSequence > *finishSequence_)
        return {};

    log_.debug() << "Starting etl pipeline";
    state_.isWriting = true;

    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    if (!rng || rng->maxSequence < startSequence - 1)
    {
        assert(false);
        throw std::runtime_error("runETLPipeline: parent ledger is null");
    }

    auto const begin = std::chrono::system_clock::now();
    auto extractors = std::vector<std::unique_ptr<ExtractorType>>{};
    auto pipe = DataPipeType{numExtractors, startSequence};

    for (auto i = 0u; i < numExtractors; ++i)
        extractors.push_back(std::make_unique<ExtractorType>(
            pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_));

    auto transformer = TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, startSequence, state_};
    transformer.waitTillFinished();  // suspend current thread until exit condition is met
    pipe.cleanup();                  // TODO: this should probably happen automatically using destructor

    // wait for all of the extractors to stop
    for (auto& t : extractors)
        t->waitTillFinished();

    auto const end = std::chrono::system_clock::now();
    auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
    log_.debug() << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
                 << ((end - begin).count()) / 1000000000.0;

    state_.isWriting = false;

    log_.debug() << "Stopping etl pipeline";
    return lastPublishedSeq;
}

// Main loop of ETL.
// The software begins monitoring the ledgers that are validated by the network.
// The member networkValidatedLedgers_ keeps track of the sequences of ledgers validated by the network.
// Whenever a ledger is validated by the network, the software looks for that ledger in the database. Once the ledger is
// found in the database, the software publishes that ledger to the ledgers stream. If a network validated ledger is not
// found in the database after a certain amount of time, then the software attempts to take over responsibility of the
// ETL process, where it writes new ledgers to the database. The software will relinquish control of the ETL process if
// it detects that another process has taken over ETL.
void
ETLService::monitor()
{
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    if (!rng)
    {
        log_.info() << "Database is empty. Will download a ledger "
                       "from the network.";
        std::optional<ripple::LedgerInfo> ledger;

        if (startSequence_)
        {
            log_.info() << "ledger sequence specified in config. "
                        << "Will begin ETL process starting with ledger " << *startSequence_;
            ledger = ledgerLoader_.loadInitialLedger(*startSequence_);
        }
        else
        {
            log_.info() << "Waiting for next ledger to be validated by network...";
            std::optional<uint32_t> mostRecentValidated = networkValidatedLedgers_->getMostRecent();

            if (mostRecentValidated)
            {
                log_.info() << "Ledger " << *mostRecentValidated << " has been validated. "
                            << "Downloading...";
                ledger = ledgerLoader_.loadInitialLedger(*mostRecentValidated);
            }
            else
            {
                log_.info() << "The wait for the next validated "
                            << "ledger has been aborted. "
                            << "Exiting monitor loop";
                return;
            }
        }

        if (ledger)
        {
            rng = backend_->hardFetchLedgerRangeNoThrow();
        }
        else
        {
            log_.error() << "Failed to load initial ledger. Exiting monitor loop";
            return;
        }
    }
    else
    {
        if (startSequence_)
            log_.warn() << "start sequence specified but db is already populated";

        log_.info() << "Database already populated. Picking up from the tip of history";
        cacheLoader_.load(rng->maxSequence);
    }

    assert(rng);
    uint32_t nextSequence = rng->maxSequence + 1;

    log_.debug() << "Database is populated. "
                 << "Starting monitor loop. sequence = " << nextSequence;

    while (true)
    {
        if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence)
        {
            ledgerPublisher_.publish(nextSequence, {});
            ++nextSequence;
        }
        else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
        {
            log_.info() << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
                        << "Attempting to find in database and publish";

            // Attempt to take over responsibility of ETL writer after 10 failed
            // attempts to publish the ledger. publishLedger() fails if the
            // ledger that has been validated by the network is not found in the
            // database after the specified number of attempts. publishLedger()
            // waits one second between each attempt to read the ledger from the
            // database
            constexpr size_t timeoutSeconds = 10;
            bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);

            if (!success)
            {
                log_.warn() << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";

                // returns the most recent sequence published, empty optional if no sequence was published
                std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
                log_.info() << "Aborting ETL. Falling back to publishing";

                // if no ledger was published, don't increment nextSequence
                if (lastPublished)
                    nextSequence = *lastPublished + 1;
            }
            else
            {
                ++nextSequence;
            }
        }
    }
}

void
ETLService::monitorReadOnly()
{
    log_.debug() << "Starting reporting in strict read only mode";

    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    uint32_t latestSequence;

    if (!rng)
    {
        if (auto net = networkValidatedLedgers_->getMostRecent())
            latestSequence = *net;
        else
            return;
    }
    else
    {
        latestSequence = rng->maxSequence;
    }

    cacheLoader_.load(latestSequence);
    latestSequence++;

    while (true)
    {
        if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence)
        {
            ledgerPublisher_.publish(latestSequence, {});
            latestSequence = latestSequence + 1;
        }
        else
        {
            // if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs first.
            // Even if we don't hear from rippled, if ledgers are being written to the db, we publish them.
            networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, 1000);
        }
    }
}

void
ETLService::run()
{
    log_.info() << "Starting reporting etl";
    state_.isStopping = false;

    doWork();
}

void
ETLService::doWork()
{
    worker_ = std::thread([this]() {
        beast::setCurrentThreadName("rippled: ETLService worker");

        if (state_.isReadOnly)
            monitorReadOnly();
        else
            monitor();
    });
}

ETLService::ETLService(
    clio::Config const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManagerType> subscriptions,
    std::shared_ptr<LoadBalancerType> balancer,
    std::shared_ptr<NetworkValidatedLedgersType> ledgers)
    : backend_(backend)
    , loadBalancer_(balancer)
    , networkValidatedLedgers_(ledgers)
    , cacheLoader_(config, ioc, backend, backend->cache())
    , ledgerFetcher_(backend, balancer)
    , ledgerLoader_(backend, balancer, ledgerFetcher_, state_)
    , ledgerPublisher_(ioc, backend, subscriptions, state_)
{
    startSequence_ = config.maybeValue<uint32_t>("start_sequence");
    finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
    state_.isReadOnly = config.valueOr("read_only", state_.isReadOnly);
    extractorThreads_ = config.valueOr<uint32_t>("extractor_threads", extractorThreads_);
    txnThreshold_ = config.valueOr<size_t>("txn_threshold", txnThreshold_);
}
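The constructor above reads its tunables through `maybeValue`/`valueOr`. As a self-contained illustration of that fallback pattern only — the map-backed `SimpleConfig` below is my stand-in, not the clio::Config API:

#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <sstream>
#include <string>

// Hypothetical stand-in for clio::Config, backed by a string map.
struct SimpleConfig
{
    std::map<std::string, std::string> values;

    // Empty optional when the key is absent; parsed value otherwise.
    template <typename T>
    std::optional<T>
    maybeValue(std::string const& key) const
    {
        auto it = values.find(key);
        if (it == values.end())
            return std::nullopt;
        T out{};
        std::istringstream{it->second} >> out;
        return out;
    }

    // Parsed value when present, the caller's default otherwise.
    template <typename T>
    T
    valueOr(std::string const& key, T fallback) const
    {
        return maybeValue<T>(key).value_or(fallback);
    }
};

int
main()
{
    SimpleConfig config{{{"extractor_threads", "4"}}};

    uint32_t extractorThreads = config.valueOr<uint32_t>("extractor_threads", 1);
    auto startSequence = config.maybeValue<uint32_t>("start_sequence");

    std::cout << extractorThreads << '\n';           // 4 (key present)
    std::cout << startSequence.has_value() << '\n';  // 0 (key absent)
}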
239
src/etl/ETLService.h
Normal file
@@ -0,0 +1,239 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/BackendInterface.h>
#include <backend/LedgerCache.h>
#include <etl/LoadBalancer.h>
#include <etl/Source.h>
#include <etl/SystemState.h>
#include <etl/impl/CacheLoader.h>
#include <etl/impl/ExtractionDataPipe.h>
#include <etl/impl/Extractor.h>
#include <etl/impl/LedgerFetcher.h>
#include <etl/impl/LedgerLoader.h>
#include <etl/impl/LedgerPublisher.h>
#include <etl/impl/Transformer.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>

#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

#include <memory>

struct AccountTransactionsData;
struct NFTTransactionsData;
struct NFTsData;
class SubscriptionManager;

/**
 * @brief This class is responsible for continuously extracting data from a p2p node, and writing that data to the
 * databases.
 *
 * Usually, multiple different processes share access to the same network accessible databases, in which case only one
 * such process is performing ETL and writing to the database. The other processes simply monitor the database for new
 * ledgers, and publish those ledgers to the various subscription streams. If a monitoring process determines that the
 * ETL writer has failed (no new ledgers written for some time), the process will attempt to become the ETL writer.
 *
 * If there are multiple monitoring processes that try to become the ETL writer at the same time, one will win out, and
 * the others will fall back to monitoring/publishing. In this sense, this class dynamically transitions from monitoring
 * to writing and from writing to monitoring, based on the activity of other processes running on different machines.
 */
class ETLService
{
    // TODO: make these template parameters in ETLService
    using SubscriptionManagerType = SubscriptionManager;
    using LoadBalancerType = LoadBalancer;
    using NetworkValidatedLedgersType = NetworkValidatedLedgers;
    using DataPipeType = clio::detail::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
    using CacheLoaderType = clio::detail::CacheLoader<Backend::LedgerCache>;
    using LedgerFetcherType = clio::detail::LedgerFetcher<LoadBalancerType>;
    using ExtractorType = clio::detail::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
    using LedgerLoaderType = clio::detail::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
    using LedgerPublisherType = clio::detail::LedgerPublisher<SubscriptionManagerType>;
    using TransformerType = clio::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType>;

    clio::Logger log_{"ETL"};

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<LoadBalancerType> loadBalancer_;
    std::shared_ptr<NetworkValidatedLedgersType> networkValidatedLedgers_;

    std::uint32_t extractorThreads_ = 1;
    std::thread worker_;

    CacheLoaderType cacheLoader_;
    LedgerFetcherType ledgerFetcher_;
    LedgerLoaderType ledgerLoader_;
    LedgerPublisherType ledgerPublisher_;

    SystemState state_;

    size_t numMarkers_ = 2;
    std::optional<uint32_t> startSequence_;
    std::optional<uint32_t> finishSequence_;
    size_t txnThreshold_ = 0;

public:
    /**
     * @brief Create an instance of ETLService
     *
     * @param config The configuration to use
     * @param ioc io context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param balancer Load balancer to use
     * @param ledgers The network validated ledgers datastructure
     */
    ETLService(
        clio::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManagerType> subscriptions,
        std::shared_ptr<LoadBalancerType> balancer,
        std::shared_ptr<NetworkValidatedLedgersType> ledgers);

    static std::shared_ptr<ETLService>
    make_ETLService(
        clio::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManagerType> subscriptions,
        std::shared_ptr<LoadBalancerType> balancer,
        std::shared_ptr<NetworkValidatedLedgersType> ledgers)
    {
        auto etl = std::make_shared<ETLService>(config, ioc, backend, subscriptions, balancer, ledgers);
        etl->run();

        return etl;
    }

    /**
     * @brief Stops components and joins worker thread
     */
    ~ETLService()
    {
        log_.info() << "onStop called";
        log_.debug() << "Stopping Reporting ETL";

        state_.isStopping = true;
        cacheLoader_.stop();

        if (worker_.joinable())
            worker_.join();

        log_.debug() << "Joined ETLService worker thread";
    }

    /**
     * @brief Get time passed since last ledger close, in seconds
     */
    std::uint32_t
    lastCloseAgeSeconds() const
    {
        return ledgerPublisher_.lastCloseAgeSeconds();
    }

    /**
     * @brief Get state of ETL as a JSON object
     */
    boost::json::object
    getInfo() const
    {
        boost::json::object result;

        result["etl_sources"] = loadBalancer_->toJson();
        result["is_writer"] = state_.isWriting.load();
        result["read_only"] = state_.isReadOnly;
        auto last = ledgerPublisher_.getLastPublish();
        if (last.time_since_epoch().count() != 0)
            result["last_publish_age_seconds"] = std::to_string(ledgerPublisher_.lastPublishAgeSeconds());
        return result;
    }

private:
    /**
     * @brief Run the ETL pipeline.
     *
     * Extracts ledgers and writes them to the database, until a write conflict occurs (or the server shuts down).
     * @note database must already be populated when this function is called
     *
     * @param startSequence the first ledger to extract
     * @return the last ledger written to the database, if any
     */
    std::optional<uint32_t>
    runETLPipeline(uint32_t startSequence, int offset);

    /**
     * @brief Monitor the network for newly validated ledgers.
     *
     * Also monitor the database to see if any process is writing those ledgers.
     * This function is called when the application starts, and will only return when the application is shutting down.
     * If the software detects the database is empty, this function will call loadInitialLedger(). If the software
     * detects ledgers are not being written, this function calls runETLPipeline(). Otherwise, this function publishes
     * ledgers as they are written to the database.
     */
    void
    monitor();

    /**
     * @brief Monitor the database for newly written ledgers.
     *
     * Similar to monitor(), except this function will never call runETLPipeline() or loadInitialLedger().
     * This function only publishes ledgers as they are written to the database.
     */
    void
    monitorReadOnly();

    /**
     * @return true if stopping; false otherwise
     */
    bool
    isStopping()
    {
        return state_.isStopping;
    }

    /**
     * @brief Get the number of markers to use during the initial ledger download
     *
     * This is equivalent to the degree of parallelism during the initial ledger download.
     *
     * @return the number of markers
     */
    std::uint32_t
    getNumMarkers()
    {
        return numMarkers_;
    }

    /**
     * @brief Start all components to run ETL service
     */
    void
    run();

    /**
     * @brief Spawn the worker thread and start monitoring
     */
    void
    doWork();
};
File diff suppressed because it is too large
@@ -1,719 +0,0 @@
#ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED

#include <boost/algorithm/string.hpp>
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h>
#include <subscriptions/SubscriptionManager.h>

#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <etl/ETLHelpers.h>
#include <grpcpp/grpcpp.h>

class ETLLoadBalancer;
class ETLSource;
class ProbingETLSource;
class SubscriptionManager;

/// This class manages a connection to a single ETL source. This is almost
/// always a rippled node, but really could be another reporting node. This
/// class subscribes to the ledgers and transactions_proposed streams of the
/// associated rippled node, and keeps track of which ledgers the rippled node
/// has. This class also has methods for extracting said ledgers. Lastly this
/// class forwards transactions received on the transactions_proposed streams to
/// any subscribers.
class ForwardCache
{
    using response_type = std::optional<boost::json::object>;

    mutable std::atomic_bool stopping_ = false;
    mutable std::shared_mutex mtx_;
    std::unordered_map<std::string, response_type> latestForwarded_;

    boost::asio::io_context::strand strand_;
    boost::asio::steady_timer timer_;
    ETLSource const& source_;
    std::uint32_t duration_ = 10;

    void
    clear();

public:
    ForwardCache(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        ETLSource const& source)
        : strand_(ioc), timer_(strand_), source_(source)
    {
        if (config.contains("cache") && !config.at("cache").is_array())
            throw std::runtime_error("ETLSource cache must be array");

        if (config.contains("cache_duration") &&
            !config.at("cache_duration").is_int64())
            throw std::runtime_error(
                "ETLSource cache_duration must be a number");

        duration_ = config.contains("cache_duration")
            ? config.at("cache_duration").as_int64()
            : 10;

        auto commands = config.contains("cache") ? config.at("cache").as_array()
                                                 : boost::json::array{};

        for (auto const& command : commands)
        {
            if (!command.is_string())
                throw std::runtime_error(
                    "ETLSource forward command must be array of strings");

            latestForwarded_[command.as_string().c_str()] = {};
        }
    }

    // This is to be called every freshenDuration_ seconds.
    // It will request information from this etlSource, and
    // will populate the cache with the latest value. If the
    // request fails, it will evict that value from the cache.
    void
    freshen();

    std::optional<boost::json::object>
    get(boost::json::object const& command) const;
};

class ETLSource
{
public:
    virtual bool
    isConnected() const = 0;

    virtual boost::json::object
    toJson() const = 0;

    virtual void
    run() = 0;

    virtual void
    pause() = 0;

    virtual void
    resume() = 0;

    virtual std::string
    toString() const = 0;

    virtual bool
    hasLedger(uint32_t sequence) const = 0;

    virtual std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(
        uint32_t ledgerSequence,
        bool getObjects = true,
        bool getObjectNeighbors = false) = 0;

    virtual bool
    loadInitialLedger(
        uint32_t sequence,
        std::uint32_t numMarkers,
        bool cacheOnly = false) = 0;

    virtual std::optional<boost::json::object>
    forwardToRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const = 0;

    virtual ~ETLSource()
    {
    }

private:
    friend ForwardCache;
    friend ProbingETLSource;

    virtual std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const = 0;
};

struct ETLSourceHooks
{
    enum class Action { STOP, PROCEED };

    std::function<Action(boost::beast::error_code)> onConnected;
    std::function<Action(boost::beast::error_code)> onDisconnected;
};

template <class Derived>
class ETLSourceImpl : public ETLSource
{
    std::string wsPort_;

    std::string grpcPort_;

    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;

    boost::asio::ip::tcp::resolver resolver_;

    boost::beast::flat_buffer readBuffer_;

    std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;

    std::string validatedLedgersRaw_;

    std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;

    // beast::Journal journal_;

    mutable std::mutex mtx_;

    std::atomic_bool connected_{false};

    // true if this ETL source is forwarding transactions received on the
    // transactions_proposed stream. There are usually multiple ETL sources,
    // so to avoid forwarding the same transaction multiple times, we only
    // forward from one particular ETL source at a time.
    std::atomic_bool forwardingStream_{false};

    // The last time a message was received on the ledgers stream
    std::chrono::system_clock::time_point lastMsgTime_;
    mutable std::mutex lastMsgTimeMtx_;

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    ETLLoadBalancer& balancer_;

    ForwardCache forwardCache_;

    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;

protected:
    Derived&
    derived()
    {
        return static_cast<Derived&>(*this);
    }

    std::string ip_;

    size_t numFailures_ = 0;

    boost::asio::io_context& ioc_;

    // used for retrying connections
    boost::asio::steady_timer timer_;

    std::atomic_bool closing_{false};

    std::atomic_bool paused_{false};

    ETLSourceHooks hooks_;

    void
    run() override
    {
        BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString();

        auto const host = ip_;
        auto const port = wsPort_;

        resolver_.async_resolve(host, port, [this](auto ec, auto results) {
            onResolve(ec, results);
        });
    }

public:
    ~ETLSourceImpl()
    {
        derived().close(false);
    }

    bool
    isConnected() const override
    {
        return connected_;
    }

    std::chrono::system_clock::time_point
    getLastMsgTime() const
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        return lastMsgTime_;
    }

    void
    setLastMsgTime()
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        lastMsgTime_ = std::chrono::system_clock::now();
    }

    /// Create ETL source without gRPC endpoint
    /// Fetch ledger and load initial ledger will fail for this source
    /// Primarily used in read-only mode, to monitor when ledgers are validated
    ETLSourceImpl(
        boost::json::object const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
        ETLLoadBalancer& balancer,
        ETLSourceHooks hooks)
        : resolver_(boost::asio::make_strand(ioContext))
        , networkValidatedLedgers_(networkValidatedLedgers)
        , backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , forwardCache_(config, ioContext, *this)
        , ioc_(ioContext)
        , timer_(ioContext)
        , hooks_(hooks)
    {
        if (config.contains("ip"))
        {
            auto ipJs = config.at("ip").as_string();
            ip_ = {ipJs.c_str(), ipJs.size()};
        }
        if (config.contains("ws_port"))
        {
            auto portjs = config.at("ws_port").as_string();
            wsPort_ = {portjs.c_str(), portjs.size()};
        }
        if (config.contains("grpc_port"))
        {
            auto portjs = config.at("grpc_port").as_string();
            grpcPort_ = {portjs.c_str(), portjs.size()};
            try
            {
                boost::asio::ip::tcp::endpoint endpoint{
                    boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
                std::stringstream ss;
                ss << endpoint;
                grpc::ChannelArguments chArgs;
                chArgs.SetMaxReceiveMessageSize(-1);
                stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
                    grpc::CreateCustomChannel(
                        ss.str(), grpc::InsecureChannelCredentials(), chArgs));
                BOOST_LOG_TRIVIAL(debug)
                    << "Made stub for remote = " << toString();
            }
            catch (std::exception const& e)
            {
                BOOST_LOG_TRIVIAL(debug)
                    << "Exception while creating stub = " << e.what()
                    << " . Remote = " << toString();
            }
        }
    }

    /// @param sequence ledger sequence to check for
    /// @return true if this source has the desired ledger
    bool
    hasLedger(uint32_t sequence) const override
    {
        std::lock_guard lck(mtx_);
        for (auto& pair : validatedLedgers_)
        {
            if (sequence >= pair.first && sequence <= pair.second)
            {
                return true;
            }
            else if (sequence < pair.first)
            {
                // validatedLedgers_ is a sorted list of disjoint ranges
                // if the sequence comes before this range, the sequence will
                // come before all subsequent ranges
                return false;
            }
        }
        return false;
    }

    /// process the validated range received on the ledgers stream. set the
    /// appropriate member variable
    /// @param range validated range received on ledgers stream
    void
    setValidatedRange(std::string const& range)
    {
        std::vector<std::pair<uint32_t, uint32_t>> pairs;
        std::vector<std::string> ranges;
        boost::split(ranges, range, boost::is_any_of(","));
        for (auto& pair : ranges)
        {
            std::vector<std::string> minAndMax;

            boost::split(minAndMax, pair, boost::is_any_of("-"));

            if (minAndMax.size() == 1)
            {
                uint32_t sequence = std::stoll(minAndMax[0]);
                pairs.push_back(std::make_pair(sequence, sequence));
            }
            else
            {
                assert(minAndMax.size() == 2);
                uint32_t min = std::stoll(minAndMax[0]);
                uint32_t max = std::stoll(minAndMax[1]);
                pairs.push_back(std::make_pair(min, max));
            }
        }
        std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) {
            return left.first < right.first;
        });

        // we only hold the lock here, to avoid blocking while string processing
        std::lock_guard lck(mtx_);
        validatedLedgers_ = std::move(pairs);
        validatedLedgersRaw_ = range;
    }
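For illustration, a self-contained version of the same range-string parsing; the scaffold and the sample string "100-105,200" are mine, and plain std is used where the original uses boost::split:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

// Parse a rippled "validated ledgers" string such as "100-105,200" into
// (min, max) pairs, mirroring setValidatedRange() above without boost.
std::vector<std::pair<uint32_t, uint32_t>>
parseValidatedRange(std::string const& range)
{
    std::vector<std::pair<uint32_t, uint32_t>> pairs;
    std::istringstream stream{range};
    std::string item;
    while (std::getline(stream, item, ','))
    {
        auto const dash = item.find('-');
        if (dash == std::string::npos)
        {
            auto const seq = static_cast<uint32_t>(std::stoll(item));
            pairs.emplace_back(seq, seq);  // single ledger, e.g. "200"
        }
        else
        {
            auto const min = static_cast<uint32_t>(std::stoll(item.substr(0, dash)));
            auto const max = static_cast<uint32_t>(std::stoll(item.substr(dash + 1)));
            pairs.emplace_back(min, max);  // inclusive range, e.g. "100-105"
        }
    }
    return pairs;
}

int
main()
{
    auto const pairs = parseValidatedRange("100-105,200");
    assert(pairs.size() == 2);
    for (auto const& [min, max] : pairs)
        std::cout << min << ".." << max << '\n';
}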
    /// @return the validated range of this source
    /// @note this is only used by server_info
    std::string
    getValidatedRange() const
    {
        std::lock_guard lck(mtx_);

        return validatedLedgersRaw_;
    }

    /// Fetch the specified ledger
    /// @param ledgerSequence sequence of the ledger to fetch
    /// @param getObjects whether to get the account state diff between this ledger
    /// and the prior one
    /// @return the extracted data and the result status
    std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(
        uint32_t ledgerSequence,
        bool getObjects = true,
        bool getObjectNeighbors = false) override;

    std::string
    toString() const override
    {
        return "{ validated_ledger : " + getValidatedRange() +
            " , ip : " + ip_ + " , web socket port : " + wsPort_ +
            ", grpc port : " + grpcPort_ + " }";
    }

    boost::json::object
    toJson() const override
    {
        boost::json::object res;
        res["validated_range"] = getValidatedRange();
        res["is_connected"] = std::to_string(isConnected());
        res["ip"] = ip_;
        res["ws_port"] = wsPort_;
        res["grpc_port"] = grpcPort_;
        auto last = getLastMsgTime();
        if (last.time_since_epoch().count() != 0)
            res["last_msg_age_seconds"] = std::to_string(
                std::chrono::duration_cast<std::chrono::seconds>(
                    std::chrono::system_clock::now() - getLastMsgTime())
                    .count());
        return res;
    }

    /// Download a ledger in full
    /// @param ledgerSequence sequence of the ledger to download
    /// @param writeQueue queue to push downloaded ledger objects
    /// @return true if the download was successful
    bool
    loadInitialLedger(
        std::uint32_t ledgerSequence,
        std::uint32_t numMarkers,
        bool cacheOnly = false) override;

    /// Attempt to reconnect to the ETL source
    void
    reconnect(boost::beast::error_code ec);

    /// Pause the source, effectively stopping it from trying to reconnect
    void
    pause() override
    {
        paused_ = true;
        derived().close(false);
    }

    /// Resume the source, allowing it to reconnect again
    void
    resume() override
    {
        paused_ = false;
        derived().close(true);
    }

    /// Callback
    void
    onResolve(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type results);

    /// Callback
    virtual void
    onConnect(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type::endpoint_type
            endpoint) = 0;

    /// Callback
    void
    onHandshake(boost::beast::error_code ec);

    /// Callback
    void
    onWrite(boost::beast::error_code ec, size_t size);

    /// Callback
    void
    onRead(boost::beast::error_code ec, size_t size);

    /// Handle the most recently received message
    /// @return true if the message was handled successfully. false on error
    bool
    handleMessage();

    std::optional<boost::json::object>
    forwardToRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;
};

class PlainETLSource : public ETLSourceImpl<PlainETLSource>
{
    std::unique_ptr<boost::beast::websocket::stream<boost::beast::tcp_stream>>
        ws_;

public:
    PlainETLSource(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        ETLLoadBalancer& balancer,
        ETLSourceHooks hooks)
        : ETLSourceImpl(
              config,
              ioc,
              backend,
              subscriptions,
              nwvl,
              balancer,
              std::move(hooks))
        , ws_(std::make_unique<
              boost::beast::websocket::stream<boost::beast::tcp_stream>>(
              boost::asio::make_strand(ioc)))
    {
    }

    void
    onConnect(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
        override;

    /// Close the websocket
    /// @param startAgain whether to reconnect
    void
    close(bool startAgain);

    boost::beast::websocket::stream<boost::beast::tcp_stream>&
    ws()
    {
        return *ws_;
    }
};

class SslETLSource : public ETLSourceImpl<SslETLSource>
{
    std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx_;

    std::unique_ptr<boost::beast::websocket::stream<
        boost::beast::ssl_stream<boost::beast::tcp_stream>>>
        ws_;

public:
    SslETLSource(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        ETLLoadBalancer& balancer,
        ETLSourceHooks hooks)
        : ETLSourceImpl(
              config,
              ioc,
              backend,
              subscriptions,
              nwvl,
              balancer,
              std::move(hooks))
        , sslCtx_(sslCtx)
        , ws_(std::make_unique<boost::beast::websocket::stream<
              boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
              boost::asio::make_strand(ioc_),
              *sslCtx_))
    {
    }

    void
    onConnect(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
        override;

    void
    onSslHandshake(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint);

    /// Close the websocket
    /// @param startAgain whether to reconnect
    void
    close(bool startAgain);

    boost::beast::websocket::stream<
        boost::beast::ssl_stream<boost::beast::tcp_stream>>&
    ws()
    {
        return *ws_;
    }
};

/// This class is used to manage connections to transaction processing processes
/// This class spawns a listener for each etl source, which listens to messages
/// on the ledgers stream (to keep track of which ledgers have been validated by
/// the network, and the range of ledgers each etl source has). This class also
/// allows requests for ledger data to be load balanced across all possible etl
/// sources.
class ETLLoadBalancer
{
private:
    std::vector<std::unique_ptr<ETLSource>> sources_;

    std::uint32_t downloadRanges_ = 16;

public:
    ETLLoadBalancer(
        boost::json::object const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl);

    static std::shared_ptr<ETLLoadBalancer>
    make_ETLLoadBalancer(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
    {
        return std::make_shared<ETLLoadBalancer>(
            config, ioc, backend, subscriptions, validatedLedgers);
    }

    ~ETLLoadBalancer()
    {
        sources_.clear();
    }

    /// Load the initial ledger, writing data to the queue
    /// @param sequence sequence of ledger to download
    void
    loadInitialLedger(uint32_t sequence, bool cacheOnly = false);

    /// Fetch data for a specific ledger. This function will continuously try
    /// to fetch data for the specified ledger until the fetch succeeds, the
    /// ledger is found in the database, or the server is shutting down.
    /// @param ledgerSequence sequence of ledger to fetch data for
    /// @param getObjects if true, fetch diff between specified ledger and
    /// previous
    /// @return the extracted data, if extraction was successful. If the ledger
    /// was found in the database or the server is shutting down, the optional
    /// will be empty
    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(
        uint32_t ledgerSequence,
        bool getObjects,
        bool getObjectNeighbors);

    /// Determine whether messages received on the transactions_proposed stream
    /// should be forwarded to subscribing clients. The server subscribes to
    /// transactions_proposed on multiple ETLSources, yet only forwards messages
    /// from one source at any given time (to avoid sending duplicate messages
    /// to clients).
    /// @param in ETLSource in question
    /// @return true if messages should be forwarded
    bool
    shouldPropagateTxnStream(ETLSource* in) const
    {
        for (auto& src : sources_)
        {
            assert(src);
            // We pick the first ETLSource encountered that is connected
            if (src->isConnected())
            {
                if (src.get() == in)
                    return true;
                else
                    return false;
            }
        }

        // If no sources connected, then this stream has not been forwarded
        return true;
    }

    boost::json::value
    toJson() const
    {
        boost::json::array ret;
        for (auto& src : sources_)
        {
            ret.push_back(src->toJson());
        }
        return ret;
    }

    /// Forward a JSON RPC request to a randomly selected rippled node
    /// @param request JSON-RPC request
    /// @return response received from rippled node
    std::optional<boost::json::object>
    forwardToRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const;

private:
    /// f is a function that takes an ETLSource as an argument and returns a
    /// bool. Attempt to execute f for one randomly chosen ETLSource that has
    /// the specified ledger. If f returns false, another randomly chosen
    /// ETLSource is used. The process repeats until f returns true.
    /// @param f function to execute. This function takes the ETL source as an
    /// argument, and returns a bool.
    /// @param ledgerSequence f is executed for each ETLSource that has this
    /// ledger
    /// @return true if f was eventually executed successfully. false if the
    /// ledger was found in the database or the server is shutting down
    template <class Func>
    bool
    execute(Func f, uint32_t ledgerSequence);
};

#endif
235
src/etl/LoadBalancer.cpp
Normal file
@@ -0,0 +1,235 @@
|
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/DBHelpers.h>
#include <etl/ETLService.h>
#include <etl/NFTHelpers.h>
#include <etl/ProbingSource.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>

#include <ripple/beast/net/IPEndpoint.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/asio/strand.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/json.hpp>
#include <boost/json/src.hpp>

#include <thread>

using namespace clio;

std::unique_ptr<Source>
LoadBalancer::make_Source(
    clio::Config const& config,
    boost::asio::io_context& ioContext,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
    LoadBalancer& balancer)
{
    auto src =
        std::make_unique<ProbingSource>(config, ioContext, backend, subscriptions, networkValidatedLedgers, balancer);

    src->run();

    return src;
}

std::shared_ptr<LoadBalancer>
LoadBalancer::make_LoadBalancer(
    clio::Config const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
{
    return std::make_shared<LoadBalancer>(config, ioc, backend, subscriptions, validatedLedgers);
}

LoadBalancer::LoadBalancer(
    clio::Config const& config,
    boost::asio::io_context& ioContext,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> nwvl)
{
    if (auto value = config.maybeValue<uint32_t>("num_markers"); value)
        downloadRanges_ = std::clamp(*value, 1u, 256u);
    else if (backend->fetchLedgerRange())
        downloadRanges_ = 4;

    for (auto const& entry : config.array("etl_sources"))
    {
        std::unique_ptr<Source> source = make_Source(entry, ioContext, backend, subscriptions, nwvl, *this);

        sources_.push_back(std::move(source));
        log_.info() << "Added etl source - " << sources_.back()->toString();
    }
}

std::pair<std::vector<std::string>, bool>
LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
{
    std::vector<std::string> response;
    auto const success = execute(
        [this, &response, &sequence, cacheOnly](auto& source) {
            auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);

            if (!res)
                log_.error() << "Failed to download initial ledger."
                             << " Sequence = " << sequence << " source = " << source->toString();
            else
                response = std::move(data);

            return res;
        },
        sequence);
    return {std::move(response), success};
}

LoadBalancer::OptionalGetLedgerResponseType
LoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
    GetLedgerResponseType response;
    bool success = execute(
        [&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](auto& source) {
            auto [status, data] = source->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
            response = std::move(data);
            if (status.ok() && response.validated())
            {
                log.info() << "Successfully fetched ledger = " << ledgerSequence
                           << " from source = " << source->toString();
                return true;
            }
            else
            {
                log.warn() << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
                           << ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
                           << ", source = " << source->toString();
                return false;
            }
        },
        ledgerSequence);
    if (success)
        return response;
    else
        return {};
}

std::optional<boost::json::object>
LoadBalancer::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    srand((unsigned)time(0));
    auto sourceIdx = rand() % sources_.size();
    auto numAttempts = 0;

    while (numAttempts < sources_.size())
    {
        if (auto res = sources_[sourceIdx]->forwardToRippled(request, clientIp, yield))
            return res;

        sourceIdx = (sourceIdx + 1) % sources_.size();
        ++numAttempts;
    }

    return {};
}

bool
LoadBalancer::shouldPropagateTxnStream(Source* in) const
{
    for (auto& src : sources_)
    {
        assert(src);

        // We pick the first Source encountered that is connected
        if (src->isConnected())
            return *src == *in;
    }

    // If no sources connected, then this stream has not been forwarded
    return true;
}

boost::json::value
LoadBalancer::toJson() const
{
    boost::json::array ret;
    for (auto& src : sources_)
        ret.push_back(src->toJson());

    return ret;
}

template <class Func>
bool
LoadBalancer::execute(Func f, uint32_t ledgerSequence)
{
    srand((unsigned)time(0));
    auto sourceIdx = rand() % sources_.size();
    auto numAttempts = 0;

    while (true)
    {
        auto& source = sources_[sourceIdx];

        log_.debug() << "Attempting to execute func. ledger sequence = " << ledgerSequence
                     << " - source = " << source->toString();
        // Originally, this check was (source->hasLedger(ledgerSequence) || true).
        /* Sometimes rippled has the ledger but doesn't actually know it; however,
           that does NOT happen in the normal case, so the || true was safe to remove.
           It is only needed when loading full history standalone. */
        if (source->hasLedger(ledgerSequence))
        {
            bool res = f(source);
            if (res)
            {
                log_.debug() << "Successfully executed func at source = " << source->toString()
                             << " - ledger sequence = " << ledgerSequence;
                break;
            }
            else
            {
                log_.warn() << "Failed to execute func at source = " << source->toString()
                            << " - ledger sequence = " << ledgerSequence;
            }
        }
        else
        {
            log_.warn() << "Ledger not present at source = " << source->toString()
                        << " - ledger sequence = " << ledgerSequence;
        }
        sourceIdx = (sourceIdx + 1) % sources_.size();
        numAttempts++;
        if (numAttempts % sources_.size() == 0)
        {
            log_.info() << "Ledger sequence " << ledgerSequence << " is not yet available from any configured sources. "
                        << "Sleeping and trying again";
            std::this_thread::sleep_for(std::chrono::seconds(2));
        }
    }
    return true;
}
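A note for readers of this commit: the retry policy in execute() is a randomized round-robin with a fixed two-second back-off once every configured source has been tried. Below is a minimal standalone sketch of that policy; the names RetrySource and tryAll are invented for illustration and are not part of clio.

// Sketch only; mirrors the loop structure of LoadBalancer::execute() above.
#include <chrono>
#include <cstddef>
#include <cstdlib>
#include <ctime>
#include <functional>
#include <thread>
#include <vector>

struct RetrySource
{
    bool hasLedger;                // does this source claim to have the ledger?
    std::function<bool()> work;    // the operation to attempt against it
};

bool
tryAll(std::vector<RetrySource>& sources)
{
    std::srand(static_cast<unsigned>(std::time(nullptr)));
    std::size_t idx = std::rand() % sources.size();    // random starting source
    std::size_t attempts = 0;

    while (true)
    {
        auto& src = sources[idx];
        if (src.hasLedger && src.work())
            return true;                               // success at this source

        idx = (idx + 1) % sources.size();              // rotate to the next one
        if (++attempts % sources.size() == 0)          // a full pass failed
            std::this_thread::sleep_for(std::chrono::seconds(2));
    }
}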
src/etl/LoadBalancer.h (new file, 161 lines)
@@ -0,0 +1,161 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <etl/ETLHelpers.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>

#include <boost/asio.hpp>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

class Source;
class ProbingSource;
class SubscriptionManager;

/**
 * @brief This class is used to manage connections to transaction processing processes
 *
 * This class spawns a listener for each etl source, which listens to messages on the ledgers stream (to keep track of
 * which ledgers have been validated by the network, and the range of ledgers each etl source has). This class also
 * allows requests for ledger data to be load balanced across all possible ETL sources.
 */
class LoadBalancer
{
public:
    using RawLedgerObjectType = org::xrpl::rpc::v1::RawLedgerObject;
    using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
    using OptionalGetLedgerResponseType = std::optional<GetLedgerResponseType>;

private:
    clio::Logger log_{"ETL"};
    std::vector<std::unique_ptr<Source>> sources_;
    std::uint32_t downloadRanges_ = 16;

public:
    /**
     * @brief Create an instance of the load balancer
     *
     * @param config The configuration to use
     * @param ioContext io context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param nwvl The network validated ledgers datastructure
     */
    LoadBalancer(
        clio::Config const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl);

    static std::shared_ptr<LoadBalancer>
    make_LoadBalancer(
        clio::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers);

    static std::unique_ptr<Source>
    make_Source(
        clio::Config const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
        LoadBalancer& balancer);

    ~LoadBalancer()
    {
        sources_.clear();
    }

    /**
     * @brief Load the initial ledger, writing data to the queue
     *
     * @param sequence sequence of ledger to download
     */
    std::pair<std::vector<std::string>, bool>
    loadInitialLedger(uint32_t sequence, bool cacheOnly = false);

    /**
     * @brief Fetch data for a specific ledger
     *
     * This function will continuously try to fetch data for the specified ledger until the fetch succeeds, the ledger
     * is found in the database, or the server is shutting down.
     *
     * @param ledgerSequence sequence of ledger to fetch data for
     * @param getObjects if true, fetch diff between specified ledger and previous
     * @return the extracted data, if extraction was successful. If the ledger was found in the database or the server
     * is shutting down, the optional will be empty
     */
    OptionalGetLedgerResponseType
    fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors);

    /**
     * @brief Determine whether messages received on the transactions_proposed stream should be forwarded to subscribing
     * clients.
     *
     * The server subscribes to transactions_proposed on multiple Sources, yet only forwards messages from one source at
     * any given time (to avoid sending duplicate messages to clients).
     *
     * @param in Source in question
     * @return true if messages should be forwarded
     */
    bool
    shouldPropagateTxnStream(Source* in) const;

    /**
     * @return JSON representation of the state of this load balancer
     */
    boost::json::value
    toJson() const;

    /**
     * @brief Forward a JSON RPC request to a randomly selected rippled node
     *
     * @param request JSON-RPC request
     * @return response received from rippled node
     */
    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield)
        const;

private:
    /**
     * @brief Execute a function on a randomly selected source
     *
     * @note f is a function that takes a Source as an argument and returns a bool.
     * Attempt to execute f for one randomly chosen Source that has the specified ledger. If f returns false, another
     * randomly chosen Source is used. The process repeats until f returns true.
     *
     * @param f function to execute. This function takes the ETL source as an argument, and returns a bool.
     * @param ledgerSequence f is executed for each Source that has this ledger
     * @return true if f was eventually executed successfully. false if the ledger was found in the database or the
     * server is shutting down
     */
    template <class Func>
    bool
    execute(Func f, uint32_t ledgerSequence);
};
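With the header in hand, the typical call pattern is: construct through make_LoadBalancer, then pull ledgers with fetchLedger. A hedged usage sketch follows; the wiring function etlExample and the ledger number are illustrative, not part of this commit.

// Illustrative wiring only; assumes the clio headers above are on the include
// path and that backend/subscriptions/ledgers were created elsewhere.
#include <etl/LoadBalancer.h>

void
etlExample(
    clio::Config const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> ledgers)
{
    auto balancer = LoadBalancer::make_LoadBalancer(config, ioc, backend, subscriptions, ledgers);

    // Ask any available source for this ledger, including object diffs.
    if (auto response = balancer->fetchLedger(75000000u, true, false); response)
    {
        // response holds an org::xrpl::rpc::v1::GetLedgerResponse
    }
}

The mixed hunks that follow are the diff of src/etl/NFTHelpers.cpp (the viewer dropped this file's header); removed lines are prefixed with - and added lines with +.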
@@ -1,4 +1,22 @@
 #include <ripple/app/tx/impl/details/NFTokenUtils.h>
+//------------------------------------------------------------------------------
+/*
+    This file is part of clio: https://github.com/XRPLF/clio
+    Copyright (c) 2022, the clio developers.
+
+    Permission to use, copy, modify, and distribute this software for any
+    purpose with or without fee is hereby granted, provided that the above
+    copyright notice and this permission notice appear in all copies.
+
+    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
 #include <ripple/protocol/STBase.h>
 #include <ripple/protocol/STTx.h>
 #include <ripple/protocol/TxMeta.h>
@@ -26,25 +44,18 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
 
     for (ripple::STObject const& node : txMeta.getNodes())
     {
-        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
-            ripple::ltNFTOKEN_PAGE)
+        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE)
             continue;
 
         if (!owner)
-            owner = ripple::AccountID::fromVoid(
-                node.getFieldH256(ripple::sfLedgerIndex).data());
+            owner = ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data());
 
         if (node.getFName() == ripple::sfCreatedNode)
         {
             ripple::STArray const& toAddNFTs =
-                node.peekAtField(ripple::sfNewFields)
-                    .downcast<ripple::STObject>()
-                    .getFieldArray(ripple::sfNFTokens);
+                node.peekAtField(ripple::sfNewFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
             std::transform(
-                toAddNFTs.begin(),
-                toAddNFTs.end(),
-                std::back_inserter(finalIDs),
-                [](ripple::STObject const& nft) {
+                toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(finalIDs), [](ripple::STObject const& nft) {
                     return nft.getFieldH256(ripple::sfNFTokenID);
                 });
         }
@@ -62,32 +73,23 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
             // as rippled outputs all fields in final fields even if they were
             // not changed.
             ripple::STObject const& previousFields =
-                node.peekAtField(ripple::sfPreviousFields)
-                    .downcast<ripple::STObject>();
+                node.peekAtField(ripple::sfPreviousFields).downcast<ripple::STObject>();
             if (!previousFields.isFieldPresent(ripple::sfNFTokens))
                 continue;
 
-            ripple::STArray const& toAddNFTs =
-                previousFields.getFieldArray(ripple::sfNFTokens);
+            ripple::STArray const& toAddNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
             std::transform(
-                toAddNFTs.begin(),
-                toAddNFTs.end(),
-                std::back_inserter(prevIDs),
-                [](ripple::STObject const& nft) {
+                toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(prevIDs), [](ripple::STObject const& nft) {
                     return nft.getFieldH256(ripple::sfNFTokenID);
                 });
 
             ripple::STArray const& toAddFinalNFTs =
-                node.peekAtField(ripple::sfFinalFields)
-                    .downcast<ripple::STObject>()
-                    .getFieldArray(ripple::sfNFTokens);
+                node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
             std::transform(
                 toAddFinalNFTs.begin(),
                 toAddFinalNFTs.end(),
                 std::back_inserter(finalIDs),
-                [](ripple::STObject const& nft) {
-                    return nft.getFieldH256(ripple::sfNFTokenID);
-                });
+                [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); });
         }
     }
@@ -102,13 +104,11 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
         std::inserter(tokenIDResult, tokenIDResult.begin()));
     if (tokenIDResult.size() == 1 && owner)
         return {
-            {NFTTransactionsData(
-                tokenIDResult.front(), txMeta, sttx.getTransactionID())},
-            NFTsData(tokenIDResult.front(), *owner, txMeta, false)};
+            {NFTTransactionsData(tokenIDResult.front(), txMeta, sttx.getTransactionID())},
+            NFTsData(tokenIDResult.front(), *owner, sttx.getFieldVL(ripple::sfURI), txMeta)};
 
     std::stringstream msg;
-    msg << __func__ << " - unexpected NFTokenMint data in tx "
-        << sttx.getTransactionID();
+    msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID();
     throw std::runtime_error(msg.str());
 }
@@ -116,49 +116,43 @@ std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
 getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
 {
     ripple::uint256 const tokenID = sttx.getFieldH256(ripple::sfNFTokenID);
-    std::vector<NFTTransactionsData> const txs = {
-        NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())};
+    std::vector<NFTTransactionsData> const txs = {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())};
 
     // Determine who owned the token when it was burned by finding an
     // NFTokenPage that was deleted or modified that contains this
     // tokenID.
     for (ripple::STObject const& node : txMeta.getNodes())
     {
-        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
-                ripple::ltNFTOKEN_PAGE ||
+        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
             node.getFName() == ripple::sfCreatedNode)
             continue;
 
         // NFT burn can result in an NFTokenPage being modified to no longer
         // include the target, or an NFTokenPage being deleted. If this is
         // modified, we want to look for the target in the fields prior to
-        // modification. If deleted, it's possible that the page was modified
-        // to remove the target NFT prior to the entire page being deleted. In
-        // this case, we need to look in the PreviousFields. Otherwise, the
-        // page was not modified prior to deleting and we need to look in the
-        // FinalFields.
+        // modification. If deleted, it's possible that the page was
+        // modified to remove the target NFT prior to the entire page being
+        // deleted. In this case, we need to look in the PreviousFields.
+        // Otherwise, the page was not modified prior to deleting and we
+        // need to look in the FinalFields.
         std::optional<ripple::STArray> prevNFTs;
 
         if (node.isFieldPresent(ripple::sfPreviousFields))
         {
             ripple::STObject const& previousFields =
-                node.peekAtField(ripple::sfPreviousFields)
-                    .downcast<ripple::STObject>();
+                node.peekAtField(ripple::sfPreviousFields).downcast<ripple::STObject>();
             if (previousFields.isFieldPresent(ripple::sfNFTokens))
                 prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
         }
         else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
-            prevNFTs = node.peekAtField(ripple::sfFinalFields)
-                           .downcast<ripple::STObject>()
-                           .getFieldArray(ripple::sfNFTokens);
+            prevNFTs =
+                node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
 
         if (!prevNFTs)
             continue;
 
-        auto const nft = std::find_if(
-            prevNFTs->begin(),
-            prevNFTs->end(),
-            [&tokenID](ripple::STObject const& candidate) {
+        auto const nft =
+            std::find_if(prevNFTs->begin(), prevNFTs->end(), [&tokenID](ripple::STObject const& candidate) {
                 return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
             });
         if (nft != prevNFTs->end())
@@ -166,92 +160,74 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
                 txs,
                 NFTsData(
                     tokenID,
-                    ripple::AccountID::fromVoid(
-                        node.getFieldH256(ripple::sfLedgerIndex).data()),
+                    ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()),
                     txMeta,
                     true));
     }
 
     std::stringstream msg;
-    msg << __func__ << " - could not determine owner at burntime for tx "
-        << sttx.getTransactionID();
+    msg << " - could not determine owner at burntime for tx " << sttx.getTransactionID();
     throw std::runtime_error(msg.str());
 }
 
 std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
-getNFTokenAcceptOfferData(
-    ripple::TxMeta const& txMeta,
-    ripple::STTx const& sttx)
+getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
 {
     // If we have the buy offer from this tx, we can determine the owner
     // more easily by just looking at the owner of the accepted NFTokenOffer
     // object.
     if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer))
     {
-        auto const affectedBuyOffer = std::find_if(
-            txMeta.getNodes().begin(),
-            txMeta.getNodes().end(),
-            [&sttx](ripple::STObject const& node) {
-                return node.getFieldH256(ripple::sfLedgerIndex) ==
-                    sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
+        auto const affectedBuyOffer =
+            std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) {
+                return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
             });
         if (affectedBuyOffer == txMeta.getNodes().end())
        {
             std::stringstream msg;
-            msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
-                << sttx.getTransactionID();
+            msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
             throw std::runtime_error(msg.str());
         }
 
-        ripple::uint256 const tokenID =
-            affectedBuyOffer->peekAtField(ripple::sfFinalFields)
-                .downcast<ripple::STObject>()
-                .getFieldH256(ripple::sfNFTokenID);
+        ripple::uint256 const tokenID = affectedBuyOffer->peekAtField(ripple::sfFinalFields)
+                                            .downcast<ripple::STObject>()
+                                            .getFieldH256(ripple::sfNFTokenID);
 
-        ripple::AccountID const owner =
-            affectedBuyOffer->peekAtField(ripple::sfFinalFields)
-                .downcast<ripple::STObject>()
-                .getAccountID(ripple::sfOwner);
+        ripple::AccountID const owner = affectedBuyOffer->peekAtField(ripple::sfFinalFields)
+                                            .downcast<ripple::STObject>()
+                                            .getAccountID(ripple::sfOwner);
         return {
-            {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
-            NFTsData(tokenID, owner, txMeta, false)};
+            {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}, NFTsData(tokenID, owner, txMeta, false)};
     }
 
     // Otherwise we have to infer the new owner from the affected nodes.
-    auto const affectedSellOffer = std::find_if(
-        txMeta.getNodes().begin(),
-        txMeta.getNodes().end(),
-        [&sttx](ripple::STObject const& node) {
-            return node.getFieldH256(ripple::sfLedgerIndex) ==
-                sttx.getFieldH256(ripple::sfNFTokenSellOffer);
+    auto const affectedSellOffer =
+        std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) {
+            return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenSellOffer);
         });
     if (affectedSellOffer == txMeta.getNodes().end())
     {
         std::stringstream msg;
-        msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
-            << sttx.getTransactionID();
+        msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
         throw std::runtime_error(msg.str());
     }
 
-    ripple::uint256 const tokenID =
-        affectedSellOffer->peekAtField(ripple::sfFinalFields)
-            .downcast<ripple::STObject>()
-            .getFieldH256(ripple::sfNFTokenID);
+    ripple::uint256 const tokenID = affectedSellOffer->peekAtField(ripple::sfFinalFields)
+                                        .downcast<ripple::STObject>()
+                                        .getFieldH256(ripple::sfNFTokenID);
 
-    ripple::AccountID const seller =
-        affectedSellOffer->peekAtField(ripple::sfFinalFields)
-            .downcast<ripple::STObject>()
-            .getAccountID(ripple::sfOwner);
+    ripple::AccountID const seller = affectedSellOffer->peekAtField(ripple::sfFinalFields)
+                                         .downcast<ripple::STObject>()
+                                         .getAccountID(ripple::sfOwner);
 
     for (ripple::STObject const& node : txMeta.getNodes())
     {
-        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
-                ripple::ltNFTOKEN_PAGE ||
+        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
             node.getFName() == ripple::sfDeletedNode)
             continue;
 
-        ripple::AccountID const nodeOwner = ripple::AccountID::fromVoid(
-            node.getFieldH256(ripple::sfLedgerIndex).data());
+        ripple::AccountID const nodeOwner =
+            ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data());
         if (nodeOwner == seller)
             continue;
@@ -265,12 +241,9 @@ getNFTokenAcceptOfferData(
                 .getFieldArray(ripple::sfNFTokens);
         }();
 
-        auto const nft = std::find_if(
-            nfts.begin(),
-            nfts.end(),
-            [&tokenID](ripple::STObject const& candidate) {
-                return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
-            });
+        auto const nft = std::find_if(nfts.begin(), nfts.end(), [&tokenID](ripple::STObject const& candidate) {
+            return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
+        });
         if (nft != nfts.end())
             return {
                 {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
@@ -278,8 +251,7 @@ getNFTokenAcceptOfferData(
     }
 
     std::stringstream msg;
-    msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
-        << sttx.getTransactionID();
+    msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
     throw std::runtime_error(msg.str());
 }
@@ -288,40 +260,28 @@ getNFTokenAcceptOfferData(
 // transaction using this feature. This transaction also never returns an
 // NFTsData because it does not change the state of an NFT itself.
 std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
-getNFTokenCancelOfferData(
-    ripple::TxMeta const& txMeta,
-    ripple::STTx const& sttx)
+getNFTokenCancelOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
 {
     std::vector<NFTTransactionsData> txs;
     for (ripple::STObject const& node : txMeta.getNodes())
     {
-        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
-            ripple::ltNFTOKEN_OFFER)
+        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_OFFER)
             continue;
 
-        ripple::uint256 const tokenID = node.peekAtField(ripple::sfFinalFields)
-                                            .downcast<ripple::STObject>()
-                                            .getFieldH256(ripple::sfNFTokenID);
+        ripple::uint256 const tokenID =
+            node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldH256(ripple::sfNFTokenID);
         txs.emplace_back(tokenID, txMeta, sttx.getTransactionID());
     }
 
     // Deduplicate any transactions based on tokenID/txIdx combo. Can't just
     // use txIdx because in this case one tx can cancel offers for several
     // NFTs.
-    std::sort(
-        txs.begin(),
-        txs.end(),
-        [](NFTTransactionsData const& a, NFTTransactionsData const& b) {
-            return a.tokenID < b.tokenID &&
-                a.transactionIndex < b.transactionIndex;
-        });
-    auto last = std::unique(
-        txs.begin(),
-        txs.end(),
-        [](NFTTransactionsData const& a, NFTTransactionsData const& b) {
-            return a.tokenID == b.tokenID &&
-                a.transactionIndex == b.transactionIndex;
-        });
+    std::sort(txs.begin(), txs.end(), [](NFTTransactionsData const& a, NFTTransactionsData const& b) {
+        return a.tokenID < b.tokenID && a.transactionIndex < b.transactionIndex;
+    });
+    auto last = std::unique(txs.begin(), txs.end(), [](NFTTransactionsData const& a, NFTTransactionsData const& b) {
+        return a.tokenID == b.tokenID && a.transactionIndex == b.transactionIndex;
+    });
    txs.erase(last, txs.end());
     return {txs, {}};
 }
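Worth flagging for reviewers of the dedup hunk above: a && of two < comparisons is not, in general, a valid strict weak ordering for std::sort; the conventional safe idiom is a lexicographic tuple comparison. The sketch below shows that conventional form under invented names (TxKey, dedup) for comparison; it is not the code in this commit.

// Conventional tuple-based ordering for a sort-then-unique dedup; sketch only.
#include <algorithm>
#include <tuple>
#include <vector>

struct TxKey
{
    unsigned tokenID;            // stand-in for ripple::uint256
    unsigned transactionIndex;
};

void
dedup(std::vector<TxKey>& txs)
{
    std::sort(txs.begin(), txs.end(), [](TxKey const& a, TxKey const& b) {
        // std::tie yields lexicographic comparison, a valid strict weak ordering
        return std::tie(a.tokenID, a.transactionIndex) < std::tie(b.tokenID, b.transactionIndex);
    });
    auto last = std::unique(txs.begin(), txs.end(), [](TxKey const& a, TxKey const& b) {
        return a.tokenID == b.tokenID && a.transactionIndex == b.transactionIndex;
    });
    txs.erase(last, txs.end());
}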
@@ -329,20 +289,13 @@ getNFTokenCancelOfferData(
 // This transaction never returns an NFTokensData because it does not
 // change the state of an NFT itself.
 std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
-getNFTokenCreateOfferData(
-    ripple::TxMeta const& txMeta,
-    ripple::STTx const& sttx)
+getNFTokenCreateOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
 {
-    return {
-        {NFTTransactionsData(
-            sttx.getFieldH256(ripple::sfNFTokenID),
-            txMeta,
-            sttx.getTransactionID())},
-        {}};
+    return {{NFTTransactionsData(sttx.getFieldH256(ripple::sfNFTokenID), txMeta, sttx.getTransactionID())}, {}};
 }
 
 std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
-getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
+getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
 {
     if (txMeta.getResultTER() != ripple::tesSUCCESS)
         return {{}, {}};
@@ -368,3 +321,20 @@ getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
         return {{}, {}};
     }
 }
+
+std::vector<NFTsData>
+getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob)
+{
+    std::vector<NFTsData> nfts;
+    ripple::STLedgerEntry const sle =
+        ripple::STLedgerEntry(ripple::SerialIter{blob.data(), blob.size()}, ripple::uint256::fromVoid(key.data()));
+
+    if (sle.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE)
+        return nfts;
+
+    auto const owner = ripple::AccountID::fromVoid(key.data());
+    for (ripple::STObject const& node : sle.getFieldArray(ripple::sfNFTokens))
+        nfts.emplace_back(node.getFieldH256(ripple::sfNFTokenID), seq, owner, node.getFieldVL(ripple::sfURI));
+
+    return nfts;
+}
src/etl/NFTHelpers.h (new file, 37 lines)
@@ -0,0 +1,37 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/DBHelpers.h>

#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>

/**
 * @brief Pull NFT data from TX via ETLService
 */
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);

/**
 * @brief Pull NFT data from ledger object via loadInitialLedger
 */
std::vector<NFTsData>
getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob);
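The two declarations above split NFT indexing into a streaming path and a bulk path. A hedged sketch of how a caller divides the work; indexNFTs and its parameters are placeholders for values clio's ETL pipeline supplies, not an API from this commit.

// Sketch only; assumes etl/NFTHelpers.h and its ripple dependencies.
#include <etl/NFTHelpers.h>

void
indexNFTs(
    ripple::TxMeta const& txMeta,
    ripple::STTx const& sttx,
    std::uint32_t seq,
    std::string const& key,
    std::string const& blob)
{
    // Streaming path: per-transaction NFT changes (mint, burn, offers).
    auto const [nftTxs, maybeNFT] = getNFTDataFromTx(txMeta, sttx);

    // Bulk path: every NFT on one NFTokenPage object, used during the
    // initial ledger download.
    std::vector<NFTsData> const pageNFTs = getNFTDataFromObj(seq, key, blob);
}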
@@ -1,190 +0,0 @@
#include <etl/ProbingETLSource.h>

ProbingETLSource::ProbingETLSource(
    boost::json::object const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> nwvl,
    ETLLoadBalancer& balancer,
    boost::asio::ssl::context sslCtx)
    : ioc_{ioc}
    , sslCtx_{std::move(sslCtx)}
    , sslSrc_{make_shared<SslETLSource>(
          config,
          ioc,
          std::ref(sslCtx_),
          backend,
          subscriptions,
          nwvl,
          balancer,
          make_SSLHooks())}
    , plainSrc_{make_shared<PlainETLSource>(
          config,
          ioc,
          backend,
          subscriptions,
          nwvl,
          balancer,
          make_PlainHooks())}
{
}

void
ProbingETLSource::run()
{
    sslSrc_->run();
    plainSrc_->run();
}

void
ProbingETLSource::pause()
{
    sslSrc_->pause();
    plainSrc_->pause();
}

void
ProbingETLSource::resume()
{
    sslSrc_->resume();
    plainSrc_->resume();
}

bool
ProbingETLSource::isConnected() const
{
    return currentSrc_ && currentSrc_->isConnected();
}

bool
ProbingETLSource::hasLedger(uint32_t sequence) const
{
    if (!currentSrc_)
        return false;
    return currentSrc_->hasLedger(sequence);
}

boost::json::object
ProbingETLSource::toJson() const
{
    if (!currentSrc_)
        return {};
    return currentSrc_->toJson();
}

std::string
ProbingETLSource::toString() const
{
    if (!currentSrc_)
        return "{ probing }";
    return currentSrc_->toString();
}

bool
ProbingETLSource::loadInitialLedger(
    std::uint32_t ledgerSequence,
    std::uint32_t numMarkers,
    bool cacheOnly)
{
    if (!currentSrc_)
        return false;
    return currentSrc_->loadInitialLedger(
        ledgerSequence, numMarkers, cacheOnly);
}

std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
ProbingETLSource::fetchLedger(
    uint32_t ledgerSequence,
    bool getObjects,
    bool getObjectNeighbors)
{
    if (!currentSrc_)
        return {};
    return currentSrc_->fetchLedger(
        ledgerSequence, getObjects, getObjectNeighbors);
}

std::optional<boost::json::object>
ProbingETLSource::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (!currentSrc_)
        return {};
    return currentSrc_->forwardToRippled(request, clientIp, yield);
}

std::optional<boost::json::object>
ProbingETLSource::requestFromRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (!currentSrc_)
        return {};
    return currentSrc_->requestFromRippled(request, clientIp, yield);
}

ETLSourceHooks
ProbingETLSource::make_SSLHooks() noexcept
{
    return {// onConnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                    return ETLSourceHooks::Action::STOP;

                if (!ec)
                {
                    plainSrc_->pause();
                    currentSrc_ = sslSrc_;
                    BOOST_LOG_TRIVIAL(info)
                        << "Selected WSS as the main source: "
                        << currentSrc_->toString();
                }
                return ETLSourceHooks::Action::PROCEED;
            },
            // onDisconnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                {
                    currentSrc_ = nullptr;
                    plainSrc_->resume();
                }
                return ETLSourceHooks::Action::STOP;
            }};
}

ETLSourceHooks
ProbingETLSource::make_PlainHooks() noexcept
{
    return {// onConnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                    return ETLSourceHooks::Action::STOP;

                if (!ec)
                {
                    sslSrc_->pause();
                    currentSrc_ = plainSrc_;
                    BOOST_LOG_TRIVIAL(info)
                        << "Selected Plain WS as the main source: "
                        << currentSrc_->toString();
                }
                return ETLSourceHooks::Action::PROCEED;
            },
            // onDisconnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                {
                    currentSrc_ = nullptr;
                    sslSrc_->resume();
                }
                return ETLSourceHooks::Action::STOP;
            }};
}
@@ -1,91 +0,0 @@
#ifndef RIPPLE_APP_REPORTING_PROBINGETLSOURCE_H_INCLUDED
#define RIPPLE_APP_REPORTING_PROBINGETLSOURCE_H_INCLUDED

#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <etl/ETLSource.h>
#include <mutex>

/// This ETLSource implementation attempts to connect over both secure websocket
/// and plain websocket. First to connect pauses the other and the probing is
/// considered done at this point. If however the connected source loses
/// connection the probing is kickstarted again.
class ProbingETLSource : public ETLSource
{
    std::mutex mtx_;
    boost::asio::io_context& ioc_;
    boost::asio::ssl::context sslCtx_;
    std::shared_ptr<ETLSource> sslSrc_;
    std::shared_ptr<ETLSource> plainSrc_;
    std::shared_ptr<ETLSource> currentSrc_;

public:
    ProbingETLSource(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        ETLLoadBalancer& balancer,
        boost::asio::ssl::context sslCtx = boost::asio::ssl::context{
            boost::asio::ssl::context::tlsv12});

    ~ProbingETLSource() = default;

    void
    run() override;

    void
    pause() override;

    void
    resume() override;

    bool
    isConnected() const override;

    bool
    hasLedger(uint32_t sequence) const override;

    boost::json::object
    toJson() const override;

    std::string
    toString() const override;

    bool
    loadInitialLedger(
        std::uint32_t ledgerSequence,
        std::uint32_t numMarkers,
        bool cacheOnly = false) override;

    std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(
        uint32_t ledgerSequence,
        bool getObjects = true,
        bool getObjectNeighbors = false) override;

    std::optional<boost::json::object>
    forwardToRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;

private:
    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;

    ETLSourceHooks
    make_SSLHooks() noexcept;

    ETLSourceHooks
    make_PlainHooks() noexcept;
};

#endif
src/etl/ProbingSource.cpp (new file, 202 lines)
@@ -0,0 +1,202 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <etl/ProbingSource.h>
#include <log/Logger.h>

using namespace clio;

ProbingSource::ProbingSource(
    clio::Config const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> nwvl,
    LoadBalancer& balancer,
    boost::asio::ssl::context sslCtx)
    : sslCtx_{std::move(sslCtx)}
    , sslSrc_{make_shared<
          SslSource>(config, ioc, std::ref(sslCtx_), backend, subscriptions, nwvl, balancer, make_SSLHooks())}
    , plainSrc_{make_shared<PlainSource>(config, ioc, backend, subscriptions, nwvl, balancer, make_PlainHooks())}
{
}

void
ProbingSource::run()
{
    sslSrc_->run();
    plainSrc_->run();
}

void
ProbingSource::pause()
{
    sslSrc_->pause();
    plainSrc_->pause();
}

void
ProbingSource::resume()
{
    sslSrc_->resume();
    plainSrc_->resume();
}

bool
ProbingSource::isConnected() const
{
    return currentSrc_ && currentSrc_->isConnected();
}

bool
ProbingSource::hasLedger(uint32_t sequence) const
{
    if (!currentSrc_)
        return false;
    return currentSrc_->hasLedger(sequence);
}

boost::json::object
ProbingSource::toJson() const
{
    if (!currentSrc_)
    {
        boost::json::object sourcesJson = {
            {"ws", plainSrc_->toJson()},
            {"wss", sslSrc_->toJson()},
        };

        return {
            {"probing", sourcesJson},
        };
    }
    return currentSrc_->toJson();
}

std::string
ProbingSource::toString() const
{
    if (!currentSrc_)
        return "{probing... ws: " + plainSrc_->toString() + ", wss: " + sslSrc_->toString() + "}";
    return currentSrc_->toString();
}

boost::uuids::uuid
ProbingSource::token() const
{
    if (!currentSrc_)
        return boost::uuids::nil_uuid();
    return currentSrc_->token();
}

std::pair<std::vector<std::string>, bool>
ProbingSource::loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly)
{
    if (!currentSrc_)
        return {{}, false};
    return currentSrc_->loadInitialLedger(ledgerSequence, numMarkers, cacheOnly);
}

std::pair<grpc::Status, ProbingSource::GetLedgerResponseType>
ProbingSource::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
    if (!currentSrc_)
        return {};
    return currentSrc_->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
}

std::optional<boost::json::object>
ProbingSource::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (!currentSrc_)
        return {};
    return currentSrc_->forwardToRippled(request, clientIp, yield);
}

std::optional<boost::json::object>
ProbingSource::requestFromRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (!currentSrc_)
        return {};
    return currentSrc_->requestFromRippled(request, clientIp, yield);
}

SourceHooks
ProbingSource::make_SSLHooks() noexcept
{
    return {// onConnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                    return SourceHooks::Action::STOP;

                if (!ec)
                {
                    plainSrc_->pause();
                    currentSrc_ = sslSrc_;
                    log_.info() << "Selected WSS as the main source: " << currentSrc_->toString();
                }
                return SourceHooks::Action::PROCEED;
            },
            // onDisconnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                {
                    currentSrc_ = nullptr;
                    plainSrc_->resume();
                }
                return SourceHooks::Action::STOP;
            }};
}

SourceHooks
ProbingSource::make_PlainHooks() noexcept
{
    return {// onConnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                    return SourceHooks::Action::STOP;

                if (!ec)
                {
                    sslSrc_->pause();
                    currentSrc_ = plainSrc_;
                    log_.info() << "Selected Plain WS as the main source: " << currentSrc_->toString();
                }
                return SourceHooks::Action::PROCEED;
            },
            // onDisconnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                {
                    currentSrc_ = nullptr;
                    sslSrc_->resume();
                }
                return SourceHooks::Action::STOP;
            }};
}
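The onConnected/onDisconnected hooks above implement a small first-wins election between the two transports: whichever connection succeeds first pauses the other, and a disconnect restarts the race. A reduced sketch of the same pattern outside clio; Candidate, Prober, onUp and onDown are invented names, not clio APIs.

// Illustrative only: first-to-connect election mirroring the SourceHooks logic.
#include <memory>
#include <mutex>

struct Candidate
{
    void pause() {}
    void resume() {}
};

class Prober
{
    std::mutex mtx_;
    std::shared_ptr<Candidate> a_ = std::make_shared<Candidate>();
    std::shared_ptr<Candidate> b_ = std::make_shared<Candidate>();
    std::shared_ptr<Candidate> current_;

public:
    // Called when `who` finishes its connection attempt without error.
    void
    onUp(std::shared_ptr<Candidate> who, std::shared_ptr<Candidate> other)
    {
        std::lock_guard lck(mtx_);
        if (current_)
            return;         // somebody already won; stop probing
        other->pause();     // freeze the loser
        current_ = who;     // first to connect wins
    }

    // Called when the winner drops its connection; restart the race.
    void
    onDown()
    {
        std::lock_guard lck(mtx_);
        if (current_)
        {
            current_ = nullptr;
            a_->resume();
            b_->resume();
        }
    }
};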
src/etl/ProbingSource.h (new file, 124 lines)
@@ -0,0 +1,124 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <config/Config.h>
#include <etl/Source.h>
#include <log/Logger.h>

#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>

#include <mutex>

/**
 * @brief This Source implementation attempts to connect over both secure websocket and plain websocket.
 *
 * First to connect pauses the other and the probing is considered done at this point.
 * If however the connected source loses connection the probing is kickstarted again.
 */
class ProbingSource : public Source
{
public:
    // TODO: inject when unit tests will be written for ProbingSource
    using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;

private:
    clio::Logger log_{"ETL"};

    std::mutex mtx_;
    boost::asio::ssl::context sslCtx_;
    std::shared_ptr<Source> sslSrc_;
    std::shared_ptr<Source> plainSrc_;
    std::shared_ptr<Source> currentSrc_;

public:
    /**
     * @brief Create an instance of the probing source
     *
     * @param config The configuration to use
     * @param ioc io context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param nwvl The network validated ledgers datastructure
     * @param balancer Load balancer to use
     * @param sslCtx The SSL context to use; defaults to tlsv12
     */
    ProbingSource(
        clio::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        LoadBalancer& balancer,
        boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12});

    ~ProbingSource() = default;

    void
    run() override;

    void
    pause() override;

    void
    resume() override;

    bool
    isConnected() const override;

    bool
    hasLedger(uint32_t sequence) const override;

    boost::json::object
    toJson() const override;

    std::string
    toString() const override;

    std::pair<std::vector<std::string>, bool>
    loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly = false) override;

    std::pair<grpc::Status, GetLedgerResponseType>
    fetchLedger(uint32_t ledgerSequence, bool getObjects = true, bool getObjectNeighbors = false) override;

    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield)
        const override;

    boost::uuids::uuid
    token() const override;

private:
    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;

    SourceHooks
    make_SSLHooks() noexcept;

    SourceHooks
    make_PlainHooks() noexcept;
};
@@ -22,7 +22,7 @@ read-only mode. In read-only mode, the server does not perform ETL and simply
 publishes new ledgers as they are written to the database.
 If the database is not updated within a certain time period
 (currently hard coded at 20 seconds), clio will begin the ETL
-process and start writing to the database. Postgres will report an error when
+process and start writing to the database. The database will report an error when
 trying to write a record with a key that already exists. ETL uses this error to
 determine that another process is writing to the database, and subsequently
 falls back to a soft read-only mode. clio can also operate in strict
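The failover described in this README paragraph hinges on a keyed insert acting as a distributed lock. A schematic sketch of that handshake; tryWrite and tryBecomeWriter are invented names standing in for the backend's actual keyed write, not clio APIs.

// Schematic only: `tryWrite` is assumed to return false when the key exists.
#include <cstdint>
#include <functional>

bool
tryBecomeWriter(std::uint32_t seq, std::function<bool(std::uint32_t)> tryWrite)
{
    if (tryWrite(seq))
        return true;   // the insert won: this process is the ETL writer

    // Another process already wrote this ledger; fall back to soft
    // read-only mode and just publish ledgers as they appear.
    return false;
}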
(File diff suppressed because it is too large)
@@ -1,378 +0,0 @@
|
||||
#ifndef RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
|
||||
#define RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
|
||||
|
||||
#include <ripple/ledger/ReadView.h>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/beast/core.hpp>
|
||||
#include <boost/beast/core/string.hpp>
|
||||
#include <boost/beast/websocket.hpp>
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <etl/ETLSource.h>
|
||||
#include <subscriptions/SubscriptionManager.h>
|
||||
|
||||
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
|
||||
#include <grpcpp/grpcpp.h>
|
||||
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
#include <queue>
|
||||
|
||||
#include <chrono>
|
||||
|
||||
/**
|
||||
* Helper function for the ReportingETL, implemented in NFTHelpers.cpp, to
|
||||
* pull to-write data out of a transaction that relates to NFTs.
|
||||
*/
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
|
||||
|
||||
struct AccountTransactionsData;
|
||||
struct NFTTransactionsData;
|
||||
struct NFTsData;
|
||||
struct FormattedTransactionsData
|
||||
{
|
||||
std::vector<AccountTransactionsData> accountTxData;
|
||||
std::vector<NFTTransactionsData> nfTokenTxData;
|
||||
std::vector<NFTsData> nfTokensData;
|
||||
};
|
||||
class SubscriptionManager;
|
||||
|
||||
/**
|
||||
* This class is responsible for continuously extracting data from a
|
||||
* p2p node, and writing that data to the databases. Usually, multiple different
|
||||
* processes share access to the same network accessible databases, in which
|
||||
 * case only one such process is performing ETL and writing to the database. The
 * other processes simply monitor the database for new ledgers, and publish
 * those ledgers to the various subscription streams. If a monitoring process
 * determines that the ETL writer has failed (no new ledgers written for some
 * time), the process will attempt to become the ETL writer. If there are
 * multiple monitoring processes that try to become the ETL writer at the same
 * time, one will win out, and the others will fall back to
 * monitoring/publishing. In this sense, this class dynamically transitions from
 * monitoring to writing and from writing to monitoring, based on the activity
 * of other processes running on different machines.
 */
class ReportingETL
{
private:
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    /// Mechanism for communicating with ETL sources. ETLLoadBalancer wraps an
    /// arbitrary number of ETL sources and load balances ETL requests across
    /// those sources.
    std::shared_ptr<ETLLoadBalancer> loadBalancer_;
    std::optional<std::uint32_t> onlineDeleteInterval_;
    std::uint32_t extractorThreads_ = 1;

    enum class CacheLoadStyle { ASYNC, SYNC, NOT_AT_ALL };

    CacheLoadStyle cacheLoadStyle_ = CacheLoadStyle::ASYNC;

    // number of diffs to use to generate cursors to traverse the ledger in
    // parallel during initial cache download
    size_t numCacheDiffs_ = 32;
    // number of markers to use at one time to traverse the ledger in parallel
    // during initial cache download
    size_t numCacheMarkers_ = 48;
    // number of ledger objects to fetch concurrently per marker during cache
    // download
    size_t cachePageFetchSize_ = 512;
    // thread responsible for syncing the cache on startup
    std::thread cacheDownloader_;

    std::thread worker_;
    boost::asio::io_context& ioContext_;

    /// Strand to ensure that ledgers are published in order.
    /// If ETL is started far behind the network, ledgers will be written and
    /// published very rapidly. Monitoring processes will publish ledgers as
    /// they are written. However, to publish a ledger, the monitoring process
    /// needs to read all of the transactions for that ledger from the database.
    /// Reading the transactions from the database requires network calls, which
    /// can be slow. It is imperative however that the monitoring processes keep
    /// up with the writer, else the monitoring processes will not be able to
    /// detect if the writer failed. Therefore, publishing each ledger (which
    /// includes reading all of the transactions from the database) is done from
    /// the application-wide asio io_context, and a strand is used to ensure
    /// ledgers are published in order.
    boost::asio::io_context::strand publishStrand_;

    /// Mechanism for detecting when the network has validated a new ledger.
    /// This class provides a way to wait for a specific ledger to be validated.
    std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;

    /// Whether the software is stopping
    std::atomic_bool stopping_ = false;
    /// Whether the software is performing online delete
    // TODO this needs to live in the database, so different servers can
    // coordinate deletion
    std::atomic_bool deleting_ = false;

    /// This variable controls the number of GetLedgerData calls that will be
    /// executed in parallel during the initial ledger download. GetLedgerData
    /// allows clients to page through a ledger over many RPC calls.
    /// GetLedgerData returns a marker that is used as an offset in a subsequent
    /// call. If numMarkers_ is greater than 1, there will be multiple chains of
    /// GetLedgerData calls iterating over different parts of the same ledger in
    /// parallel. This can dramatically speed up the time to download the
    /// initial ledger. However, a higher value for this member variable puts
    /// more load on the ETL source.
    size_t numMarkers_ = 2;
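    // For example, with numMarkers_ = 4 the 256-bit key space is quartered and
    // four independent chains of GetLedgerData calls walk
    // [0x00.., 0x40..), [0x40.., 0x80..), [0x80.., 0xC0..) and [0xC0.., 0xFF..]
    // concurrently. (Illustrative only; the exact partitioning is produced by
    // getMarkers().)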

    /// Whether the process is in strict read-only mode. In strict read-only
    /// mode, the process will never attempt to become the ETL writer, and will
    /// only publish ledgers as they are written to the database.
    bool readOnly_ = false;

    /// Whether the process is writing to the database. Used by server_info
    std::atomic_bool writing_ = false;

    /// Ledger sequence to start ETL from. If this is empty, ETL will start from
    /// the next ledger validated by the network. If this is set, and the
    /// database is already populated, an error is thrown.
    std::optional<uint32_t> startSequence_;
    std::optional<uint32_t> finishSequence_;

    size_t txnThreshold_ = 0;

    /// The time that the most recently published ledger was published. Used by
    /// server_info
    std::chrono::time_point<std::chrono::system_clock> lastPublish_;

    mutable std::shared_mutex publishTimeMtx_;

    void
    setLastPublish()
    {
        std::unique_lock lck(publishTimeMtx_);
        lastPublish_ = std::chrono::system_clock::now();
    }

    /// The time that the most recently published ledger was closed.
    std::chrono::time_point<ripple::NetClock> lastCloseTime_;

    mutable std::shared_mutex closeTimeMtx_;

    void
    setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
    {
        std::unique_lock lck(closeTimeMtx_);
        lastCloseTime_ = lastCloseTime;
    }

    /// Download a ledger with specified sequence in full, via GetLedgerData,
    /// and write the data to the databases. This takes several minutes or
    /// longer.
    /// @param sequence the sequence of the ledger to download
    /// @return The ledger downloaded, with a full transaction and account state
    /// map
    std::optional<ripple::LedgerInfo>
    loadInitialLedger(uint32_t sequence);

    /// Populates the cache by walking through the given ledger. Should only be
    /// called once. The default behavior is to return immediately and populate
    /// the cache in the background. This can be overridden via a config
    /// parameter, to populate synchronously or not at all.
    void
    loadCache(uint32_t seq);

    /// Run ETL. Extracts ledgers and writes them to the database, until a
    /// write conflict occurs (or the server shuts down).
    /// @note database must already be populated when this function is
    /// called
    /// @param startSequence the first ledger to extract
    /// @return the last ledger written to the database, if any
    std::optional<uint32_t>
    runETLPipeline(uint32_t startSequence, int offset);

    /// Monitor the network for newly validated ledgers. Also monitor the
    /// database to see if any process is writing those ledgers. This function
    /// is called when the application starts, and will only return when the
    /// application is shutting down. If the software detects the database is
    /// empty, this function will call loadInitialLedger(). If the software
    /// detects ledgers are not being written, this function calls
    /// runETLPipeline(). Otherwise, this function publishes ledgers as they are
    /// written to the database.
    void
    monitor();

    /// Monitor the database for newly written ledgers.
    /// Similar to monitor(), except this function will never call
    /// runETLPipeline() or loadInitialLedger(). This function only publishes
    /// ledgers as they are written to the database.
    void
    monitorReadOnly();

    /// Extract data for a particular ledger from an ETL source. This function
    /// continuously tries to extract the specified ledger (using all available
    /// ETL sources) until the extraction succeeds, or the server shuts down.
    /// @param sequence sequence of the ledger to extract
    /// @return ledger header and transaction+metadata blobs. Empty optional
    /// if the server is shutting down
    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedgerData(uint32_t sequence);

    /// Extract data for a particular ledger from an ETL source. This function
    /// continuously tries to extract the specified ledger (using all available
    /// ETL sources) until the extraction succeeds, or the server shuts down.
    /// @param sequence sequence of the ledger to extract
    /// @return ledger header, transaction+metadata blobs, and all ledger
    /// objects created, modified or deleted between this ledger and the parent.
    /// Empty optional if the server is shutting down
    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedgerDataAndDiff(uint32_t sequence);

    /// Insert all of the extracted transactions into the ledger, returning
    /// transactions related to accounts, transactions related to NFTs, and
    /// NFTs themselves for later processing.
    /// @param ledger ledger to insert transactions into
    /// @param data data extracted from an ETL source
    /// @return struct that contains the necessary info to write to the
    /// account_transactions/account_tx and nft_token_transactions tables
    /// (mostly transaction hashes, corresponding nodestore hashes and affected
    /// accounts)
    FormattedTransactionsData
    insertTransactions(
        ripple::LedgerInfo const& ledger,
        org::xrpl::rpc::v1::GetLedgerResponse& data);

    // TODO update this documentation
    /// Build the next ledger using the previous ledger and the extracted data.
    /// This function calls insertTransactions().
    /// @note rawData should be data that corresponds to the ledger immediately
    /// following the parent
    /// @param rawData data extracted from an ETL source
    /// @return the newly built ledger and a flag indicating success
    std::pair<ripple::LedgerInfo, bool>
    buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData);

    /// Attempt to read the specified ledger from the database, and then publish
    /// that ledger to the ledgers stream.
    /// @param ledgerSequence the sequence of the ledger to publish
    /// @param maxAttempts the number of times to attempt to read the ledger
    /// from the database, at one attempt per second
    /// @return whether the ledger was found in the database and published
    bool
    publishLedger(uint32_t ledgerSequence, std::optional<uint32_t> maxAttempts);

    /// Publish the passed-in ledger
    /// @param lgrInfo the ledger to publish
    void
    publishLedger(ripple::LedgerInfo const& lgrInfo);

    bool
    isStopping()
    {
        return stopping_;
    }

    /// Get the number of markers to use during the initial ledger download.
    /// This is equivalent to the degree of parallelism during the initial
    /// ledger download.
    /// @return the number of markers
    std::uint32_t
    getNumMarkers()
    {
        return numMarkers_;
    }

    /// Start all of the necessary components and begin ETL.
    void
    run()
    {
        BOOST_LOG_TRIVIAL(info) << "Starting reporting etl";
        stopping_ = false;

        doWork();
    }

    void
    doWork();

public:
    ReportingETL(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        std::shared_ptr<NetworkValidatedLedgers> ledgers);

    static std::shared_ptr<ReportingETL>
    make_ReportingETL(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        std::shared_ptr<NetworkValidatedLedgers> ledgers)
    {
        auto etl = std::make_shared<ReportingETL>(
            config, ioc, backend, subscriptions, balancer, ledgers);

        etl->run();

        return etl;
    }

    ~ReportingETL()
    {
        BOOST_LOG_TRIVIAL(info) << "onStop called";
        BOOST_LOG_TRIVIAL(debug) << "Stopping Reporting ETL";
        stopping_ = true;

        if (worker_.joinable())
            worker_.join();
        if (cacheDownloader_.joinable())
            cacheDownloader_.join();

        BOOST_LOG_TRIVIAL(debug) << "Joined ReportingETL worker thread";
    }

    boost::json::object
    getInfo() const
    {
        boost::json::object result;

        result["etl_sources"] = loadBalancer_->toJson();
        result["is_writer"] = writing_.load();
        result["read_only"] = readOnly_;
        auto last = getLastPublish();
        if (last.time_since_epoch().count() != 0)
            result["last_publish_age_seconds"] =
                std::to_string(lastPublishAgeSeconds());
        return result;
    }

    std::chrono::time_point<std::chrono::system_clock>
    getLastPublish() const
    {
        std::shared_lock lck(publishTimeMtx_);
        return lastPublish_;
    }

    std::uint32_t
    lastPublishAgeSeconds() const
    {
        return std::chrono::duration_cast<std::chrono::seconds>(
                   std::chrono::system_clock::now() - getLastPublish())
            .count();
    }

    std::uint32_t
    lastCloseAgeSeconds() const
    {
        std::shared_lock lck(closeTimeMtx_);
        auto now = std::chrono::duration_cast<std::chrono::seconds>(
                       std::chrono::system_clock::now().time_since_epoch())
                       .count();
        auto closeTime = lastCloseTime_.time_since_epoch().count();
        if (now < (rippleEpochStart + closeTime))
            return 0;
        return now - (rippleEpochStart + closeTime);
    }
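
    // Worked example: rippleEpochStart (defined elsewhere in clio) is
    // 946684800, the Unix timestamp of the XRPL epoch 2000-01-01T00:00:00Z.
    // If the last close time is 700000000s after that epoch and the current
    // Unix time is 1646684830, the reported age is
    // 1646684830 - (946684800 + 700000000) = 30 seconds.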
};

#endif
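
// A rough sketch (illustrative only, with arguments elided) of how monitor()
// ties the pieces above together; the real logic lives in ReportingETL.cpp:
//
//   void ReportingETL::monitor()
//   {
//       // bootstrap an empty database via loadInitialLedger(), then:
//       while (!stopping_)
//       {
//           // wait for the network to validate the next sequence, then try
//           // to publish it from the database (written by the ETL writer)
//           if (!publishLedger(nextSequence, maxAttempts))
//           {
//               // nothing was written in time: assume the writer failed and
//               // try to become the writer; on a write conflict another
//               // process won the race, so fall back to publishing
//               runETLPipeline(nextSequence, offset);
//           }
//       }
//   }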

src/etl/Source.cpp (new file, 819 lines)
@@ -0,0 +1,819 @@

//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <backend/DBHelpers.h>
#include <etl/ETLService.h>
#include <etl/LoadBalancer.h>
#include <etl/NFTHelpers.h>
#include <etl/ProbingSource.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>

#include <ripple/beast/net/IPEndpoint.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/asio/strand.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/json.hpp>

#include <thread>

using namespace clio;

static boost::beast::websocket::stream_base::timeout
make_TimeoutOption()
{
    // See #289 for details.
    // TODO: investigate the issue and find if there is a solution other than
    // introducing artificial timeouts.
    if (true)
    {
        // The only difference between this and the suggested client role is
        // that idle_timeout is set to 20 instead of none()
        auto opt = boost::beast::websocket::stream_base::timeout{};
        opt.handshake_timeout = std::chrono::seconds(30);
        opt.idle_timeout = std::chrono::seconds(20);
        opt.keep_alive_pings = false;
        return opt;
    }
    else
    {
        return boost::beast::websocket::stream_base::timeout::suggested(boost::beast::role_type::client);
    }
}

template <class Derived>
void
SourceImpl<Derived>::reconnect(boost::beast::error_code ec)
{
    if (paused_)
        return;

    if (connected_)
        hooks_.onDisconnected(ec);

    connected_ = false;
    // These are somewhat normal errors. operation_aborted occurs on shutdown,
    // when the timer is cancelled. connection_refused will occur repeatedly
    // if we cannot connect to the transaction processing process.
    std::string err = ec.message();
    if (ec.category() == boost::asio::error::get_ssl_category())
    {
        err = std::string(" (") + boost::lexical_cast<std::string>(ERR_GET_LIB(ec.value())) + "," +
            boost::lexical_cast<std::string>(ERR_GET_REASON(ec.value())) + ") ";
        // ERR_PACK /* crypto/err/err.h */
        char buf[128];
        ::ERR_error_string_n(ec.value(), buf, sizeof(buf));
        err += buf;

        std::cout << err << std::endl;
    }

    if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused)
    {
        log_.error() << "error code = " << ec << " - " << toString();
    }
    else
    {
        log_.warn() << "error code = " << ec << " - " << toString();
    }

    // exponentially increasing timeouts, with a max of 30 seconds
    size_t waitTime = std::min(pow(2, numFailures_), 30.0);
    numFailures_++;
    timer_.expires_after(boost::asio::chrono::seconds(waitTime));
    timer_.async_wait([this](auto ec) {
        bool startAgain = (ec != boost::asio::error::operation_aborted);
        log_.trace() << "async_wait : ec = " << ec;
        derived().close(startAgain);
    });
}
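
// A minimal standalone sketch of the retry schedule used in reconnect() above:
// the wait doubles with each consecutive failure and is capped at 30 seconds,
// so failures 0,1,2,3,4,5,6,... wait 1,2,4,8,16,30,30,... seconds.
#include <algorithm>
#include <cmath>
#include <cstddef>

[[maybe_unused]] static std::size_t
backoffSeconds(std::size_t numFailures)
{
    return static_cast<std::size_t>(std::min(std::pow(2.0, static_cast<double>(numFailures)), 30.0));
}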

void
PlainSource::close(bool startAgain)
{
    timer_.cancel();
    ioc_.post([this, startAgain]() {
        if (closing_)
            return;

        if (derived().ws().is_open())
        {
            // onStop() also calls close(). If async_close is called twice,
            // an assertion fails. Using closing_ makes sure async_close is
            // only called once
            closing_ = true;
            derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
                if (ec)
                {
                    log_.error() << " async_close : "
                                 << "error code = " << ec << " - " << toString();
                }
                closing_ = false;
                if (startAgain)
                {
                    ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::tcp_stream>>(
                        boost::asio::make_strand(ioc_));

                    run();
                }
            });
        }
        else if (startAgain)
        {
            ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::tcp_stream>>(
                boost::asio::make_strand(ioc_));

            run();
        }
    });
}

void
SslSource::close(bool startAgain)
{
    timer_.cancel();
    ioc_.post([this, startAgain]() {
        if (closing_)
            return;

        if (derived().ws().is_open())
        {
            // onStop() also calls close(). If async_close is called twice,
            // an assertion fails. Using closing_ makes sure async_close is
            // only called once
            closing_ = true;
            derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
                if (ec)
                {
                    log_.error() << " async_close : "
                                 << "error code = " << ec << " - " << toString();
                }
                closing_ = false;
                if (startAgain)
                {
                    ws_ = std::make_unique<
                        boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
                        boost::asio::make_strand(ioc_), *sslCtx_);

                    run();
                }
            });
        }
        else if (startAgain)
        {
            ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
                boost::asio::make_strand(ioc_), *sslCtx_);

            run();
        }
    });
}

template <class Derived>
void
SourceImpl<Derived>::onResolve(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // try again
        reconnect(ec);
    }
    else
    {
        boost::beast::get_lowest_layer(derived().ws()).expires_after(std::chrono::seconds(30));
        boost::beast::get_lowest_layer(derived().ws()).async_connect(results, [this](auto ec, auto ep) {
            derived().onConnect(ec, ep);
        });
    }
}

void
PlainSource::onConnect(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // start over
        reconnect(ec);
    }
    else
    {
        numFailures_ = 0;
        // Turn off the timeout on the tcp stream, because the websocket
        // stream has its own timeout system
        boost::beast::get_lowest_layer(derived().ws()).expires_never();

        // Set a desired timeout for the websocket stream
        derived().ws().set_option(make_TimeoutOption());

        // Set a decorator to change the User-Agent of the handshake
        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(boost::beast::http::field::user_agent, "clio-client");

                req.set("X-User", "clio-client");
            }));

        // Build the host string. This will provide the value of the
        // Host HTTP header during the WebSocket handshake.
        // See https://tools.ietf.org/html/rfc7230#section-5.4
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the websocket handshake
        derived().ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
    }
}

void
SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // start over
        reconnect(ec);
    }
    else
    {
        numFailures_ = 0;
        // Turn off the timeout on the tcp stream, because the websocket
        // stream has its own timeout system
        boost::beast::get_lowest_layer(derived().ws()).expires_never();

        // Set a desired timeout for the websocket stream
        derived().ws().set_option(make_TimeoutOption());

        // Set a decorator to change the User-Agent of the handshake
        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(boost::beast::http::field::user_agent, "clio-client");

                req.set("X-User", "clio-client");
            }));

        // Build the host string. This will provide the value of the
        // Host HTTP header during the WebSocket handshake.
        // See https://tools.ietf.org/html/rfc7230#section-5.4
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the SSL handshake; the websocket handshake follows in
        // onSslHandshake()
        ws().next_layer().async_handshake(
            boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) { onSslHandshake(ec, endpoint); });
    }
}

void
SslSource::onSslHandshake(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
    if (ec)
    {
        reconnect(ec);
    }
    else
    {
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the websocket handshake
        ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (auto action = hooks_.onConnected(ec); action == SourceHooks::Action::STOP)
        return;

    if (ec)
    {
        // start over
        reconnect(ec);
    }
    else
    {
        boost::json::object jv{
            {"command", "subscribe"}, {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}}};
        std::string s = boost::json::serialize(jv);
        log_.trace() << "Sending subscribe stream message";

        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(
                    boost::beast::http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " clio-client");

                req.set("X-User", "coro-client");
            }));

        // Send the message
        derived().ws().async_write(boost::asio::buffer(s), [this](auto ec, size_t size) { onWrite(ec, size); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onWrite(boost::beast::error_code ec, size_t bytesWritten)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // start over
        reconnect(ec);
    }
    else
    {
        derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    // if error or error reading message, start over
    if (ec)
    {
        reconnect(ec);
    }
    else
    {
        handleMessage();
        boost::beast::flat_buffer buffer;
        swap(readBuffer_, buffer);

        log_.trace() << "calling async_read - " << toString();
        derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
    }
}

template <class Derived>
bool
SourceImpl<Derived>::handleMessage()
{
    log_.trace() << toString();

    setLastMsgTime();
    connected_ = true;
    try
    {
        std::string msg{static_cast<char const*>(readBuffer_.data().data()), readBuffer_.size()};
        log_.trace() << msg;
        boost::json::value raw = boost::json::parse(msg);
        log_.trace() << "parsed";
        boost::json::object response = raw.as_object();

        uint32_t ledgerIndex = 0;
        if (response.contains("result"))
        {
            boost::json::object result = response["result"].as_object();
            if (result.contains("ledger_index"))
            {
                ledgerIndex = result["ledger_index"].as_int64();
            }
            if (result.contains("validated_ledgers"))
            {
                boost::json::string const& validatedLedgers = result["validated_ledgers"].as_string();

                setValidatedRange({validatedLedgers.c_str(), validatedLedgers.size()});
            }
            log_.info() << "Received a message on ledger subscription stream. "
                        << "Message : " << response << " - " << toString();
        }
        else if (response.contains("type") && response["type"] == "ledgerClosed")
        {
            log_.info() << "Received a message on ledger subscription stream. "
                        << "Message : " << response << " - " << toString();
            if (response.contains("ledger_index"))
            {
                ledgerIndex = response["ledger_index"].as_int64();
            }
            if (response.contains("validated_ledgers"))
            {
                boost::json::string const& validatedLedgers = response["validated_ledgers"].as_string();
                setValidatedRange({validatedLedgers.c_str(), validatedLedgers.size()});
            }
        }
        else
        {
            if (balancer_.shouldPropagateTxnStream(this))
            {
                if (response.contains("transaction"))
                {
                    forwardCache_.freshen();
                    subscriptions_->forwardProposedTransaction(response);
                }
                else if (response.contains("type") && response["type"] == "validationReceived")
                {
                    subscriptions_->forwardValidation(response);
                }
                else if (response.contains("type") && response["type"] == "manifestReceived")
                {
                    subscriptions_->forwardManifest(response);
                }
            }
        }

        if (ledgerIndex != 0)
        {
            log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - " << toString();
            networkValidatedLedgers_->push(ledgerIndex);
        }
        return true;
    }
    catch (std::exception const& e)
    {
        log_.error() << "Exception in handleMessage : " << e.what();
        return false;
    }
}
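
// For reference, a ledgerClosed message as handled above looks roughly like
// this (fields abbreviated; exact contents depend on the rippled version):
//
//   {
//     "type": "ledgerClosed",
//     "ledger_index": 75000000,
//     "validated_ledgers": "74000000-75000000"
//   }
//
// handleMessage() pushes 75000000 onto networkValidatedLedgers_ and records
// the validated range so that hasLedger() can answer queries for this source.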

// TODO: move to detail
class AsyncCallData
{
    clio::Logger log_{"ETL"};

    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;

    org::xrpl::rpc::v1::GetLedgerDataRequest request_;
    std::unique_ptr<grpc::ClientContext> context_;

    grpc::Status status_;
    unsigned char nextPrefix_;

    std::string lastKey_;

public:
    AsyncCallData(uint32_t seq, ripple::uint256 const& marker, std::optional<ripple::uint256> const& nextMarker)
    {
        request_.mutable_ledger()->set_sequence(seq);
        if (marker.isNonZero())
        {
            request_.set_marker(marker.data(), marker.size());
        }
        request_.set_user("ETL");
        nextPrefix_ = 0x00;
        if (nextMarker)
            nextPrefix_ = nextMarker->data()[0];

        unsigned char prefix = marker.data()[0];

        log_.debug() << "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
                     << " . prefix = " << ripple::strHex(std::string(1, prefix))
                     << " . nextPrefix_ = " << ripple::strHex(std::string(1, nextPrefix_));

        assert(nextPrefix_ > prefix || nextPrefix_ == 0x00);

        cur_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        next_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        context_ = std::make_unique<grpc::ClientContext>();
    }

    enum class CallStatus { MORE, DONE, ERRORED };

    CallStatus
    process(
        std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
        grpc::CompletionQueue& cq,
        BackendInterface& backend,
        bool abort,
        bool cacheOnly = false)
    {
        log_.trace() << "Processing response. "
                     << "Marker prefix = " << getMarkerPrefix();
        if (abort)
        {
            log_.error() << "AsyncCallData aborted";
            return CallStatus::ERRORED;
        }
        if (!status_.ok())
        {
            log_.error() << "AsyncCallData status_ not ok: "
                         << " code = " << status_.error_code() << " message = " << status_.error_message();
            return CallStatus::ERRORED;
        }
        if (!next_->is_unlimited())
        {
            log_.warn() << "AsyncCallData is_unlimited is false. Make sure "
                           "secure_gateway is set correctly at the ETL source";
        }

        std::swap(cur_, next_);

        bool more = true;

        // if no marker returned, we are done
        if (cur_->marker().size() == 0)
            more = false;

        // if returned marker is greater than our end, we are done
        unsigned char prefix = cur_->marker()[0];
        if (nextPrefix_ != 0x00 && prefix >= nextPrefix_)
            more = false;

        // if we are not done, make the next async call
        if (more)
        {
            request_.set_marker(std::move(cur_->marker()));
            call(stub, cq);
        }

        auto const numObjects = cur_->ledger_objects().objects_size();
        log_.debug() << "Writing " << numObjects << " objects";

        std::vector<Backend::LedgerObject> cacheUpdates;
        cacheUpdates.reserve(numObjects);

        for (int i = 0; i < numObjects; ++i)
        {
            auto& obj = *(cur_->mutable_ledger_objects()->mutable_objects(i));
            if (!more && nextPrefix_ != 0x00)
            {
                if (((unsigned char)obj.key()[0]) >= nextPrefix_)
                    continue;
            }
            cacheUpdates.push_back(
                {*ripple::uint256::fromVoidChecked(obj.key()),
                 {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
            if (!cacheOnly)
            {
                if (lastKey_.size())
                    backend.writeSuccessor(std::move(lastKey_), request_.ledger().sequence(), std::string{obj.key()});
                lastKey_ = obj.key();
                backend.writeNFTs(getNFTDataFromObj(request_.ledger().sequence(), obj.key(), obj.data()));
                backend.writeLedgerObject(
                    std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data()));
            }
        }
        backend.cache().update(cacheUpdates, request_.ledger().sequence(), cacheOnly);
        log_.debug() << "Wrote " << numObjects << " objects. Got more: " << (more ? "YES" : "NO");

        return more ? CallStatus::MORE : CallStatus::DONE;
    }

    void
    call(std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub, grpc::CompletionQueue& cq)
    {
        context_ = std::make_unique<grpc::ClientContext>();

        std::unique_ptr<grpc::ClientAsyncResponseReader<org::xrpl::rpc::v1::GetLedgerDataResponse>> rpc(
            stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq));

        rpc->StartCall();

        rpc->Finish(next_.get(), &status_, this);
    }

    std::string
    getMarkerPrefix()
    {
        if (next_->marker().size() == 0)
            return "";
        else
            return ripple::strHex(std::string{next_->marker().data()[0]});
    }

    std::string
    getLastKey()
    {
        return lastKey_;
    }
};
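
// A sketch of how the key space can be split into numMarkers contiguous
// ranges for the parallel download below. The real partitioning comes from
// getMarkers() (declared in etl/ETLHelpers.h); this illustrative version
// spaces the first byte of each marker evenly, which matches the prefix
// comparisons done in AsyncCallData::process().
[[maybe_unused]] static std::vector<ripple::uint256>
evenlySpacedMarkers(std::size_t numMarkers)
{
    assert(numMarkers >= 1 && numMarkers <= 256);
    unsigned char const incr = static_cast<unsigned char>(256 / numMarkers);

    std::vector<ripple::uint256> markers;
    markers.reserve(numMarkers);

    ripple::uint256 base{0};
    for (std::size_t i = 0; i < numMarkers; ++i)
    {
        markers.push_back(base);
        base.data()[0] += incr;  // advance the most significant byte
    }
    return markers;
}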

template <class Derived>
std::pair<std::vector<std::string>, bool>
SourceImpl<Derived>::loadInitialLedger(uint32_t sequence, uint32_t numMarkers, bool cacheOnly)
{
    if (!stub_)
        return {{}, false};

    grpc::CompletionQueue cq;
    void* tag;
    bool ok = false;
    std::vector<AsyncCallData> calls;
    auto markers = getMarkers(numMarkers);

    for (size_t i = 0; i < markers.size(); ++i)
    {
        std::optional<ripple::uint256> nextMarker;

        if (i + 1 < markers.size())
            nextMarker = markers[i + 1];

        calls.emplace_back(sequence, markers[i], nextMarker);
    }

    log_.debug() << "Starting data download for ledger " << sequence << ". Using source = " << toString();

    for (auto& c : calls)
        c.call(stub_, cq);

    size_t numFinished = 0;
    bool abort = false;
    size_t incr = 500000;
    size_t progress = incr;
    std::vector<std::string> edgeKeys;

    while (numFinished < calls.size() && cq.Next(&tag, &ok))
    {
        assert(tag);
        auto ptr = static_cast<AsyncCallData*>(tag);

        if (!ok)
        {
            log_.error() << "loadInitialLedger - ok is false";
            return {{}, false};  // handle cancelled
        }
        else
        {
            log_.trace() << "Marker prefix = " << ptr->getMarkerPrefix();

            auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
            if (result != AsyncCallData::CallStatus::MORE)
            {
                numFinished++;
                log_.debug() << "Finished a marker. "
                             << "Current number of finished = " << numFinished;

                std::string lastKey = ptr->getLastKey();

                if (lastKey.size())
                    edgeKeys.push_back(ptr->getLastKey());
            }

            if (result == AsyncCallData::CallStatus::ERRORED)
                abort = true;

            if (backend_->cache().size() > progress)
            {
                log_.info() << "Downloaded " << backend_->cache().size() << " records from rippled";
                progress += incr;
            }
        }
    }

    log_.info() << "Finished loadInitialLedger. cache size = " << backend_->cache().size();
    return {std::move(edgeKeys), !abort};
}

template <class Derived>
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
SourceImpl<Derived>::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
    org::xrpl::rpc::v1::GetLedgerResponse response;
    if (!stub_)
        return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

    // ledger header with txns and metadata
    org::xrpl::rpc::v1::GetLedgerRequest request;
    grpc::ClientContext context;
    request.mutable_ledger()->set_sequence(ledgerSequence);
    request.set_transactions(true);
    request.set_expand(true);
    request.set_get_objects(getObjects);
    request.set_get_object_neighbors(getObjectNeighbors);
    request.set_user("ETL");
    grpc::Status status = stub_->GetLedger(&context, request, &response);
    if (status.ok() && !response.is_unlimited())
    {
        log_.warn() << "SourceImpl::fetchLedger - is_unlimited is "
                       "false. Make sure secure_gateway is set "
                       "correctly on the ETL source. source = "
                    << toString() << " status = " << status.error_message();
    }
    return {status, std::move(response)};
}

template <class Derived>
std::optional<boost::json::object>
SourceImpl<Derived>::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (auto resp = forwardCache_.get(request); resp)
    {
        log_.debug() << "request hit forwardCache";
        return resp;
    }

    return requestFromRippled(request, clientIp, yield);
}

template <class Derived>
std::optional<boost::json::object>
SourceImpl<Derived>::requestFromRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    log_.trace() << "Attempting to forward request to tx. "
                 << "request = " << boost::json::serialize(request);

    boost::json::object response;
    if (!connected_)
    {
        log_.error() << "Attempted to proxy but failed to connect to tx";
        return {};
    }
    namespace beast = boost::beast;          // from <boost/beast.hpp>
    namespace http = beast::http;            // from <boost/beast/http.hpp>
    namespace websocket = beast::websocket;  // from <boost/beast/websocket.hpp>
    namespace net = boost::asio;             // from <boost/asio.hpp>
    using tcp = boost::asio::ip::tcp;        // from <boost/asio/ip/tcp.hpp>
    try
    {
        boost::beast::error_code ec;
        // These objects perform our I/O
        tcp::resolver resolver{ioc_};

        log_.trace() << "Creating websocket";
        auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);

        // Look up the domain name
        auto const results = resolver.async_resolve(ip_, wsPort_, yield[ec]);
        if (ec)
            return {};

        ws->next_layer().expires_after(std::chrono::seconds(3));

        log_.trace() << "Connecting websocket";
        // Make the connection on the IP address we get from a lookup
        ws->next_layer().async_connect(results, yield[ec]);
        if (ec)
            return {};

        // Set a decorator to change the User-Agent of the handshake
        // and to tell rippled to charge the client IP for RPC
        // resources. See "secure_gateway" in
        // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg
        ws->set_option(websocket::stream_base::decorator([&clientIp](websocket::request_type& req) {
            req.set(http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " websocket-client-coro");
            req.set(http::field::forwarded, "for=" + clientIp);
        }));
        log_.trace() << "client ip: " << clientIp;

        log_.trace() << "Performing websocket handshake";
        // Perform the websocket handshake
        ws->async_handshake(ip_, "/", yield[ec]);
        if (ec)
            return {};

        log_.trace() << "Sending request";
        // Send the message
        ws->async_write(net::buffer(boost::json::serialize(request)), yield[ec]);
        if (ec)
            return {};

        beast::flat_buffer buffer;
        ws->async_read(buffer, yield[ec]);
        if (ec)
            return {};

        auto begin = static_cast<char const*>(buffer.data().data());
        auto end = begin + buffer.data().size();
        auto parsed = boost::json::parse(std::string(begin, end));

        if (!parsed.is_object())
        {
            log_.error() << "Error parsing response: " << std::string{begin, end};
            return {};
        }
        log_.trace() << "Successfully forwarded request";

        response = parsed.as_object();

        response["forwarded"] = true;
        return response;
    }
    catch (std::exception const& e)
    {
        log_.error() << "Encountered exception : " << e.what();
        return {};
    }
}
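
// For illustration, a caller invokes the forwarding path above from inside an
// asio coroutine, roughly like so (everything except forwardToRippled is
// hypothetical):
//
//   boost::asio::spawn(ioc, [&source](boost::asio::yield_context yield) {
//       boost::json::object request{{"command", "fee"}};
//       if (auto res = source.forwardToRippled(request, "127.0.0.1", yield))
//           std::cout << boost::json::serialize(*res) << "\n";
//   });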

src/etl/Source.h (new file, 568 lines)
@@ -0,0 +1,568 @@

//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <etl/ETLHelpers.h>
#include <etl/impl/ForwardCache.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>

#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

#include <boost/algorithm/string.hpp>
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>

class LoadBalancer;
class Source;
class ProbingSource;
class SubscriptionManager;

// TODO: we use Source so that we can store a vector of Sources, but we also
// use CRTP for implementation of the common logic - this is a bit strange
// because CRTP as used here is supposed to be used instead of an abstract
// base. Maybe we should rework this a bit. At this point there is not too
// much use in the CRTP implementation - we can move things into the base
// class instead.

/**
 * @brief Base class for all ETL sources
 */
class Source
{
public:
    virtual bool
    isConnected() const = 0;

    virtual boost::json::object
    toJson() const = 0;

    virtual void
    run() = 0;

    virtual void
    pause() = 0;

    virtual void
    resume() = 0;

    virtual std::string
    toString() const = 0;

    virtual bool
    hasLedger(uint32_t sequence) const = 0;

    virtual std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(uint32_t ledgerSequence, bool getObjects = true, bool getObjectNeighbors = false) = 0;

    virtual std::pair<std::vector<std::string>, bool>
    loadInitialLedger(uint32_t sequence, std::uint32_t numMarkers, bool cacheOnly = false) = 0;

    virtual std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield)
        const = 0;

    virtual boost::uuids::uuid
    token() const = 0;

    virtual ~Source() = default;

    bool
    operator==(Source const& other) const
    {
        return token() == other.token();
    }

protected:
    clio::Logger log_{"ETL"};

private:
    friend clio::detail::ForwardCache;
    friend ProbingSource;

    virtual std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const = 0;
};

/**
 * @brief Hooks for source events such as connects and disconnects
 */
struct SourceHooks
{
    enum class Action { STOP, PROCEED };

    std::function<Action(boost::beast::error_code)> onConnected;
    std::function<Action(boost::beast::error_code)> onDisconnected;
};
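
// For illustration, an owner (e.g. the LoadBalancer) might wire the hooks up
// along these lines (hypothetical wiring):
//
//   SourceHooks hooks{
//       [](boost::beast::error_code) { return SourceHooks::Action::PROCEED; },  // onConnected
//       [](boost::beast::error_code) { return SourceHooks::Action::PROCEED; }   // onDisconnected
//   };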

/**
 * @brief Base implementation of shared source logic (using CRTP)
 */
template <class Derived>
class SourceImpl : public Source
{
    std::string wsPort_;
    std::string grpcPort_;

    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
    boost::asio::ip::tcp::resolver resolver_;
    boost::beast::flat_buffer readBuffer_;

    std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;
    std::string validatedLedgersRaw_{"N/A"};
    std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;

    mutable std::mutex mtx_;
    std::atomic_bool connected_{false};

    // true if this ETL source is forwarding transactions received on the transactions_proposed stream. There are
    // usually multiple ETL sources, so to avoid forwarding the same transaction multiple times, we only forward from
    // one particular ETL source at a time.
    std::atomic_bool forwardingStream_{false};

    std::chrono::system_clock::time_point lastMsgTime_;
    mutable std::mutex lastMsgTimeMtx_;

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    LoadBalancer& balancer_;

    clio::detail::ForwardCache forwardCache_;
    boost::uuids::uuid uuid_;

protected:
    std::string ip_;
    size_t numFailures_ = 0;

    boost::asio::io_context& ioc_;
    boost::asio::steady_timer timer_;

    std::atomic_bool closing_{false};
    std::atomic_bool paused_{false};

    SourceHooks hooks_;

public:
    /**
     * @brief Create ETL source without gRPC endpoint
     *
     * Fetch ledger and load initial ledger will fail for this source.
     * Primarily used in read-only mode, to monitor when ledgers are validated.
     */
    SourceImpl(
        clio::Config const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
        LoadBalancer& balancer,
        SourceHooks hooks)
        : resolver_(boost::asio::make_strand(ioContext))
        , networkValidatedLedgers_(networkValidatedLedgers)
        , backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , forwardCache_(config, ioContext, *this)
        , ioc_(ioContext)
        , timer_(ioContext)
        , hooks_(hooks)
    {
        static boost::uuids::random_generator uuidGenerator;
        uuid_ = uuidGenerator();

        ip_ = config.valueOr<std::string>("ip", {});
        wsPort_ = config.valueOr<std::string>("ws_port", {});

        if (auto value = config.maybeValue<std::string>("grpc_port"); value)
        {
            grpcPort_ = *value;
            try
            {
                boost::asio::ip::tcp::endpoint endpoint{boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
                std::stringstream ss;
                ss << endpoint;
                grpc::ChannelArguments chArgs;
                chArgs.SetMaxReceiveMessageSize(-1);
                stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
                    grpc::CreateCustomChannel(ss.str(), grpc::InsecureChannelCredentials(), chArgs));
                log_.debug() << "Made stub for remote = " << toString();
            }
            catch (std::exception const& e)
            {
                log_.debug() << "Exception while creating stub = " << e.what() << " . Remote = " << toString();
            }
        }
    }

    ~SourceImpl()
    {
        derived().close(false);
    }

    bool
    isConnected() const override
    {
        return connected_;
    }

    boost::uuids::uuid
    token() const override
    {
        return uuid_;
    }

    std::chrono::system_clock::time_point
    getLastMsgTime() const
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        return lastMsgTime_;
    }

    void
    setLastMsgTime()
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        lastMsgTime_ = std::chrono::system_clock::now();
    }

    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;

    /**
     * @param sequence ledger sequence to check for
     * @return true if this source has the desired ledger
     */
    bool
    hasLedger(uint32_t sequence) const override
    {
        std::lock_guard lck(mtx_);
        for (auto& pair : validatedLedgers_)
        {
            if (sequence >= pair.first && sequence <= pair.second)
            {
                return true;
            }
            else if (sequence < pair.first)
            {
                // validatedLedgers_ is a sorted list of disjoint ranges;
                // if the sequence comes before this range, the sequence will
                // come before all subsequent ranges
                return false;
            }
        }
        return false;
    }

    /**
     * @brief Process the validated range received on the ledgers stream and set the appropriate member variables
     *
     * @param range validated range received on ledgers stream
     */
    void
    setValidatedRange(std::string const& range)
    {
        std::vector<std::pair<uint32_t, uint32_t>> pairs;
        std::vector<std::string> ranges;
        boost::split(ranges, range, boost::is_any_of(","));
        for (auto& pair : ranges)
        {
            std::vector<std::string> minAndMax;

            boost::split(minAndMax, pair, boost::is_any_of("-"));

            if (minAndMax.size() == 1)
            {
                uint32_t sequence = std::stoll(minAndMax[0]);
                pairs.push_back(std::make_pair(sequence, sequence));
            }
            else
            {
                assert(minAndMax.size() == 2);
                uint32_t min = std::stoll(minAndMax[0]);
                uint32_t max = std::stoll(minAndMax[1]);
                pairs.push_back(std::make_pair(min, max));
            }
        }
        std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { return left.first < right.first; });

        // we only hold the lock here to avoid blocking during string processing
        std::lock_guard lck(mtx_);
        validatedLedgers_ = std::move(pairs);
        validatedLedgersRaw_ = range;
    }
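
    // For example, the range string "32570-32571,32580,32590-32600" parses to
    // the sorted pairs {32570, 32571}, {32580, 32580} and {32590, 32600};
    // single entries become one-element ranges.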

    /**
     * @return the validated range of this source
     * @note this is only used by server_info
     */
    std::string
    getValidatedRange() const
    {
        std::lock_guard lck(mtx_);
        return validatedLedgersRaw_;
    }

    /**
     * @brief Fetch the specified ledger
     *
     * @param ledgerSequence sequence of the ledger to fetch
     * @param getObjects whether to get the account state diff between this ledger and the prior one
     * @return the extracted data and the result status
     */
    std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(uint32_t ledgerSequence, bool getObjects = true, bool getObjectNeighbors = false) override;

    /**
     * @brief Produces a human-readable string with info about the source
     */
    std::string
    toString() const override
    {
        return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ + ", web socket port: " + wsPort_ +
            ", grpc port: " + grpcPort_ + "}";
    }

    /**
     * @brief Produces stats for this source in a json object
     * @return json object with stats
     */
    boost::json::object
    toJson() const override
    {
        boost::json::object res;

        res["validated_range"] = getValidatedRange();
        res["is_connected"] = std::to_string(isConnected());
        res["ip"] = ip_;
        res["ws_port"] = wsPort_;
        res["grpc_port"] = grpcPort_;

        auto last = getLastMsgTime();
        if (last.time_since_epoch().count() != 0)
            res["last_msg_age_seconds"] = std::to_string(
                std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - getLastMsgTime())
                    .count());

        return res;
    }

    /**
     * @brief Download a ledger in full
     *
     * @param ledgerSequence sequence of the ledger to download
     * @param numMarkers number of markers to use, i.e. the degree of parallelism for the download
     * @param cacheOnly only update the cache rather than writing the objects to the database
     * @return the last key downloaded by each marker and whether the download was successful
     */
    std::pair<std::vector<std::string>, bool>
    loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly = false) override;

    /**
     * @brief Attempt to reconnect to the ETL source
     */
    void
    reconnect(boost::beast::error_code ec);

    /**
     * @brief Pause the source, effectively stopping it from trying to reconnect
     */
    void
    pause() override
    {
        paused_ = true;
        derived().close(false);
    }

    /**
     * @brief Resume the source, allowing it to reconnect again
     */
    void
    resume() override
    {
        paused_ = false;
        derived().close(true);
    }

    /**
     * @brief Callback for resolving the server host
     */
    void
    onResolve(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results);

    /**
     * @brief Callback for connection to the server
     */
    virtual void
    onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) = 0;

    /**
     * @brief Callback for handshake with the server
     */
    void
    onHandshake(boost::beast::error_code ec);

    /**
     * @brief Callback for writing data
     */
    void
    onWrite(boost::beast::error_code ec, size_t size);

    /**
     * @brief Callback for data available to read
     */
    void
    onRead(boost::beast::error_code ec, size_t size);

    /**
     * @brief Handle the most recently received message
     * @return true if the message was handled successfully; false on error
     */
    bool
    handleMessage();

    /**
     * @brief Forward a request to rippled
     * @return response wrapped in an optional on success; nullopt otherwise
     */
    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield)
        const override;

protected:
    Derived&
    derived()
    {
        return static_cast<Derived&>(*this);
    }

    void
    run() override
    {
        log_.trace() << toString();

        auto const host = ip_;
        auto const port = wsPort_;

        resolver_.async_resolve(host, port, [this](auto ec, auto results) { onResolve(ec, results); });
    }
};

class PlainSource : public SourceImpl<PlainSource>
{
    std::unique_ptr<boost::beast::websocket::stream<boost::beast::tcp_stream>> ws_;

public:
    PlainSource(
        clio::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        LoadBalancer& balancer,
        SourceHooks hooks)
        : SourceImpl(config, ioc, backend, subscriptions, nwvl, balancer, std::move(hooks))
        , ws_(std::make_unique<boost::beast::websocket::stream<boost::beast::tcp_stream>>(
              boost::asio::make_strand(ioc)))
    {
    }

    /**
     * @brief Callback for connection to the server
     */
    void
    onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
        override;

    /**
     * @brief Close the websocket
     * @param startAgain whether to reconnect
     */
    void
    close(bool startAgain);

    boost::beast::websocket::stream<boost::beast::tcp_stream>&
    ws()
    {
        return *ws_;
    }
};

class SslSource : public SourceImpl<SslSource>
{
    std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx_;

    std::unique_ptr<boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>> ws_;

public:
    SslSource(
        clio::Config const& config,
        boost::asio::io_context& ioc,
        std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        LoadBalancer& balancer,
        SourceHooks hooks)
        : SourceImpl(config, ioc, backend, subscriptions, nwvl, balancer, std::move(hooks))
        , sslCtx_(sslCtx)
        , ws_(std::make_unique<boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
              boost::asio::make_strand(ioc_),
              *sslCtx_))
    {
    }

    /**
     * @brief Callback for connection to the server
     */
    void
    onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
        override;

    /**
     * @brief Callback for SSL handshake completion
     */
    void
    onSslHandshake(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint);

    /**
     * @brief Close the websocket
     * @param startAgain whether to reconnect
     */
    void
    close(bool startAgain);

    boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>&
    ws()
    {
        return *ws_;
    }
};
|
||||
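SourceImpl uses the curiously recurring template pattern: derived() downcasts to the concrete source so that shared connection logic can reach PlainSource::ws() or SslSource::ws() without virtual dispatch. A minimal standalone sketch of the same pattern; the names here are illustrative and not taken from this diff:

#include <iostream>

template <typename Derived>
struct SourceBase
{
    void
    connect()
    {
        // Compile-time dispatch to the concrete ws() without a vtable
        std::cout << derived().ws() << '\n';
    }

protected:
    Derived&
    derived()
    {
        return static_cast<Derived&>(*this);
    }
};

struct PlainLikeSource : SourceBase<PlainLikeSource>
{
    char const*
    ws()
    {
        return "plain websocket stream";
    }
};

int
main()
{
    PlainLikeSource src;
    src.connect();  // prints "plain websocket stream"
}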
src/etl/SystemState.h (Normal file, 50 lines)
@@ -0,0 +1,50 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <atomic>

struct SystemState
{
    /**
     * @brief Whether the process is in strict read-only mode
     *
     * In strict read-only mode, the process will never attempt to become the ETL writer, and will only publish
     * ledgers as they are written to the database.
     */
    bool isReadOnly = false;

    /**
     * @brief Whether the process is writing to the database.
     *
     * Used by server_info
     */
    std::atomic_bool isWriting = false;

    /**
     * @brief Whether the software is stopping
     */
    std::atomic_bool isStopping = false;

    /**
     * @brief Whether a write conflict was detected
     */
    std::atomic_bool writeConflict = false;
};
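A short sketch of how these flags might coordinate threads. Only the struct mirrors the header above; the thread bodies are illustrative assumptions, not part of this diff:

#include <atomic>
#include <iostream>
#include <thread>

struct SystemState
{
    bool isReadOnly = false;
    std::atomic_bool isWriting = false;
    std::atomic_bool isStopping = false;
    std::atomic_bool writeConflict = false;
};

int
main()
{
    SystemState state;

    // Writer thread: only runs if not in strict read-only mode; records a
    // conflict when another process wins the write race (simulated here)
    std::thread writer{[&state] {
        if (state.isReadOnly)
            return;

        state.isWriting = true;
        state.writeConflict = true;  // pretend another writer got there first
        state.isWriting = false;
    }};

    writer.join();

    // Any component may request shutdown; workers poll isStopping
    state.isStopping = true;

    std::cout << "writeConflict = " << state.writeConflict << ", isStopping = " << state.isStopping << '\n';
}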
src/etl/impl/CacheLoader.h (Normal file, 434 lines)
@@ -0,0 +1,434 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <log/Logger.h>

#include <ripple/ledger/ReadView.h>
#include <boost/algorithm/string.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/websocket.hpp>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

#include <chrono>
#include <mutex>
#include <thread>

namespace clio::detail {

/**
 * @brief Cache loading interface
 */
template <typename CacheType>
class CacheLoader
{
    enum class LoadStyle { ASYNC, SYNC, NOT_AT_ALL };

    clio::Logger log_{"ETL"};

    std::reference_wrapper<boost::asio::io_context> ioContext_;
    std::shared_ptr<BackendInterface> backend_;
    std::reference_wrapper<CacheType> cache_;
    LoadStyle cacheLoadStyle_ = LoadStyle::ASYNC;

    // number of diffs to use to generate cursors to traverse the ledger in parallel during initial cache download
    size_t numCacheDiffs_ = 32;

    // number of markers to use at one time to traverse the ledger in parallel during initial cache download
    size_t numCacheMarkers_ = 48;

    // number of ledger objects to fetch concurrently per marker during cache download
    size_t cachePageFetchSize_ = 512;

    struct ClioPeer
    {
        std::string ip;
        int port;
    };

    std::vector<ClioPeer> clioPeers_;

    std::thread thread_;
    std::atomic_bool stopping_ = false;

public:
    CacheLoader(
        clio::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> const& backend,
        CacheType& ledgerCache)
        : ioContext_{std::ref(ioc)}, backend_{backend}, cache_{ledgerCache}
    {
        if (config.contains("cache"))
        {
            auto const cache = config.section("cache");
            if (auto entry = cache.maybeValue<std::string>("load"); entry)
            {
                if (boost::iequals(*entry, "sync"))
                    cacheLoadStyle_ = LoadStyle::SYNC;
                if (boost::iequals(*entry, "async"))
                    cacheLoadStyle_ = LoadStyle::ASYNC;
                if (boost::iequals(*entry, "none") or boost::iequals(*entry, "no"))
                    cacheLoadStyle_ = LoadStyle::NOT_AT_ALL;
            }

            numCacheDiffs_ = cache.valueOr<size_t>("num_diffs", numCacheDiffs_);
            numCacheMarkers_ = cache.valueOr<size_t>("num_markers", numCacheMarkers_);
            cachePageFetchSize_ = cache.valueOr<size_t>("page_fetch_size", cachePageFetchSize_);

            if (auto peers = cache.maybeArray("peers"); peers)
            {
                for (auto const& peer : *peers)
                {
                    auto ip = peer.value<std::string>("ip");
                    auto port = peer.value<uint32_t>("port");

                    // todo: use emplace_back when clang is ready
                    clioPeers_.push_back({ip, static_cast<int>(port)});
                }

                unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
                std::shuffle(std::begin(clioPeers_), std::end(clioPeers_), std::default_random_engine(seed));
            }
        }
    }

    ~CacheLoader()
    {
        stop();
        if (thread_.joinable())
            thread_.join();
    }

    /**
     * @brief Populates the cache by walking through the given ledger.
     *
     * Should only be called once. The default behavior is to return immediately and populate the cache in the
     * background. This can be overridden via config parameter, to populate synchronously, or not at all.
     */
    void
    load(uint32_t seq)
    {
        if (cacheLoadStyle_ == LoadStyle::NOT_AT_ALL)
        {
            cache_.get().setDisabled();
            log_.warn() << "Cache is disabled. Not loading";
            return;
        }

        if (cache_.get().isFull())
        {
            assert(false);
            return;
        }

        if (clioPeers_.size() > 0)
        {
            boost::asio::spawn(ioContext_.get(), [this, seq](boost::asio::yield_context yield) {
                for (auto const& peer : clioPeers_)
                {
                    // returns true on success
                    if (loadCacheFromClioPeer(seq, peer.ip, std::to_string(peer.port), yield))
                        return;
                }

                // if we couldn't successfully load from any peers, load from db
                loadCacheFromDb(seq);
            });
            return;
        }
        else
        {
            loadCacheFromDb(seq);
        }

        // If loading synchronously, poll cache until full
        while (cacheLoadStyle_ == LoadStyle::SYNC && not cache_.get().isFull())
        {
            log_.debug() << "Cache not full. Cache size = " << cache_.get().size() << ". Sleeping ...";
            std::this_thread::sleep_for(std::chrono::seconds(10));
            if (cache_.get().isFull())
                log_.info() << "Cache is full. Cache size = " << cache_.get().size();
        }
    }

    void
    stop()
    {
        stopping_ = true;
    }

private:
    bool
    loadCacheFromClioPeer(
        uint32_t ledgerIndex,
        std::string const& ip,
        std::string const& port,
        boost::asio::yield_context& yield)
    {
        log_.info() << "Loading cache from peer. ip = " << ip << " . port = " << port;
        namespace beast = boost::beast;          // from <boost/beast.hpp>
        namespace http = beast::http;            // from <boost/beast/http.hpp>
        namespace websocket = beast::websocket;  // from <boost/beast/websocket.hpp>
        namespace net = boost::asio;             // from <boost/asio.hpp>
        using tcp = boost::asio::ip::tcp;        // from <boost/asio/ip/tcp.hpp>
        try
        {
            boost::beast::error_code ec;
            // These objects perform our I/O
            tcp::resolver resolver{ioContext_.get()};

            log_.trace() << "Creating websocket";
            auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioContext_.get());

            // Look up the domain name
            auto const results = resolver.async_resolve(ip, port, yield[ec]);
            if (ec)
                return false;

            log_.trace() << "Connecting websocket";
            // Make the connection on the IP address we get from a lookup
            ws->next_layer().async_connect(results, yield[ec]);
            if (ec)
                return false;

            log_.trace() << "Performing websocket handshake";
            // Perform the websocket handshake
            ws->async_handshake(ip, "/", yield[ec]);
            if (ec)
                return false;

            std::optional<boost::json::value> marker;

            log_.trace() << "Sending request";
            auto getRequest = [&](auto marker) {
                boost::json::object request = {
                    {"command", "ledger_data"},
                    {"ledger_index", ledgerIndex},
                    {"binary", true},
                    {"out_of_order", true},
                    {"limit", 2048}};

                if (marker)
                    request["marker"] = *marker;
                return request;
            };

            bool started = false;
            size_t numAttempts = 0;
            do
            {
                // Send the message
                ws->async_write(net::buffer(boost::json::serialize(getRequest(marker))), yield[ec]);
                if (ec)
                {
                    log_.error() << "error writing = " << ec.message();
                    return false;
                }

                beast::flat_buffer buffer;
                ws->async_read(buffer, yield[ec]);
                if (ec)
                {
                    log_.error() << "error reading = " << ec.message();
                    return false;
                }

                auto raw = beast::buffers_to_string(buffer.data());
                auto parsed = boost::json::parse(raw);

                if (!parsed.is_object())
                {
                    log_.error() << "Error parsing response: " << raw;
                    return false;
                }
                log_.trace() << "Successfully parsed response " << parsed;

                if (auto const& response = parsed.as_object(); response.contains("error"))
                {
                    log_.error() << "Response contains error: " << response;
                    auto const& err = response.at("error");
                    if (err.is_string() && err.as_string() == "lgrNotFound")
                    {
                        ++numAttempts;
                        if (numAttempts >= 5)
                        {
                            log_.error() << " ledger not found at peer after 5 attempts. "
                                            "peer = "
                                         << ip << " ledger = " << ledgerIndex
                                         << ". Check your config and the health of the peer";
                            return false;
                        }
                        log_.warn() << "Ledger not found. ledger = " << ledgerIndex << ". Sleeping and trying again";
                        std::this_thread::sleep_for(std::chrono::seconds(1));
                        continue;
                    }
                    return false;
                }
                started = true;
                auto const& response = parsed.as_object()["result"].as_object();

                if (!response.contains("cache_full") || !response.at("cache_full").as_bool())
                {
                    log_.error() << "cache not full for clio node. ip = " << ip;
                    return false;
                }
                if (response.contains("marker"))
                    marker = response.at("marker");
                else
                    marker = {};

                auto const& state = response.at("state").as_array();

                std::vector<Backend::LedgerObject> objects;
                objects.reserve(state.size());
                for (auto const& ledgerObject : state)
                {
                    auto const& obj = ledgerObject.as_object();

                    Backend::LedgerObject stateObject = {};

                    if (!stateObject.key.parseHex(obj.at("index").as_string().c_str()))
                    {
                        log_.error() << "failed to parse object id";
                        return false;
                    }
                    boost::algorithm::unhex(obj.at("data").as_string().c_str(), std::back_inserter(stateObject.blob));
                    objects.push_back(std::move(stateObject));
                }
                cache_.get().update(objects, ledgerIndex, true);

                if (marker)
                    log_.debug() << "At marker " << *marker;
            } while (marker || !started);

            log_.info() << "Finished downloading ledger from clio node. ip = " << ip;

            cache_.get().setFull();
            return true;
        }
        catch (std::exception const& e)
        {
            log_.error() << "Encountered exception : " << e.what() << " - ip = " << ip;
            return false;
        }
    }

    void
    loadCacheFromDb(uint32_t seq)
    {
        std::vector<Backend::LedgerObject> diff;
        std::vector<std::optional<ripple::uint256>> cursors;

        auto append = [](auto&& a, auto&& b) { a.insert(std::end(a), std::begin(b), std::end(b)); };

        for (size_t i = 0; i < numCacheDiffs_; ++i)
        {
            append(diff, Backend::synchronousAndRetryOnTimeout([&](auto yield) {
                       return backend_->fetchLedgerDiff(seq - i, yield);
                   }));
        }

        std::sort(diff.begin(), diff.end(), [](auto a, auto b) {
            return a.key < b.key || (a.key == b.key && a.blob.size() < b.blob.size());
        });

        diff.erase(std::unique(diff.begin(), diff.end(), [](auto a, auto b) { return a.key == b.key; }), diff.end());

        cursors.push_back({});
        for (auto& obj : diff)
            if (obj.blob.size())
                cursors.push_back({obj.key});
        cursors.push_back({});

        std::stringstream cursorStr;
        for (auto& c : cursors)
            if (c)
                cursorStr << ripple::strHex(*c) << ", ";

        log_.info() << "Loading cache. num cursors = " << cursors.size() - 1;
        log_.trace() << "cursors = " << cursorStr.str();

        thread_ = std::thread{[this, seq, cursors]() {
            auto startTime = std::chrono::system_clock::now();
            auto markers = std::make_shared<std::atomic_int>(0);
            auto numRemaining = std::make_shared<std::atomic_int>(cursors.size() - 1);

            for (size_t i = 0; i < cursors.size() - 1; ++i)
            {
                auto const start = cursors[i];
                auto const end = cursors[i + 1];

                markers->wait(numCacheMarkers_);
                ++(*markers);

                boost::asio::spawn(
                    ioContext_.get(),
                    [this, seq, start, end, numRemaining, startTime, markers](boost::asio::yield_context yield) {
                        std::optional<ripple::uint256> cursor = start;
                        std::string cursorStr =
                            cursor.has_value() ? ripple::strHex(cursor.value()) : ripple::strHex(Backend::firstKey);
                        log_.debug() << "Starting a cursor: " << cursorStr << " markers = " << *markers;

                        while (not stopping_)
                        {
                            auto res = Backend::retryOnTimeout([this, seq, &cursor, &yield]() {
                                return backend_->fetchLedgerPage(cursor, seq, cachePageFetchSize_, false, yield);
                            });

                            cache_.get().update(res.objects, seq, true);

                            if (!res.cursor || (end && *(res.cursor) > *end))
                                break;

                            log_.trace() << "Loading cache. cache size = " << cache_.get().size()
                                         << " - cursor = " << ripple::strHex(res.cursor.value())
                                         << " start = " << cursorStr << " markers = " << *markers;

                            cursor = std::move(res.cursor);
                        }

                        --(*markers);
                        markers->notify_one();

                        if (--(*numRemaining) == 0)
                        {
                            auto endTime = std::chrono::system_clock::now();
                            auto duration = std::chrono::duration_cast<std::chrono::seconds>(endTime - startTime);

                            log_.info() << "Finished loading cache. cache size = " << cache_.get().size() << ". Took "
                                        << duration.count() << " seconds";
                            cache_.get().setFull();
                        }
                        else
                        {
                            log_.info() << "Finished a cursor. num remaining = " << *numRemaining
                                        << " start = " << cursorStr << " markers = " << *markers;
                        }
                    });
            }
        }};
    }
};

} // namespace clio::detail
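The constructor above implies a config shape along these lines. The keys match the lookups in the code ("load", "num_diffs", "num_markers", "page_fetch_size", "peers"); the values and the peer address are illustrative, not taken from this diff:

{
    "cache": {
        "load": "async",
        "num_diffs": 32,
        "num_markers": 48,
        "page_fetch_size": 512,
        "peers": [{"ip": "127.0.0.1", "port": 51233}]
    }
}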
src/etl/impl/ExtractionDataPipe.h (Normal file, 134 lines)
@@ -0,0 +1,134 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <etl/ETLHelpers.h>
#include <log/Logger.h>

#include <memory>
#include <vector>

namespace clio::detail {

/**
 * @brief A collection of thread safe async queues used by Extractor and Transformer to communicate
 */
template <typename RawDataType>
class ExtractionDataPipe
{
public:
    using DataType = std::optional<RawDataType>;
    using QueueType = ThreadSafeQueue<DataType>;  // TODO: probably should use boost::lockfree::queue instead?

    constexpr static auto TOTAL_MAX_IN_QUEUE = 1000u;

private:
    clio::Logger log_{"ETL"};

    uint32_t stride_;
    uint32_t startSequence_;

    std::vector<std::shared_ptr<QueueType>> queues_;

public:
    /**
     * @brief Create a new instance of the extraction data pipe
     *
     * @param stride
     * @param startSequence
     */
    ExtractionDataPipe(uint32_t stride, uint32_t startSequence) : stride_{stride}, startSequence_{startSequence}
    {
        auto const maxQueueSize = TOTAL_MAX_IN_QUEUE / stride;
        for (size_t i = 0; i < stride_; ++i)
            queues_.push_back(std::make_unique<QueueType>(maxQueueSize));
    }

    /**
     * @brief Push new data package for the specified sequence.
     *
     * Note: Potentially blocks until the underlying queue can accommodate another entry.
     *
     * @param sequence The sequence for which to enqueue the data package
     * @param data The data to store
     */
    void
    push(uint32_t sequence, DataType&& data)
    {
        getQueue(sequence)->push(std::move(data));
    }

    /**
     * @brief Get data package for the given sequence
     *
     * Note: Potentially blocks until data is available.
     *
     * @param sequence The sequence for which data is required
     * @return The data wrapped in an optional; nullopt means that there is no more data to expect
     */
    DataType
    popNext(uint32_t sequence)
    {
        return getQueue(sequence)->pop();
    }

    /**
     * @return The stride
     */
    uint32_t
    getStride() const
    {
        return stride_;
    }

    /**
     * @brief Hint the Transformer that the queue is done sending data
     * @param sequence The sequence for which the extractor queue is to be hinted
     */
    void
    finish(uint32_t sequence)
    {
        // empty optional hints the Transformer to shut down
        push(sequence, std::nullopt);
    }

    /**
     * @brief Unblocks internal queues
     *
     * Note: For now this must be called by the ETL when Transformer exits.
     */
    void
    cleanup()
    {
        // TODO: this should not have to be called by hand. it should be done via RAII
        for (auto i = 0u; i < stride_; ++i)
            getQueue(i)->tryPop();  // pop from each queue that might be blocked on a push
    }

private:
    std::shared_ptr<QueueType>
    getQueue(uint32_t sequence)
    {
        log_.debug() << "Grabbing extraction queue for " << sequence << "; start was " << startSequence_;
        return queues_[(sequence - startSequence_) % stride_];
    }
};

} // namespace clio::detail
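The round-robin mapping in getQueue() is what lets several extractors feed one transformer in ledger order: with a given stride, every sequence lands on a fixed queue. A tiny standalone sketch of that arithmetic, with illustrative values:

#include <cstdint>
#include <iostream>

int
main()
{
    uint32_t const stride = 4;          // number of extractor threads / queues
    uint32_t const startSequence = 100; // first ledger handled by the pipe

    // Each sequence always maps to the same queue, so the transformer can pop
    // sequences startSequence, startSequence+1, ... in order
    for (uint32_t seq = startSequence; seq < startSequence + 8; ++seq)
        std::cout << "ledger " << seq << " -> queue " << (seq - startSequence) % stride << '\n';
}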
src/etl/impl/Extractor.h (Normal file, 140 lines)
@@ -0,0 +1,140 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <etl/SystemState.h>
#include <log/Logger.h>
#include <util/Profiler.h>

#include <ripple/beast/core/CurrentThreadName.h>

#include <chrono>
#include <mutex>
#include <thread>

namespace clio::detail {

/**
 * @brief Extractor thread that fetches GRPC data and enqueues it on the DataPipeType
 */
template <typename DataPipeType, typename NetworkValidatedLedgersType, typename LedgerFetcherType>
class Extractor
{
    clio::Logger log_{"ETL"};

    std::reference_wrapper<DataPipeType> pipe_;
    std::shared_ptr<NetworkValidatedLedgersType> networkValidatedLedgers_;
    std::reference_wrapper<LedgerFetcherType> ledgerFetcher_;
    uint32_t startSequence_;
    std::optional<uint32_t> finishSequence_;
    std::reference_wrapper<SystemState const> state_;  // shared state for ETL

    std::thread thread_;

public:
    Extractor(
        DataPipeType& pipe,
        std::shared_ptr<NetworkValidatedLedgersType> networkValidatedLedgers,
        LedgerFetcherType& ledgerFetcher,
        uint32_t startSequence,
        std::optional<uint32_t> finishSequence,
        SystemState const& state)
        : pipe_(std::ref(pipe))
        , networkValidatedLedgers_{networkValidatedLedgers}
        , ledgerFetcher_{std::ref(ledgerFetcher)}
        , startSequence_{startSequence}
        , finishSequence_{finishSequence}
        , state_{std::cref(state)}
    {
        thread_ = std::thread([this]() { process(); });
    }

    ~Extractor()
    {
        if (thread_.joinable())
            thread_.join();
    }

    void
    waitTillFinished()
    {
        assert(thread_.joinable());
        thread_.join();
    }

private:
    void
    process()
    {
        beast::setCurrentThreadName("ETLService extract");

        double totalTime = 0.0;
        auto currentSequence = startSequence_;

        while (!shouldFinish(currentSequence) && networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence))
        {
            auto [fetchResponse, time] = util::timed<std::chrono::duration<double>>(
                [this, currentSequence]() { return ledgerFetcher_.get().fetchDataAndDiff(currentSequence); });
            totalTime += time;

            // if the fetch is unsuccessful, stop. fetchLedger only returns false if the server is shutting down, or if
            // the ledger was found in the database (which means another process already wrote the ledger that this
            // process was trying to extract; this is a form of a write conflict).
            // Otherwise, fetchDataAndDiff will keep trying to fetch the specified ledger until successful.
            if (!fetchResponse)
                break;

            // TODO: extract this part into a strategy perhaps
            auto const tps = fetchResponse->transactions_list().transactions_size() / time;
            log_.info() << "Extract phase time = " << time << "; Extract phase tps = " << tps
                        << "; Avg extract time = " << totalTime / (currentSequence - startSequence_ + 1)
                        << "; seq = " << currentSequence;

            pipe_.get().push(currentSequence, std::move(fetchResponse));
            currentSequence += pipe_.get().getStride();
        }

        pipe_.get().finish(startSequence_);
    }

    bool
    isStopping() const
    {
        return state_.get().isStopping;
    }

    bool
    hasWriteConflict() const
    {
        return state_.get().writeConflict;
    }

    bool
    shouldFinish(uint32_t seq) const
    {
        // Stopping conditions:
        // - if there is a write conflict in the load thread, the ETL mechanism should stop.
        // - if the entire server is shutting down - this can be detected in a variety of ways.
        // - when the given sequence is past the finishSequence in case one is specified
        return hasWriteConflict() || isStopping() || (finishSequence_ && seq > *finishSequence_);
    }
};

} // namespace clio::detail
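Reduced to its essentials, process() is a producer that pushes fetched ledgers until a stop condition and then signals completion with an empty optional. An illustrative reduction, not taken from this diff; std::queue stands in for one ExtractionDataPipe queue:

#include <cstdint>
#include <iostream>
#include <optional>
#include <queue>

int
main()
{
    uint32_t const start = 5, stride = 2, finishAt = 11;
    std::queue<std::optional<uint32_t>> pipe;  // stand-in for one pipe queue

    // Extractor: fetch ledgers start, start+stride, ... until the stop condition
    for (uint32_t seq = start; seq <= finishAt; seq += stride)
        pipe.push(seq);       // stand-in for fetchDataAndDiff(seq) + push
    pipe.push(std::nullopt);  // finish(): the empty optional tells the Transformer to stop

    // Transformer: consume until the empty optional arrives
    while (true)
    {
        auto item = pipe.front();
        pipe.pop();
        if (!item)
            break;
        std::cout << "transform ledger " << *item << '\n';
    }
}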
src/etl/impl/ForwardCache.cpp (Normal file, 83 lines)
@@ -0,0 +1,83 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <etl/Source.h>
#include <etl/impl/ForwardCache.h>
#include <rpc/RPCHelpers.h>

#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

namespace clio::detail {

void
ForwardCache::freshen()
{
    log_.trace() << "Freshening ForwardCache";

    auto numOutstanding = std::make_shared<std::atomic_uint>(latestForwarded_.size());

    for (auto const& cacheEntry : latestForwarded_)
    {
        boost::asio::spawn(
            strand_, [this, numOutstanding, command = cacheEntry.first](boost::asio::yield_context yield) {
                boost::json::object request = {{"command", command}};
                auto resp = source_.requestFromRippled(request, {}, yield);

                if (!resp || resp->contains("error"))
                    resp = {};

                {
                    std::scoped_lock lk(mtx_);
                    latestForwarded_[command] = resp;
                }
            });
    }
}

void
ForwardCache::clear()
{
    std::scoped_lock lk(mtx_);
    for (auto& cacheEntry : latestForwarded_)
        latestForwarded_[cacheEntry.first] = {};
}

std::optional<boost::json::object>
ForwardCache::get(boost::json::object const& request) const
{
    std::optional<std::string> command = {};
    if (request.contains("command") && !request.contains("method") && request.at("command").is_string())
        command = request.at("command").as_string().c_str();
    else if (request.contains("method") && !request.contains("command") && request.at("method").is_string())
        command = request.at("method").as_string().c_str();

    if (!command)
        return {};
    if (RPC::specifiesCurrentOrClosedLedger(request))
        return {};

    std::shared_lock lk(mtx_);
    if (!latestForwarded_.contains(*command))
        return {};

    return {latestForwarded_.at(*command)};
}

} // namespace clio::detail
src/etl/impl/ForwardCache.h (Normal file, 82 lines)
@@ -0,0 +1,82 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <etl/ETLHelpers.h>
#include <log/Logger.h>

#include <boost/asio.hpp>
#include <boost/json.hpp>

#include <atomic>
#include <mutex>
#include <unordered_map>

class Source;

namespace clio::detail {

/**
 * @brief Cache for rippled responses
 */
class ForwardCache
{
    using ResponseType = std::optional<boost::json::object>;

    clio::Logger log_{"ETL"};

    mutable std::shared_mutex mtx_;
    std::unordered_map<std::string, ResponseType> latestForwarded_;
    boost::asio::io_context::strand strand_;
    Source const& source_;
    std::uint32_t duration_ = 10;

    void
    clear();

public:
    ForwardCache(clio::Config const& config, boost::asio::io_context& ioc, Source const& source)
        : strand_(ioc), source_(source)
    {
        if (config.contains("cache"))
        {
            auto commands = config.arrayOrThrow("cache", "Source cache must be array");

            if (config.contains("cache_duration"))
                duration_ = config.valueOrThrow<uint32_t>("cache_duration", "Source cache_duration must be a number");

            for (auto const& command : commands)
            {
                auto key = command.valueOrThrow<std::string>("Source forward command must be array of strings");
                latestForwarded_[key] = {};
            }
        }
    }

    void
    freshen();

    std::optional<boost::json::object>
    get(boost::json::object const& command) const;
};

} // namespace clio::detail
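The constructor reads its settings from the per-source config stanza: "cache" must be an array of command names to keep warm, and "cache_duration" a number (defaulting to 10). An illustrative stanza; the command names are examples, and the "ip"/"ws_port" keys are assumed from the surrounding source config rather than shown in this diff:

{
    "ip": "127.0.0.1",
    "ws_port": "6006",
    "cache": ["server_info", "fee"],
    "cache_duration": 10
}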
src/etl/impl/LedgerFetcher.h (Normal file, 102 lines)
@@ -0,0 +1,102 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/BackendInterface.h>
#include <etl/Source.h>
#include <log/Logger.h>

#include <ripple/ledger/ReadView.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

#include <optional>

namespace clio::detail {

/**
 * @brief GRPC Ledger data fetcher
 */
template <typename LoadBalancerType>
class LedgerFetcher
{
public:
    using OptionalGetLedgerResponseType = typename LoadBalancerType::OptionalGetLedgerResponseType;

private:
    clio::Logger log_{"ETL"};

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<LoadBalancerType> loadBalancer_;

public:
    /**
     * @brief Create an instance of the fetcher
     */
    LedgerFetcher(std::shared_ptr<BackendInterface> backend, std::shared_ptr<LoadBalancerType> balancer)
        : backend_(backend), loadBalancer_(balancer)
    {
    }

    /**
     * @brief Extract data for a particular ledger from an ETL source
     *
     * This function continuously tries to extract the specified ledger (using all available ETL sources) until the
     * extraction succeeds, or the server shuts down.
     *
     * @param seq sequence of the ledger to extract
     * @return ledger header and transaction+metadata blobs; empty optional if the server is shutting down
     */
    OptionalGetLedgerResponseType
    fetchData(uint32_t seq)
    {
        log_.debug() << "Attempting to fetch ledger with sequence = " << seq;

        auto response = loadBalancer_->fetchLedger(seq, false, false);
        if (response)
            log_.trace() << "GetLedger reply = " << response->DebugString();
        return response;
    }

    /**
     * @brief Extract diff data for a particular ledger from an ETL source.
     *
     * This function continuously tries to extract the specified ledger (using all available ETL sources) until the
     * extraction succeeds, or the server shuts down.
     *
     * @param seq sequence of the ledger to extract
     * @return ledger header, transaction+metadata blobs, and all ledger objects created, modified or deleted between
     * this ledger and the parent; empty optional if the server is shutting down
     */
    OptionalGetLedgerResponseType
    fetchDataAndDiff(uint32_t seq)
    {
        log_.debug() << "Attempting to fetch ledger with sequence = " << seq;

        auto response = loadBalancer_->fetchLedger(
            seq, true, !backend_->cache().isFull() || backend_->cache().latestLedgerSequence() >= seq);
        if (response)
            log_.trace() << "GetLedger reply = " << response->DebugString();

        return response;
    }
};

} // namespace clio::detail
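The third argument to fetchLedger in fetchDataAndDiff decides whether to also request full ledger objects: they are needed whenever the local cache cannot already serve them. The predicate in isolation, as a standalone sketch with a stand-in cache type (the types and values are illustrative):

#include <cstdint>
#include <iostream>

struct FakeCache  // stand-in for the backend's ledger cache
{
    bool full;
    uint32_t latestSeq;
};

// Mirrors: !cache.isFull() || cache.latestLedgerSequence() >= seq
bool
needsObjects(FakeCache const& cache, uint32_t seq)
{
    return !cache.full || cache.latestSeq >= seq;
}

int
main()
{
    std::cout << needsObjects({false, 0}, 7) << '\n';  // 1: cache empty, fetch objects
    std::cout << needsObjects({true, 10}, 7) << '\n';  // 1: cache already past seq, refetch
    std::cout << needsObjects({true, 5}, 7) << '\n';   // 0: full cache behind seq, diff suffices
}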
src/etl/impl/LedgerLoader.h (Normal file, 255 lines)
@@ -0,0 +1,255 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/BackendInterface.h>
#include <etl/NFTHelpers.h>
#include <etl/SystemState.h>
#include <etl/impl/LedgerFetcher.h>
#include <log/Logger.h>
#include <util/LedgerUtils.h>
#include <util/Profiler.h>

#include <ripple/beast/core/CurrentThreadName.h>
#include <ripple/ledger/ReadView.h>

#include <memory>

struct FormattedTransactionsData
{
    std::vector<AccountTransactionsData> accountTxData;
    std::vector<NFTTransactionsData> nfTokenTxData;
    std::vector<NFTsData> nfTokensData;
};

namespace clio::detail {

/**
 * @brief Loads ledger data into the DB
 */
template <typename LoadBalancerType, typename LedgerFetcherType>
class LedgerLoader
{
public:
    using GetLedgerResponseType = typename LoadBalancerType::GetLedgerResponseType;
    using OptionalGetLedgerResponseType = typename LoadBalancerType::OptionalGetLedgerResponseType;
    using RawLedgerObjectType = typename LoadBalancerType::RawLedgerObjectType;

private:
    clio::Logger log_{"ETL"};

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<LoadBalancerType> loadBalancer_;
    std::reference_wrapper<LedgerFetcherType> fetcher_;
    std::reference_wrapper<SystemState const> state_;  // shared state for ETL

public:
    /**
     * @brief Create an instance of the loader
     */
    LedgerLoader(
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<LoadBalancerType> balancer,
        LedgerFetcherType& fetcher,
        SystemState const& state)
        : backend_{backend}, loadBalancer_{balancer}, fetcher_{std::ref(fetcher)}, state_{std::cref(state)}
    {
    }

    /**
     * @brief Insert extracted transaction into the ledger
     *
     * Insert all of the extracted transactions into the ledger, returning transactions related to accounts,
     * transactions related to NFTs, and NFTs themselves for later processing.
     *
     * @param ledger ledger to insert transactions into
     * @param data data extracted from an ETL source
     * @return struct that contains the necessary info to write to the account_transactions/account_tx and
     * nft_token_transactions tables (mostly transaction hashes, corresponding nodestore hashes and affected accounts)
     */
    FormattedTransactionsData
    insertTransactions(ripple::LedgerInfo const& ledger, GetLedgerResponseType& data)
    {
        FormattedTransactionsData result;

        for (auto& txn : *(data.mutable_transactions_list()->mutable_transactions()))
        {
            std::string* raw = txn.mutable_transaction_blob();

            ripple::SerialIter it{raw->data(), raw->size()};
            ripple::STTx sttx{it};

            log_.trace() << "Inserting transaction = " << sttx.getTransactionID();

            ripple::TxMeta txMeta{sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};

            auto const [nftTxs, maybeNFT] = getNFTDataFromTx(txMeta, sttx);
            result.nfTokenTxData.insert(result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end());
            if (maybeNFT)
                result.nfTokensData.push_back(*maybeNFT);

            auto journal = ripple::debugLog();
            result.accountTxData.emplace_back(txMeta, sttx.getTransactionID(), journal);
            std::string keyStr{(const char*)sttx.getTransactionID().data(), 32};
            backend_->writeTransaction(
                std::move(keyStr),
                ledger.seq,
                ledger.closeTime.time_since_epoch().count(),
                std::move(*raw),
                std::move(*txn.mutable_metadata_blob()));
        }

        // Remove all but the last NFTsData for each id. unique removes all but the first of a group, so we want to
        // reverse sort by transaction index
        std::sort(result.nfTokensData.begin(), result.nfTokensData.end(), [](NFTsData const& a, NFTsData const& b) {
            return a.tokenID > b.tokenID && a.transactionIndex > b.transactionIndex;
        });

        // Now we can unique the NFTs by tokenID.
        auto last = std::unique(
            result.nfTokensData.begin(), result.nfTokensData.end(), [](NFTsData const& a, NFTsData const& b) {
                return a.tokenID == b.tokenID;
            });
        result.nfTokensData.erase(last, result.nfTokensData.end());

        return result;
    }

    /**
     * @brief Download a ledger with specified sequence in full
     *
     * Note: This takes several minutes or longer.
     *
     * @param sequence the sequence of the ledger to download
     * @return The ledger downloaded, with a full transaction and account state map
     */
    std::optional<ripple::LedgerInfo>
    loadInitialLedger(uint32_t sequence)
    {
        // check that database is actually empty
        auto rng = backend_->hardFetchLedgerRangeNoThrow();
        if (rng)
        {
            log_.fatal() << "Database is not empty";
            assert(false);
            return {};
        }

        // Fetch the ledger from the network. This function will not return until either the fetch is successful, or the
        // server is being shutdown. This only fetches the ledger header and the transactions+metadata
        OptionalGetLedgerResponseType ledgerData{fetcher_.get().fetchData(sequence)};
        if (!ledgerData)
            return {};

        ripple::LedgerInfo lgrInfo = util::deserializeHeader(ripple::makeSlice(ledgerData->ledger_header()));

        log_.debug() << "Deserialized ledger header. " << util::toString(lgrInfo);

        auto timeDiff = util::timed<std::chrono::duration<double>>([this, sequence, &lgrInfo, &ledgerData]() {
            backend_->startWrites();

            log_.debug() << "Started writes";

            backend_->writeLedger(lgrInfo, std::move(*ledgerData->mutable_ledger_header()));

            log_.debug() << "Wrote ledger";
            FormattedTransactionsData insertTxResult = insertTransactions(lgrInfo, *ledgerData);
            log_.debug() << "Inserted txns";

            // download the full account state map. This function downloads full
            // ledger data and pushes the downloaded data into the writeQueue.
            // asyncWriter consumes from the queue and inserts the data into the
            // Ledger object. Once the below call returns, all data has been pushed
            // into the queue
            auto [edgeKeys, success] = loadBalancer_->loadInitialLedger(sequence);

            if (success)
            {
                size_t numWrites = 0;
                backend_->cache().setFull();

                auto seconds = util::timed<std::chrono::seconds>([this, edgeKeys = &edgeKeys, sequence, &numWrites]() {
                    for (auto& key : *edgeKeys)
                    {
                        log_.debug() << "Writing edge key = " << ripple::strHex(key);
                        auto succ = backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence);
                        if (succ)
                            backend_->writeSuccessor(std::move(key), sequence, uint256ToString(succ->key));
                    }

                    ripple::uint256 prev = Backend::firstKey;
                    while (auto cur = backend_->cache().getSuccessor(prev, sequence))
                    {
                        assert(cur);
                        if (prev == Backend::firstKey)
                            backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key));

                        if (isBookDir(cur->key, cur->blob))
                        {
                            auto base = getBookBase(cur->key);
                            // make sure the base is not an actual object
                            if (!backend_->cache().get(cur->key, sequence))
                            {
                                auto succ = backend_->cache().getSuccessor(base, sequence);
                                assert(succ);
                                if (succ->key == cur->key)
                                {
                                    log_.debug() << "Writing book successor = " << ripple::strHex(base) << " - "
                                                 << ripple::strHex(cur->key);

                                    backend_->writeSuccessor(
                                        uint256ToString(base), sequence, uint256ToString(cur->key));
                                }
                            }

                            ++numWrites;
                        }

                        prev = std::move(cur->key);
                        if (numWrites % 100000 == 0 && numWrites != 0)
                            log_.info() << "Wrote " << numWrites << " book successors";
                    }

                    backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(Backend::lastKey));
                    ++numWrites;
                });

                log_.info() << "Looping through cache and submitting all writes took " << seconds
                            << " seconds. numWrites = " << std::to_string(numWrites);
            }

            log_.debug() << "Loaded initial ledger";

            if (not state_.get().isStopping)
            {
                backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData));
                backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
                backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
            }

            backend_->finishWrites(sequence);
        });

        log_.debug() << "Time to download and store ledger = " << timeDiff;
        return lgrInfo;
    }
};

} // namespace clio::detail
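The sort-then-unique step in insertTransactions() keeps one NFTsData record per token, preferring the latest transaction. A standalone sketch of the same pattern; the types are illustrative stand-ins, and the comparator here is written with an explicit tie-break so that it forms a strict weak ordering:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Rec  // stand-in for NFTsData
{
    uint32_t tokenID;
    uint32_t txIndex;
};

int
main()
{
    std::vector<Rec> recs = {{1, 3}, {2, 1}, {1, 7}, {2, 4}};

    // Sort so the record we want to keep comes first within each tokenID group
    std::sort(recs.begin(), recs.end(), [](Rec const& a, Rec const& b) {
        return a.tokenID > b.tokenID || (a.tokenID == b.tokenID && a.txIndex > b.txIndex);
    });

    // unique() drops all but the first of each group of equal tokenIDs
    recs.erase(
        std::unique(recs.begin(), recs.end(), [](Rec const& a, Rec const& b) { return a.tokenID == b.tokenID; }),
        recs.end());

    for (auto const& r : recs)
        std::cout << "token " << r.tokenID << " kept txIndex " << r.txIndex << '\n';
}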
src/etl/impl/LedgerPublisher.h (Normal file, 252 lines)
@@ -0,0 +1,252 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2023, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <etl/SystemState.h>
|
||||
#include <log/Logger.h>
|
||||
#include <util/LedgerUtils.h>
|
||||
#include <util/Profiler.h>
|
||||
|
||||
#include <ripple/ledger/ReadView.h>
|
||||
|
||||
#include <chrono>
|
||||
|
||||
namespace clio::detail {
|
||||
|
||||
/**
|
||||
* @brief Publishes ledgers in a synchronized fashion.
|
||||
*
|
||||
* If ETL is started far behind the network, ledgers will be written and published very rapidly. Monitoring processes
|
||||
* will publish ledgers as they are written. However, to publish a ledger, the monitoring process needs to read all of
|
||||
* the transactions for that ledger from the database. Reading the transactions from the database requires network
|
||||
* calls, which can be slow. It is imperative however that the monitoring processes keep up with the writer, else the
|
||||
* monitoring processes will not be able to detect if the writer failed. Therefore, publishing each ledger (which
|
||||
* includes reading all of the transactions from the database) is done from the application wide asio io_service, and a
|
||||
* strand is used to ensure ledgers are published in order.
|
||||
*/
|
||||
template <typename SubscriptionManagerType>
|
||||
class LedgerPublisher
|
||||
{
|
||||
clio::Logger log_{"ETL"};
|
||||
|
||||
boost::asio::io_context::strand publishStrand_;
|
||||
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::shared_ptr<SubscriptionManagerType> subscriptions_;
|
||||
std::reference_wrapper<SystemState const> state_; // shared state for ETL
|
||||
|
||||
std::chrono::time_point<ripple::NetClock> lastCloseTime_;
|
||||
mutable std::shared_mutex closeTimeMtx_;
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> lastPublish_;
|
||||
mutable std::shared_mutex publishTimeMtx_;
|
||||
|
||||
std::optional<uint32_t> lastPublishedSequence_;
|
||||
mutable std::shared_mutex lastPublishedSeqMtx_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Create an instance of the publisher
|
||||
*/
|
||||
LedgerPublisher(
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManager> subscriptions,
|
||||
SystemState const& state)
|
||||
: publishStrand_{ioc}, backend_{backend}, subscriptions_{subscriptions}, state_{std::cref(state)}
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Attempt to read the specified ledger from the database, and then publish that ledger to the ledgers
|
||||
* stream.
|
||||
*
|
||||
* @param ledgerSequence the sequence of the ledger to publish
|
||||
* @param maxAttempts the number of times to attempt to read the ledger from the database. 1 attempt per second
|
||||
* @return whether the ledger was found in the database and published
|
||||
*/
|
||||
bool
|
||||
publish(uint32_t ledgerSequence, std::optional<uint32_t> maxAttempts)
|
||||
{
|
||||
log_.info() << "Attempting to publish ledger = " << ledgerSequence;
|
||||
size_t numAttempts = 0;
|
||||
while (not state_.get().isStopping)
|
||||
{
|
||||
auto range = backend_->hardFetchLedgerRangeNoThrow();
|
||||
|
||||
if (!range || range->maxSequence < ledgerSequence)
|
||||
{
|
||||
log_.debug() << "Trying to publish. Could not find "
|
||||
"ledger with sequence = "
|
||||
<< ledgerSequence;
|
||||
|
||||
// We try maxAttempts times to publish the ledger, waiting one second in between each attempt.
|
||||
if (maxAttempts && numAttempts >= maxAttempts)
|
||||
{
|
||||
log_.debug() << "Failed to publish ledger after " << numAttempts << " attempts.";
|
||||
return false;
|
||||
}
|
||||
std::this_thread::sleep_for(std::chrono::seconds(1));
|
||||
++numAttempts;
|
||||
continue;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto lgr = Backend::synchronousAndRetryOnTimeout(
|
||||
[&](auto yield) { return backend_->fetchLedgerBySequence(ledgerSequence, yield); });
|
||||
|
||||
assert(lgr);
|
||||
publish(*lgr);
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Publish the passed in ledger
|
||||
*
|
||||
     * All ledgers are published through publishStrand_, which ensures that all publishes are performed serially.
     *
     * @param lgrInfo the ledger to publish
     */
    void
    publish(ripple::LedgerInfo const& lgrInfo)
    {
        boost::asio::post(publishStrand_, [this, lgrInfo = lgrInfo]() {
            log_.info() << "Publishing ledger " << std::to_string(lgrInfo.seq);

            if (!state_.get().isWriting)
            {
                log_.info() << "Updating cache";

                std::vector<Backend::LedgerObject> diff = Backend::synchronousAndRetryOnTimeout(
                    [&](auto yield) { return backend_->fetchLedgerDiff(lgrInfo.seq, yield); });

                backend_->cache().update(diff, lgrInfo.seq);  // todo: inject cache to update, don't use backend cache
                backend_->updateRange(lgrInfo.seq);
            }

            setLastClose(lgrInfo.closeTime);
            auto age = lastCloseAgeSeconds();

            // if the ledger closed over 10 minutes ago, assume we are still catching up and don't publish
            // TODO: this should probably be a strategy
            if (age < 600)
            {
                std::optional<ripple::Fees> fees = Backend::synchronousAndRetryOnTimeout(
                    [&](auto yield) { return backend_->fetchFees(lgrInfo.seq, yield); });

                std::vector<Backend::TransactionAndMetadata> transactions = Backend::synchronousAndRetryOnTimeout(
                    [&](auto yield) { return backend_->fetchAllTransactionsInLedger(lgrInfo.seq, yield); });

                auto ledgerRange = backend_->fetchLedgerRange();
                assert(ledgerRange);
                assert(fees);

                std::string range =
                    std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence);

                subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size());

                for (auto& txAndMeta : transactions)
                    subscriptions_->pubTransaction(txAndMeta, lgrInfo);

                subscriptions_->pubBookChanges(lgrInfo, transactions);

                setLastPublishTime();
                log_.info() << "Published ledger " << std::to_string(lgrInfo.seq);
            }
            else
                log_.info() << "Skipping publishing ledger " << std::to_string(lgrInfo.seq);
        });

        // we track the latest sequence for which publishing was requested, not necessarily already published
        setLastPublishedSequence(lgrInfo.seq);
    }

    /**
     * @brief Get time passed since last publish, in seconds
     */
    std::uint32_t
    lastPublishAgeSeconds() const
    {
        return std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - getLastPublish())
            .count();
    }

    /**
     * @brief Get last publish time as a time point
     */
    std::chrono::time_point<std::chrono::system_clock>
    getLastPublish() const
    {
        std::shared_lock lck(publishTimeMtx_);
        return lastPublish_;
    }

    /**
     * @brief Get time passed since last ledger close, in seconds
     */
    std::uint32_t
    lastCloseAgeSeconds() const
    {
        std::shared_lock lck(closeTimeMtx_);
        auto now = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch())
                       .count();
        auto closeTime = lastCloseTime_.time_since_epoch().count();

        // lastCloseTime_ is on ripple's NetClock (epoch 2000-01-01); shift it to the unix epoch before comparing
        if (now < (rippleEpochStart + closeTime))
            return 0;
        return now - (rippleEpochStart + closeTime);
    }

    std::optional<uint32_t>
    getLastPublishedSequence() const
    {
        std::scoped_lock lck(lastPublishedSeqMtx_);
        return lastPublishedSequence_;
    }

private:
    void
    setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
    {
        std::scoped_lock lck(closeTimeMtx_);
        lastCloseTime_ = lastCloseTime;
    }

    void
    setLastPublishTime()
    {
        std::scoped_lock lck(publishTimeMtx_);
        lastPublish_ = std::chrono::system_clock::now();
    }

    void
    setLastPublishedSequence(std::optional<uint32_t> lastPublishedSequence)
    {
        std::scoped_lock lck(lastPublishedSeqMtx_);
        lastPublishedSequence_ = lastPublishedSequence;
    }
};

} // namespace clio::detail
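For readers unfamiliar with Boost.Asio strands: the publishStrand_ used above is what makes publish() safe to call from any thread. A minimal, self-contained sketch of the same pattern (hypothetical names, not part of clio):

#include <boost/asio.hpp>
#include <iostream>

int main()
{
    boost::asio::io_context ctx;

    // Handlers posted to a strand never run concurrently, even if ctx.run()
    // is invoked from several threads, and they execute in the order posted.
    auto strand = boost::asio::make_strand(ctx);

    for (int seq = 1; seq <= 3; ++seq)
        boost::asio::post(strand, [seq] { std::cout << "publish ledger " << seq << '\n'; });

    ctx.run();  // prints ledgers 1, 2, 3 in order
}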
408 src/etl/impl/Transformer.h Normal file
@@ -0,0 +1,408 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/BackendInterface.h>
#include <etl/SystemState.h>
#include <etl/impl/LedgerLoader.h>
#include <log/Logger.h>
#include <util/LedgerUtils.h>
#include <util/Profiler.h>

#include <ripple/beast/core/CurrentThreadName.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

#include <chrono>
#include <memory>
#include <thread>

namespace clio::detail {

/*
 * TODO:
 *
 * 1) loading of data into the db should not really be part of transform, right?
 * 2) can we just prepare the data and hand it to the loader afterwards?
 * 3) how to deal with the cache update that is needed to write successors if neighbours are not included?
 */

/**
 * @brief Transformer thread that prepares a new ledger out of raw data from gRPC
 */
template <typename DataPipeType, typename LedgerLoaderType, typename LedgerPublisherType>
class Transformer
{
    using GetLedgerResponseType = typename LedgerLoaderType::GetLedgerResponseType;
    using RawLedgerObjectType = typename LedgerLoaderType::RawLedgerObjectType;

    clio::Logger log_{"ETL"};

    std::reference_wrapper<DataPipeType> pipe_;
    std::shared_ptr<BackendInterface> backend_;
    std::reference_wrapper<LedgerLoaderType> loader_;
    std::reference_wrapper<LedgerPublisherType> publisher_;
    uint32_t startSequence_;
    std::reference_wrapper<SystemState> state_;  // shared state for ETL

    std::thread thread_;

public:
    /**
     * @brief Create an instance of the transformer
     *
     * This spawns a new thread that reads from the data pipe and writes ledgers to the DB using LedgerLoader and
     * LedgerPublisher.
     */
    Transformer(
        DataPipeType& pipe,
        std::shared_ptr<BackendInterface> backend,
        LedgerLoaderType& loader,
        LedgerPublisherType& publisher,
        uint32_t startSequence,
        SystemState& state)
        : pipe_(std::ref(pipe))
        , backend_{backend}
        , loader_(std::ref(loader))
        , publisher_(std::ref(publisher))
        , startSequence_{startSequence}
        , state_{std::ref(state)}
    {
        thread_ = std::thread([this]() { process(); });
    }

    /**
     * @brief Joins the transformer thread
     */
    ~Transformer()
    {
        if (thread_.joinable())
            thread_.join();
    }

    /**
     * @brief Block the calling thread until the transformer thread exits
     */
    void
    waitTillFinished()
    {
        assert(thread_.joinable());
        thread_.join();
    }

private:
    void
    process()
    {
        beast::setCurrentThreadName("ETLService transform");
        uint32_t currentSequence = startSequence_;

        while (not hasWriteConflict())
        {
            auto fetchResponse = pipe_.get().popNext(currentSequence);
            ++currentSequence;

            // if fetchResponse is an empty optional, the extractor thread has stopped and the transformer should
            // stop as well
            if (!fetchResponse)
                break;

            if (isStopping())
                continue;

            auto const start = std::chrono::system_clock::now();
            auto [lgrInfo, success] = buildNextLedger(*fetchResponse);

            if (success)
            {
                auto const numTxns = fetchResponse->transactions_list().transactions_size();
                auto const numObjects = fetchResponse->ledger_objects().objects_size();
                auto const end = std::chrono::system_clock::now();

                // convert to seconds without assuming the clock's tick period
                auto const duration = std::chrono::duration<double>(end - start).count();

                log_.info() << "Load phase of etl : "
                            << "Successfully wrote ledger! Ledger info: " << util::toString(lgrInfo)
                            << ". txn count = " << numTxns << ". object count = " << numObjects
                            << ". load time = " << duration << ". load txns per second = " << numTxns / duration
                            << ". load objs per second = " << numObjects / duration;

                // success is false if the ledger was already written, in which case we don't publish
                publisher_.get().publish(lgrInfo);
            }
            else
            {
                log_.error() << "Error writing ledger. " << util::toString(lgrInfo);
            }

            setWriteConflict(not success);
        }
    }
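    // Note on termination: buildNextLedger() returns success == false when this ledger sequence was
    // already written by another writer (see finishWrites below). setWriteConflict(true) then makes
    // the loop condition fail on the next iteration, so this transformer stops writing.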
    // TODO update this documentation
    /**
     * @brief Build the next ledger using the previous ledger and the extracted data.
     * @note rawData should be data that corresponds to the ledger immediately following the previous seq.
     *
     * @param rawData data extracted from an ETL source
     * @return the newly built ledger and data to write to the database
     */
    std::pair<ripple::LedgerInfo, bool>
    buildNextLedger(GetLedgerResponseType& rawData)
    {
        log_.debug() << "Beginning ledger update";
        ripple::LedgerInfo lgrInfo = util::deserializeHeader(ripple::makeSlice(rawData.ledger_header()));

        log_.debug() << "Deserialized ledger header. " << util::toString(lgrInfo);
        backend_->startWrites();
        backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header()));

        writeSuccessors(lgrInfo, rawData);
        updateCache(lgrInfo, rawData);

        log_.debug() << "Inserted/modified/deleted all objects. Number of objects = "
                     << rawData.ledger_objects().objects_size();

        auto insertTxResult = loader_.get().insertTransactions(lgrInfo, rawData);

        log_.debug() << "Inserted all transactions. Number of transactions = "
                     << rawData.transactions_list().transactions_size();

        backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData));
        backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
        backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));

        auto [success, duration] =
            util::timed<std::chrono::duration<double>>([&]() { return backend_->finishWrites(lgrInfo.seq); });

        log_.debug() << "Finished writes. Total time: " << std::to_string(duration);
        log_.debug() << "Finished ledger update: " << util::toString(lgrInfo);

        return {lgrInfo, success};
    }

    /**
     * @brief Update the cache from new ledger data.
     *
     * @param lgrInfo Ledger info
     * @param rawData Ledger data from gRPC
     */
    void
    updateCache(ripple::LedgerInfo const& lgrInfo, GetLedgerResponseType& rawData)
    {
        std::vector<Backend::LedgerObject> cacheUpdates;
        cacheUpdates.reserve(rawData.ledger_objects().objects_size());

        // TODO change these to unordered_set
        std::set<ripple::uint256> bookSuccessorsToCalculate;
        std::set<ripple::uint256> modified;

        for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects()))
        {
            auto key = ripple::uint256::fromVoidChecked(obj.key());
            assert(key);

            cacheUpdates.push_back({*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
            log_.debug() << "key = " << ripple::strHex(*key) << " - mod type = " << obj.mod_type();

            if (obj.mod_type() != RawLedgerObjectType::MODIFIED && !rawData.object_neighbors_included())
            {
                log_.debug() << "object neighbors not included. using cache";

                if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1)
                    throw std::runtime_error("Cache is not full, but object neighbors were not included");

                auto const blob = obj.mutable_data();
                auto checkBookBase = false;
                auto const isDeleted = (blob->size() == 0);

                if (isDeleted)
                {
                    auto const old = backend_->cache().get(*key, lgrInfo.seq - 1);
                    assert(old);
                    checkBookBase = isBookDir(*key, *old);
                }
                else
                {
                    checkBookBase = isBookDir(*key, *blob);
                }

                if (checkBookBase)
                {
                    log_.debug() << "Is book dir. Key = " << ripple::strHex(*key);

                    auto const bookBase = getBookBase(*key);
                    auto const oldFirstDir = backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1);
                    assert(oldFirstDir);

                    // We deleted the first directory, or we added a directory prior to the old first directory
                    if ((isDeleted && key == oldFirstDir->key) || (!isDeleted && key < oldFirstDir->key))
                    {
                        log_.debug() << "Need to recalculate book base successor. base = " << ripple::strHex(bookBase)
                                     << " - key = " << ripple::strHex(*key) << " - isDeleted = " << isDeleted
                                     << " - seq = " << lgrInfo.seq;
                        bookSuccessorsToCalculate.insert(bookBase);
                    }
                }
            }

            if (obj.mod_type() == RawLedgerObjectType::MODIFIED)
                modified.insert(*key);

            backend_->writeLedgerObject(std::move(*obj.mutable_key()), lgrInfo.seq, std::move(*obj.mutable_data()));
        }

        backend_->cache().update(cacheUpdates, lgrInfo.seq);

        // rippled didn't send successor information, so use our cache
        if (!rawData.object_neighbors_included())
        {
            log_.debug() << "object neighbors not included. using cache";
            if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq)
                throw std::runtime_error("Cache is not full, but object neighbors were not included");

            for (auto const& obj : cacheUpdates)
            {
                if (modified.count(obj.key))
                    continue;

                auto lb = backend_->cache().getPredecessor(obj.key, lgrInfo.seq);
                if (!lb)
                    lb = {Backend::firstKey, {}};

                auto ub = backend_->cache().getSuccessor(obj.key, lgrInfo.seq);
                if (!ub)
                    ub = {Backend::lastKey, {}};

                if (obj.blob.size() == 0)
                {
                    log_.debug() << "writing successor for deleted object " << ripple::strHex(obj.key) << " - "
                                 << ripple::strHex(lb->key) << " - " << ripple::strHex(ub->key);

                    backend_->writeSuccessor(uint256ToString(lb->key), lgrInfo.seq, uint256ToString(ub->key));
                }
                else
                {
                    backend_->writeSuccessor(uint256ToString(lb->key), lgrInfo.seq, uint256ToString(obj.key));
                    backend_->writeSuccessor(uint256ToString(obj.key), lgrInfo.seq, uint256ToString(ub->key));

                    log_.debug() << "writing successor for new object " << ripple::strHex(lb->key) << " - "
                                 << ripple::strHex(obj.key) << " - " << ripple::strHex(ub->key);
                }
            }

            for (auto const& base : bookSuccessorsToCalculate)
            {
                auto succ = backend_->cache().getSuccessor(base, lgrInfo.seq);
                if (succ)
                {
                    backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(succ->key));

                    log_.debug() << "Updating book successor " << ripple::strHex(base) << " - "
                                 << ripple::strHex(succ->key);
                }
                else
                {
                    backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(Backend::lastKey));

                    log_.debug() << "Updating book successor " << ripple::strHex(base) << " - "
                                 << ripple::strHex(Backend::lastKey);
                }
            }
        }
    }
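    // The successor table maintained above is, in effect, a singly linked list over all object keys
    // in a ledger: for a newly created object we link predecessor -> key and key -> successor, and
    // for a deleted object we link predecessor -> successor, bridging over the removed key. The same
    // list is maintained by writeSuccessors() below when rippled supplies the neighbors directly.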
    /**
     * @brief Write successor info into the DB
     *
     * @param lgrInfo Ledger info
     * @param rawData Ledger data from gRPC
     */
    void
    writeSuccessors(ripple::LedgerInfo const& lgrInfo, GetLedgerResponseType& rawData)
    {
        // Write successor info, if included from rippled
        if (rawData.object_neighbors_included())
        {
            log_.debug() << "object neighbors included";

            for (auto& obj : *(rawData.mutable_book_successors()))
            {
                auto firstBook = std::move(*obj.mutable_first_book());
                if (!firstBook.size())
                    firstBook = uint256ToString(Backend::lastKey);
                log_.debug() << "writing book successor " << ripple::strHex(obj.book_base()) << " - "
                             << ripple::strHex(firstBook);

                backend_->writeSuccessor(std::move(*obj.mutable_book_base()), lgrInfo.seq, std::move(firstBook));
            }

            for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects()))
            {
                if (obj.mod_type() != RawLedgerObjectType::MODIFIED)
                {
                    std::string* predPtr = obj.mutable_predecessor();
                    if (!predPtr->size())
                        *predPtr = uint256ToString(Backend::firstKey);
                    std::string* succPtr = obj.mutable_successor();
                    if (!succPtr->size())
                        *succPtr = uint256ToString(Backend::lastKey);

                    if (obj.mod_type() == RawLedgerObjectType::DELETED)
                    {
                        log_.debug() << "Modifying successors for deleted object " << ripple::strHex(obj.key())
                                     << " - " << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr);

                        backend_->writeSuccessor(std::move(*predPtr), lgrInfo.seq, std::move(*succPtr));
                    }
                    else
                    {
                        log_.debug() << "adding successor for new object " << ripple::strHex(obj.key()) << " - "
                                     << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr);

                        backend_->writeSuccessor(std::move(*predPtr), lgrInfo.seq, std::string{obj.key()});
                        backend_->writeSuccessor(std::string{obj.key()}, lgrInfo.seq, std::move(*succPtr));
                    }
                }
                else
                    log_.debug() << "object modified " << ripple::strHex(obj.key());
            }
        }
    }

    bool
    isStopping() const
    {
        return state_.get().isStopping;
    }

    bool
    hasWriteConflict() const
    {
        return state_.get().writeConflict;
    }

    void
    setWriteConflict(bool conflict)
    {
        state_.get().writeConflict = conflict;
    }
};

} // namespace clio::detail
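The pipe_.get().popNext(currentSequence) call in process() above is the hand-off point between the extractor and transformer threads. A minimal sketch of such a pipe (hypothetical and heavily simplified; clio's real DataPipeType is more elaborate):

#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>
#include <optional>
#include <string>

class SimplePipe
{
    std::mutex mtx_;
    std::condition_variable cv_;
    std::map<uint32_t, std::string> items_;
    bool done_ = false;

public:
    // Producer (extractor) side: publish the payload for a given sequence.
    void
    push(uint32_t seq, std::string data)
    {
        {
            std::scoped_lock lck{mtx_};
            items_.emplace(seq, std::move(data));
        }
        cv_.notify_all();
    }

    // Producer signals that no more items will arrive.
    void
    finish()
    {
        {
            std::scoped_lock lck{mtx_};
            done_ = true;
        }
        cv_.notify_all();
    }

    // Consumer (transformer) side: blocks until the item for `seq` arrives.
    // An empty optional signals that the producer stopped, mirroring popNext above.
    std::optional<std::string>
    popNext(uint32_t seq)
    {
        std::unique_lock lck{mtx_};
        cv_.wait(lck, [&] { return items_.count(seq) || done_; });
        if (auto it = items_.find(seq); it != items_.end())
        {
            auto data = std::move(it->second);
            items_.erase(it);
            return data;
        }
        return std::nullopt;
    }
};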
192 src/log/Logger.cpp Normal file
@@ -0,0 +1,192 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <config/Config.h>
#include <log/Logger.h>

#include <algorithm>
#include <array>
#include <filesystem>

namespace clio {

Logger LogService::general_log_ = Logger{"General"};
Logger LogService::alert_log_ = Logger{"Alert"};

std::ostream&
operator<<(std::ostream& stream, Severity sev)
{
    static constexpr std::array<const char*, 6> labels = {
        "TRC",
        "DBG",
        "NFO",
        "WRN",
        "ERR",
        "FTL",
    };

    return stream << labels.at(static_cast<int>(sev));
}

Severity
tag_invoke(boost::json::value_to_tag<Severity>, boost::json::value const& value)
{
    if (not value.is_string())
        throw std::runtime_error("`log_level` must be a string");
    auto const& logLevel = value.as_string();

    if (boost::iequals(logLevel, "trace"))
        return Severity::TRC;
    else if (boost::iequals(logLevel, "debug"))
        return Severity::DBG;
    else if (boost::iequals(logLevel, "info"))
        return Severity::NFO;
    else if (boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn"))
        return Severity::WRN;
    else if (boost::iequals(logLevel, "error"))
        return Severity::ERR;
    else if (boost::iequals(logLevel, "fatal"))
        return Severity::FTL;
    else
        throw std::runtime_error(
            "Could not parse `log_level`: expected `trace`, `debug`, `info`, "
            "`warning`, `error` or `fatal`");
}

void
LogService::init(Config const& config)
{
    namespace src = boost::log::sources;
    namespace keywords = boost::log::keywords;
    namespace sinks = boost::log::sinks;

    boost::log::add_common_attributes();
    boost::log::register_simple_formatter_factory<Severity, char>("Severity");
    auto const defaultFormat =
        "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% "
        "%Message%";
    std::string format = config.valueOr<std::string>("log_format", defaultFormat);

    if (config.valueOr("log_to_console", false))
    {
        boost::log::add_console_log(std::cout, keywords::format = format);
    }

    auto logDir = config.maybeValue<std::string>("log_directory");
    if (logDir)
    {
        boost::filesystem::path dirPath{logDir.value()};
        if (!boost::filesystem::exists(dirPath))
            boost::filesystem::create_directories(dirPath);
        auto const rotationSize = config.valueOr<uint64_t>("log_rotation_size", 2048u) * 1024u * 1024u;
        auto const rotationPeriod = config.valueOr<uint32_t>("log_rotation_hour_interval", 12u);
        auto const dirSize = config.valueOr<uint64_t>("log_directory_max_size", 50u * 1024u) * 1024u * 1024u;
        auto fileSink = boost::log::add_file_log(
            keywords::file_name = dirPath / "clio.log",
            keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
            keywords::auto_flush = true,
            keywords::format = format,
            keywords::open_mode = std::ios_base::app,
            keywords::rotation_size = rotationSize,
            keywords::time_based_rotation =
                sinks::file::rotation_at_time_interval(boost::posix_time::hours(rotationPeriod)));
        fileSink->locked_backend()->set_file_collector(
            sinks::file::make_collector(keywords::target = dirPath, keywords::max_size = dirSize));
        fileSink->locked_backend()->scan_for_files();
    }

    // get the default severity; it can be overridden per channel using
    // the `log_channels` array
    auto defaultSeverity = config.valueOr<Severity>("log_level", Severity::NFO);
    static constexpr std::array<const char*, 7> channels = {
        "General",
        "WebServer",
        "Backend",
        "RPC",
        "ETL",
        "Subscriptions",
        "Performance",
    };

    auto core = boost::log::core::get();
    auto min_severity = boost::log::expressions::channel_severity_filter(log_channel, log_severity);

    for (auto const& channel : channels)
        min_severity[channel] = defaultSeverity;
    min_severity["Alert"] = Severity::WRN;  // channel for alerts, always warning severity

    for (auto const overrides = config.arrayOr("log_channels", {}); auto const& cfg : overrides)
    {
        auto name = cfg.valueOrThrow<std::string>("channel", "Channel name is required");
        if (not std::count(std::begin(channels), std::end(channels), name))
            throw std::runtime_error("Can't override settings for log channel " + name + ": invalid channel");

        min_severity[name] = cfg.valueOr<Severity>("log_level", defaultSeverity);
    }

    core->set_filter(min_severity);
    LogService::info() << "Default log level = " << defaultSeverity;
}

Logger::Pump
Logger::trace(source_location_t const& loc) const
{
    return {logger_, Severity::TRC, loc};
}

Logger::Pump
Logger::debug(source_location_t const& loc) const
{
    return {logger_, Severity::DBG, loc};
}

Logger::Pump
Logger::info(source_location_t const& loc) const
{
    return {logger_, Severity::NFO, loc};
}

Logger::Pump
Logger::warn(source_location_t const& loc) const
{
    return {logger_, Severity::WRN, loc};
}

Logger::Pump
Logger::error(source_location_t const& loc) const
{
    return {logger_, Severity::ERR, loc};
}

Logger::Pump
Logger::fatal(source_location_t const& loc) const
{
    return {logger_, Severity::FTL, loc};
}

std::string
Logger::Pump::pretty_path(source_location_t const& loc, size_t max_depth) const
{
    auto const file_path = std::string{loc.file_name()};
    auto idx = file_path.size();
    while (max_depth-- > 0)
    {
        idx = file_path.rfind('/', idx - 1);
        if (idx == std::string::npos || idx == 0)
            break;
    }
    return file_path.substr(idx == std::string::npos ? 0 : idx + 1) + ':' + std::to_string(loc.line());
}

} // namespace clio
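The per-channel filtering in init() above is built on boost::log's channel_severity_filter. A self-contained sketch of just that mechanism (hypothetical names, independent of clio's Config):

#include <boost/log/core.hpp>
#include <boost/log/expressions/keyword.hpp>
#include <boost/log/expressions/predicates/channel_severity_filter.hpp>
#include <boost/log/sources/record_ostream.hpp>
#include <boost/log/sources/severity_channel_logger.hpp>
#include <boost/log/sources/severity_feature.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/console.hpp>
#include <iostream>
#include <string>

enum class Sev { Debug, Info, Warning };  // scoped enums compare with >=, which the filter relies on

BOOST_LOG_ATTRIBUTE_KEYWORD(a_severity, "Severity", Sev)
BOOST_LOG_ATTRIBUTE_KEYWORD(a_channel, "Channel", std::string)

int main()
{
    boost::log::add_common_attributes();
    boost::log::add_console_log(std::cout);  // default formatter is fine for a demo

    // Each channel gets its own minimum severity, exactly like min_severity[...] in init().
    auto filter = boost::log::expressions::channel_severity_filter(a_channel, a_severity);
    filter["ETL"] = Sev::Debug;        // verbose channel
    filter["General"] = Sev::Warning;  // quiet channel
    boost::log::core::get()->set_filter(filter);

    boost::log::sources::severity_channel_logger_mt<Sev, std::string> etl{
        boost::log::keywords::channel = "ETL"};
    boost::log::sources::severity_channel_logger_mt<Sev, std::string> gen{
        boost::log::keywords::channel = "General"};

    BOOST_LOG_SEV(etl, Sev::Debug) << "visible: ETL allows Debug";
    BOOST_LOG_SEV(gen, Sev::Info) << "filtered out: General requires Warning";
    BOOST_LOG_SEV(gen, Sev::Warning) << "visible: meets General's threshold";
}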
306 src/log/Logger.h Normal file
@@ -0,0 +1,306 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <boost/algorithm/string/predicate.hpp>
#include <boost/filesystem.hpp>
#include <boost/json.hpp>
#include <boost/log/core/core.hpp>
#include <boost/log/expressions/predicates/channel_severity_filter.hpp>
#include <boost/log/sinks/unlocked_frontend.hpp>
#include <boost/log/sources/record_ostream.hpp>
#include <boost/log/sources/severity_channel_logger.hpp>
#include <boost/log/sources/severity_feature.hpp>
#include <boost/log/sources/severity_logger.hpp>
#include <boost/log/utility/manipulators/add_value.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/console.hpp>
#include <boost/log/utility/setup/file.hpp>
#include <boost/log/utility/setup/formatter_parser.hpp>

#if defined(HAS_SOURCE_LOCATION) && __has_builtin(__builtin_source_location)
// this is used by fully compatible compilers like gcc
#include <source_location>

#elif defined(HAS_EXPERIMENTAL_SOURCE_LOCATION)
// this is used by clang on linux, where source_location has still not moved
// out of the experimental headers
#include <experimental/source_location>
#endif

#include <optional>
#include <string>

namespace clio {

class Config;
#if defined(HAS_SOURCE_LOCATION) && __has_builtin(__builtin_source_location)
using source_location_t = std::source_location;
#define CURRENT_SRC_LOCATION source_location_t::current()

#elif defined(HAS_EXPERIMENTAL_SOURCE_LOCATION)
using source_location_t = std::experimental::source_location;
#define CURRENT_SRC_LOCATION source_location_t::current()

#else
// A workaround for AppleClang, which still lacks source_location.
// TODO: remove this workaround when all compilers catch up to C++20
class SourceLocation
{
    std::string_view file_;
    std::size_t line_;

public:
    SourceLocation(std::string_view file, std::size_t line) : file_{file}, line_{line}
    {
    }
    std::string_view
    file_name() const
    {
        return file_;
    }
    std::size_t
    line() const
    {
        return line_;
    }
};
using source_location_t = SourceLocation;
#define CURRENT_SRC_LOCATION source_location_t(__builtin_FILE(), __builtin_LINE())
#endif

/**
 * @brief Custom severity levels for @ref Logger.
 */
enum class Severity {
    TRC,
    DBG,
    NFO,
    WRN,
    ERR,
    FTL,
};

BOOST_LOG_ATTRIBUTE_KEYWORD(log_severity, "Severity", Severity);
BOOST_LOG_ATTRIBUTE_KEYWORD(log_channel, "Channel", std::string);

/**
 * @brief Custom labels for @ref Severity in log output.
 *
 * @param stream std::ostream The output stream
 * @param sev Severity The severity to output to the ostream
 * @return std::ostream& The same ostream we were given
 */
std::ostream&
operator<<(std::ostream& stream, Severity sev);

/**
 * @brief A simple thread-safe logger for the channel specified
 * in the constructor.
 *
 * This is cheap to copy and move. Designed to be used as a member variable or
 * otherwise. See @ref LogService::init() for setup of the logging core and
 * severity levels for each channel.
 */
class Logger final
{
    using logger_t = boost::log::sources::severity_channel_logger_mt<Severity, std::string>;
    mutable logger_t logger_;

    friend class LogService;  // to expose the Pump interface

    /**
     * @brief Helper that pumps data into a log record via `operator<<`.
     */
    class Pump final
    {
        using pump_opt_t = std::optional<boost::log::aux::record_pump<logger_t>>;

        boost::log::record rec_;
        pump_opt_t pump_ = std::nullopt;

    public:
        ~Pump() = default;
        Pump(logger_t& logger, Severity sev, source_location_t const& loc)
            : rec_{logger.open_record(boost::log::keywords::severity = sev)}
        {
            if (rec_)
            {
                pump_.emplace(boost::log::aux::make_record_pump(logger, rec_));
                pump_->stream() << boost::log::add_value("SourceLocation", pretty_path(loc));
            }
        }

        Pump(Pump&&) = delete;
        Pump(Pump const&) = delete;
        Pump&
        operator=(Pump const&) = delete;
        Pump&
        operator=(Pump&&) = delete;

        /**
         * @brief Perfectly forwards any incoming data into the underlying
         * boost::log pump if the pump is available. No-op otherwise.
         *
         * @tparam T Type of data to pump
         * @param data The data to pump
         * @return Pump& Reference to itself for chaining
         */
        template <typename T>
        [[maybe_unused]] Pump&
        operator<<(T&& data)
        {
            if (pump_)
                pump_->stream() << std::forward<T>(data);
            return *this;
        }

    private:
        [[nodiscard]] std::string
        pretty_path(source_location_t const& loc, size_t max_depth = 3) const;

        /**
         * @brief Custom JSON parser for @ref Severity.
         *
         * @param value The JSON string to parse
         * @return Severity The parsed severity
         * @throws std::runtime_error Thrown if the severity is not in the right format
         */
        friend Severity
        tag_invoke(boost::json::value_to_tag<Severity>, boost::json::value const& value);
    };

public:
    ~Logger() = default;
    /**
     * @brief Construct a new Logger object that produces loglines for the
     * specified channel.
     *
     * See @ref LogService::init() for general setup and configuration of
     * severity levels per channel.
     *
     * @param channel The channel this logger will report into.
     */
    Logger(std::string channel) : logger_{boost::log::keywords::channel = channel}
    {
    }
    Logger(Logger const&) = default;
    Logger(Logger&&) = default;
    Logger&
    operator=(Logger const&) = default;
    Logger&
    operator=(Logger&&) = default;

    /*! Interface for logging at @ref Severity::TRC severity */
    [[nodiscard]] Pump
    trace(source_location_t const& loc = CURRENT_SRC_LOCATION) const;

    /*! Interface for logging at @ref Severity::DBG severity */
    [[nodiscard]] Pump
    debug(source_location_t const& loc = CURRENT_SRC_LOCATION) const;

    /*! Interface for logging at @ref Severity::NFO severity */
    [[nodiscard]] Pump
    info(source_location_t const& loc = CURRENT_SRC_LOCATION) const;

    /*! Interface for logging at @ref Severity::WRN severity */
    [[nodiscard]] Pump
    warn(source_location_t const& loc = CURRENT_SRC_LOCATION) const;

    /*! Interface for logging at @ref Severity::ERR severity */
    [[nodiscard]] Pump
    error(source_location_t const& loc = CURRENT_SRC_LOCATION) const;

    /*! Interface for logging at @ref Severity::FTL severity */
    [[nodiscard]] Pump
    fatal(source_location_t const& loc = CURRENT_SRC_LOCATION) const;
};

/**
 * @brief A global logging service.
 *
 * Used to initialize and set up the logging core, and serves as a globally
 * available entrypoint for logging into the `General` channel as well as for
 * raising alerts.
 */
class LogService
{
    static Logger general_log_; /*! Global logger for the General channel */
    static Logger alert_log_;   /*! Global logger for the Alert channel */

public:
    LogService() = delete;

    /**
     * @brief Global log core initialization from a @ref Config
     */
    static void
    init(Config const& config);

    /*! Globally accessible General logger at @ref Severity::TRC severity */
    [[nodiscard]] static Logger::Pump
    trace(source_location_t const& loc = CURRENT_SRC_LOCATION)
    {
        return general_log_.trace(loc);
    }

    /*! Globally accessible General logger at @ref Severity::DBG severity */
    [[nodiscard]] static Logger::Pump
    debug(source_location_t const& loc = CURRENT_SRC_LOCATION)
    {
        return general_log_.debug(loc);
    }

    /*! Globally accessible General logger at @ref Severity::NFO severity */
    [[nodiscard]] static Logger::Pump
    info(source_location_t const& loc = CURRENT_SRC_LOCATION)
    {
        return general_log_.info(loc);
    }

    /*! Globally accessible General logger at @ref Severity::WRN severity */
    [[nodiscard]] static Logger::Pump
    warn(source_location_t const& loc = CURRENT_SRC_LOCATION)
    {
        return general_log_.warn(loc);
    }

    /*! Globally accessible General logger at @ref Severity::ERR severity */
    [[nodiscard]] static Logger::Pump
    error(source_location_t const& loc = CURRENT_SRC_LOCATION)
    {
        return general_log_.error(loc);
    }

    /*! Globally accessible General logger at @ref Severity::FTL severity */
    [[nodiscard]] static Logger::Pump
    fatal(source_location_t const& loc = CURRENT_SRC_LOCATION)
    {
        return general_log_.fatal(loc);
    }

    /*! Globally accessible Alert logger */
    [[nodiscard]] static Logger::Pump
    alert(source_location_t const& loc = CURRENT_SRC_LOCATION)
    {
        return alert_log_.warn(loc);
    }
};

} // namespace clio
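A brief usage sketch of the API above (assuming LogService::init() was already called with a parsed Config; variable names here are illustrative only):

clio::Logger log{"ETL"};                  // channel must be one registered in init()
log.info() << "loaded ledger " << 12345;  // source location is captured automatically
LogService::warn() << "goes to the General channel";
LogService::alert() << "goes to the Alert channel at warning severity";

// Severity also parses straight out of JSON config values via the tag_invoke overload:
auto sev = boost::json::value_to<clio::Severity>(boost::json::value{"debug"});  // == Severity::DBG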
@@ -1,5 +1,23 @@
#ifndef CLIO_BUILD_INFO_H
#define CLIO_BUILD_INFO_H
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <string>

@@ -12,5 +30,3 @@ std::string const&
getClioFullVersionString();

} // namespace Build

#endif // CLIO_BUILD_INFO_H

@@ -1,59 +0,0 @@
#include <ripple/beast/core/SemanticVersion.h>
#include <boost/preprocessor/stringize.hpp>
#include <algorithm>
#include <main/Build.h>
#include <optional>
#include <stdexcept>

namespace Build {

//------------------------------------------------------------------------------
// The build version number. You must edit this for each release
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "1.0.3"
// clang-format on

#if defined(DEBUG) || defined(SANITIZER)
    "+"
#ifdef CLIO_GIT_COMMIT_HASH
    CLIO_GIT_COMMIT_HASH
    "."
#endif
#ifdef DEBUG
    "DEBUG"
#ifdef SANITIZER
    "."
#endif
#endif

#ifdef SANITIZER
    BOOST_PP_STRINGIZE(SANITIZER)
#endif
#endif

//------------------------------------------------------------------------------
;

std::string const&
getClioVersionString()
{
    static std::string const value = [] {
        std::string const s = versionString;
        beast::SemanticVersion v;
        if (!v.parse(s) || v.print() != s)
            throw std::runtime_error(s + ": Bad server version string");
        return s;
    }();
    return value;
}

std::string const&
getClioFullVersionString()
{
    static std::string const value = "clio-" + getClioVersionString();
    return value;
}

} // namespace Build
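The preprocessor block in versionString relies on adjacent string literals merging at compile time, so a debug build with a commit hash yields something like "1.0.3+abc1234.DEBUG". The round-trip check in getClioVersionString() then enforces semver validity. A minimal sketch of that same check, using ripple's beast::SemanticVersion as above (the version string here is hypothetical):

beast::SemanticVersion v;
std::string const s = "1.0.3+abc1234.DEBUG";  // hypothetical full version string
if (!v.parse(s) || v.print() != s)            // parse must succeed and print back identically
    throw std::runtime_error(s + ": Bad server version string");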
Some files were not shown because too many files have changed in this diff.