Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-04 20:05:51 +00:00)

Compare commits: 84 commits
Commit list (only the abbreviated SHA1s are preserved in this view; author, date, and message columns are empty):

69f5025a29, d1c41a8bb7, 207ba51461, ebe7688ccb, 6d9f8a7ead, 6ca777ea96,
963685dd31, e36545058d, 44527140f0, 0eaaa1fb31, 1846f629a5, 83af5af3c6,
418a0ddbf2, 6cfbfda014, 91648f98ad, 71e1637c5f, 59cd2ce5aa, d783edd57a,
1ce8a58167, 92e5c4792b, d7f36733bc, 435d56e7c5, bf3b24867c, ec70127050,
547cb340bd, c20b14494a, 696b1a585c, 23442ff1a7, db4046e02a, fc1b5ae4da,
5411fd7497, f6488f7024, e3ada6c5da, d61d702ccd, 4d42cb3cdb, 111b55b397,
c90bc15959, 1804e3e9c0, 24f69acd9e, 98d0a963dc, 665890d410, 545886561f,
68eec01dbc, 02621fe02e, 6ad72446d1, 1d0a43669b, 71aabc8c29, 6b98579bfb,
375ac2ffa6, c6ca650767, 2336148d0d, 12178abf4d, b8705ae086, b83d7478ef,
4fd6d51d21, d195bdb66d, 50dbb51627, 2f369e175c, 47e03a7da3, d7b84a2e7a,
e79425bc21, 7710468f37, 210d7fdbc8, ba8e7188ca, 271323b0f4, 7b306f3ba0,
73805d44ad, f19772907d, 616f0176c9, 9f4f5d319e, dcbc4577c2, f4d8e18bf7,
b3e001ebfb, 524821c0b0, a292a607c2, 81894c0a90, 0a7def18cd, 1e969ba13b,
ef62718a27, aadd9e50f0, d9e89746a4, 557ea5d7f6, 4cc3b3ec0f, a960471ef4
.github/actions/build_clio/action.yml (vendored, new file)
@@ -0,0 +1,36 @@
name: Build clio
description: Build clio in build directory
inputs:
  conan_profile:
    description: Conan profile name
    required: true
    default: default
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
runs:
  using: composite
  steps:
    - name: Get number of threads on mac
      id: mac_threads
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT

    - name: Get number of threads on Linux
      id: linux_threads
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT

    - name: Build Clio
      shell: bash
      env:
        BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
      run: |
        mkdir -p build
        cd build
        threads_num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
        conan install .. -of . -b $BUILD_OPTION -s build_type=Release -o clio:tests=True --profile ${{ inputs.conan_profile }}
        cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -G Ninja
        cmake --build . --parallel $threads_num
.github/actions/clang_format/action.yml (path inferred from the workflow below)
@@ -1,3 +1,5 @@
name: Check format
description: Check format using clang-format-11
runs:
  using: composite
  steps:
.github/actions/git_common_ancestor/action.yml (vendored, new file)
@@ -0,0 +1,14 @@
name: Git common ancestor
description: Find the closest common commit
outputs:
  commit:
    description: Hash of commit
    value: ${{ steps.find_common_ancestor.outputs.commit }}
runs:
  using: composite
  steps:
    - name: Find common git ancestor
      id: find_common_ancestor
      shell: bash
      run: |
        echo "commit=$(git merge-base --fork-point origin/develop)" >> $GITHUB_OUTPUT
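The --fork-point lookup only succeeds when the history of origin/develop is available locally, which is why the workflow below checks out with fetch-depth: 0. A minimal sketch of reproducing the lookup outside CI (assuming a clone whose remote has a develop branch):

    git fetch origin develop                                # make sure origin/develop is present and current
    ancestor=$(git merge-base --fork-point origin/develop)  # same command the action runs
    echo "ccache cache key will be derived from: $ancestor"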
.github/actions/restore_cache/action.yml (vendored, new file)
@@ -0,0 +1,50 @@
name: Restore cache
description: Finds and restores conan and ccache caches
inputs:
  conan_dir:
    description: Path to .conan directory
    required: true
  ccache_dir:
    description: Path to .ccache directory
    required: true
outputs:
  conan_hash:
    description: Hash to use as a part of conan cache key
    value: ${{ steps.conan_hash.outputs.hash }}
  conan_cache_hit:
    description: True if conan cache has been downloaded
    value: ${{ steps.conan_cache.outputs.cache-hit }}
  ccache_cache_hit:
    description: True if ccache cache has been downloaded
    value: ${{ steps.ccache_cache.outputs.cache-hit }}
runs:
  using: composite
  steps:
    - name: Find common commit
      id: git_common_ancestor
      uses: ./.github/actions/git_common_ancestor

    - name: Calculate conan hash
      id: conan_hash
      shell: bash
      run: |
        conan info . -j info.json
        packages_info=$(cat info.json | jq '.[] | "\(.display_name): \(.id)"' | grep -v 'clio')
        echo "$packages_info"
        hash=$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)
        rm info.json
        echo "hash=$hash" >> $GITHUB_OUTPUT

    - name: Restore conan cache
      uses: actions/cache/restore@v3
      id: conan_cache
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-develop-${{ steps.conan_hash.outputs.hash }}

    - name: Restore ccache cache
      uses: actions/cache/restore@v3
      id: ccache_cache
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/save_cache/action.yml (vendored, new file)
@@ -0,0 +1,46 @@
name: Save cache
description: Save conan and ccache cache for develop branch
inputs:
  conan_dir:
    description: Path to .conan directory
    required: true
  conan_hash:
    description: Hash to use as a part of conan cache key
    required: true
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
  ccache_dir:
    description: Path to .ccache directory
    required: true
  ccache_cache_hit:
    description: Whether ccache cache has been downloaded
    required: true
runs:
  using: composite
  steps:
    - name: Find common commit
      id: git_common_ancestor
      uses: ./.github/actions/git_common_ancestor

    - name: Cleanup conan directory from extra data
      if: ${{ inputs.conan_cache_hit != 'true' }}
      shell: bash
      run: |
        conan remove "*" -s -b -f

    - name: Save conan cache
      if: ${{ inputs.conan_cache_hit != 'true' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-develop-${{ inputs.conan_hash }}

    - name: Save ccache cache
      if: ${{ inputs.ccache_cache_hit != 'true' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/setup_conan/action.yml (vendored, new file)
@@ -0,0 +1,55 @@
name: Setup conan
description: Setup conan profile and artifactory
outputs:
  conan_profile:
    description: Created conan profile name
    value: ${{ steps.conan_export_output.outputs.conan_profile }}
runs:
  using: composite
  steps:
    - name: On mac
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      env:
        CONAN_PROFILE: clio_clang_14
      id: conan_setup_mac
      run: |
        echo "Creating $CONAN_PROFILE conan profile";
        clang_path="$(brew --prefix llvm@14)/bin/clang"
        clang_cxx_path="$(brew --prefix llvm@14)/bin/clang++"
        conan profile new $CONAN_PROFILE --detect --force
        conan profile update settings.compiler=clang $CONAN_PROFILE
        conan profile update settings.compiler.version=14 $CONAN_PROFILE
        conan profile update settings.compiler.cppstd=20 $CONAN_PROFILE
        conan profile update "conf.tools.build:compiler_executables={\"c\": \"$clang_path\", \"cpp\": \"$clang_cxx_path\"}" $CONAN_PROFILE
        conan profile update env.CC="$clang_path" $CONAN_PROFILE
        conan profile update env.CXX="$clang_cxx_path" $CONAN_PROFILE
        echo "created_conan_profile=$CONAN_PROFILE" >> $GITHUB_OUTPUT

    - name: On linux
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      id: conan_setup_linux
      run: |
        conan profile new default --detect
        conan profile update settings.compiler.cppstd=20 default
        conan profile update settings.compiler.libcxx=libstdc++11 default
        echo "created_conan_profile=default" >> $GITHUB_OUTPUT

    - name: Export output variable
      shell: bash
      id: conan_export_output
      run: |
        echo "conan_profile=${{ steps.conan_setup_mac.outputs.created_conan_profile || steps.conan_setup_linux.outputs.created_conan_profile }}" >> $GITHUB_OUTPUT

    - name: Add conan-non-prod artifactory
      shell: bash
      run: |
        if [[ -z $(conan remote list | grep conan-non-prod) ]]; then
          echo "Adding conan-non-prod"
          conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        else
          echo "Conan-non-prod is available"
        fi
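After these steps run (or after reproducing them by hand), the result can be sanity-checked with standard Conan 1.x commands; a sketch, where clio_clang_14 is the macOS profile name defined above and default is the Linux one:

    conan profile show clio_clang_14   # or: conan profile show default
    conan remote list                  # conan-non-prod should be listed first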
.github/workflows/build.yml (vendored)
@@ -1,9 +1,9 @@
name: Build Clio
on:
  push:
    branches: [master, release/*, develop, develop-next]
    branches: [master, release/*, develop]
  pull_request:
    branches: [master, release/*, develop, develop-next]
    branches: [master, release/*, develop]
  workflow_dispatch:

jobs:
@@ -13,200 +13,142 @@ jobs:
    steps:
      - uses: actions/checkout@v3
      - name: Run clang-format
        uses: ./.github/actions/lint
        uses: ./.github/actions/clang_format

  build_clio:
    name: Build Clio
    runs-on: [self-hosted, heavy]
  build_mac:
    name: Build macOS
    needs: lint
    strategy:
      fail-fast: false
      matrix:
        type:
          - suffix: deb
            image: rippleci/clio-dpkg-builder:2022-09-17
            script: dpkg
          - suffix: rpm
            image: rippleci/clio-rpm-builder:2022-09-17
            script: rpm

    container:
      image: ${{ matrix.type.image }}
    runs-on: [self-hosted, macOS]
    env:
      CCACHE_DIR: ${{ github.workspace }}/.ccache
      CONAN_USER_HOME: ${{ github.workspace }}
    steps:
      - uses: actions/checkout@v3
        with:
          path: clio
          fetch-depth: 0
          fetch-depth: 0

      - name: Clone Clio packaging repo
      - name: Install packages
        run: |
          brew install llvm@14 pkg-config ninja bison cmake ccache jq

      - name: Setup conan
        uses: ./.github/actions/setup_conan
        id: conan

      - name: Restore cache
        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          ccache_dir: ${{ env.CCACHE_DIR }}

      - name: Build Clio
        uses: ./.github/actions/build_clio
        with:
          conan_profile: ${{ steps.conan.outputs.conan_profile }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}

      - name: Strip tests
        run: strip build/clio_tests

      - name: Upload clio_tests
        uses: actions/upload-artifact@v3
        with:
          name: clio_tests_mac
          path: build/clio_tests

      - name: Save cache
        uses: ./.github/actions/save_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          ccache_dir: ${{ env.CCACHE_DIR }}
          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}

  build_linux:
    name: Build linux
    needs: lint
    runs-on: [self-hosted, Linux]
    container:
      image: conanio/gcc11:1.61.0
      options: --user root
    env:
      CCACHE_DIR: /root/.ccache
      CONAN_USER_HOME: /root/
    steps:
      - name: Get Clio
        uses: actions/checkout@v3
        with:
          path: clio-packages
          repository: XRPLF/clio-packages
          ref: main
          fetch-depth: 0

      - name: Build
        shell: bash
      - name: Install packages
        run: |
          export CLIO_ROOT=$(realpath clio)
          if [ ${{ matrix.type.suffix }} == "rpm" ]; then
            source /opt/rh/devtoolset-11/enable
          fi
          cmake -S clio-packages -B clio-packages/build -DCLIO_ROOT=$CLIO_ROOT
          cmake --build clio-packages/build --parallel $(nproc)
          cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests .
          mv ./clio-packages/build/*.${{ matrix.type.suffix }} .
      - name: Artifact packages
          apt update -qq
          apt install -y jq

      - name: Install ccache
        run: |
          wget https://github.com/ccache/ccache/releases/download/v4.8.3/ccache-4.8.3-linux-x86_64.tar.xz
          tar xf ./ccache-4.8.3-linux-x86_64.tar.xz
          mv ./ccache-4.8.3-linux-x86_64/ccache /usr/bin/ccache

      - name: Fix git permissions
        run: git config --global --add safe.directory $PWD

      - name: Setup conan
        uses: ./.github/actions/setup_conan

      - name: Restore cache
        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          ccache_dir: ${{ env.CCACHE_DIR }}

      - name: Build Clio
        uses: ./.github/actions/build_clio
        with:
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}

      - name: Strip tests
        run: strip build/clio_tests

      - name: Upload clio_tests
        uses: actions/upload-artifact@v3
        with:
          name: clio_${{ matrix.type.suffix }}_packages
          path: ${{ github.workspace }}/*.${{ matrix.type.suffix }}
          name: clio_tests_linux
          path: build/clio_tests

      - name: Artifact clio_tests
        uses: actions/upload-artifact@v3
      - name: Save cache
        uses: ./.github/actions/save_cache
        with:
          name: clio_tests-${{ matrix.type.suffix }}
          path: ${{ github.workspace }}/clio_tests
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          ccache_dir: ${{ env.CCACHE_DIR }}
          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}

  build_dev:
    name: Build on Mac/Clang14 and run tests
    needs: lint
    continue-on-error: false
  test_mac:
    needs: build_mac
    runs-on: [self-hosted, macOS]

    steps:
      - uses: actions/checkout@v3
      - uses: actions/download-artifact@v3
        with:
          path: clio

      - name: Check Boost cache
        id: boost
        uses: actions/cache@v3
        with:
          path: boost_1_77_0
          key: ${{ runner.os }}-boost

      - name: Build Boost
        if: ${{ steps.boost.outputs.cache-hit != 'true' }}
          name: clio_tests_mac
      - name: Run clio_tests
        run: |
          rm -rf boost_1_77_0.tar.gz boost_1_77_0 # cleanup if needed first
          curl -s -fOJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
          tar zxf boost_1_77_0.tar.gz
          cd boost_1_77_0
          ./bootstrap.sh
          ./b2 define=BOOST_ASIO_HAS_STD_INVOKE_RESULT cxxflags="-std=c++20"

      - name: Install dependencies
        run: |
          brew install llvm@14 pkg-config protobuf openssl ninja cassandra-cpp-driver bison cmake

      - name: Setup environment for llvm-14
        run: |
          export PATH="/usr/local/opt/llvm@14/bin:$PATH"
          export LDFLAGS="-L/usr/local/opt/llvm@14/lib -L/usr/local/opt/llvm@14/lib/c++ -Wl,-rpath,/usr/local/opt/llvm@14/lib/c++"
          export CPPFLAGS="-I/usr/local/opt/llvm@14/include"

      - name: Build clio
        run: |
          export BOOST_ROOT=$(pwd)/boost_1_77_0
          cd clio
          cmake -B build -DCMAKE_C_COMPILER='/usr/local/opt/llvm@14/bin/clang' -DCMAKE_CXX_COMPILER='/usr/local/opt/llvm@14/bin/clang++'
          if ! cmake --build build -j; then
            echo '# 🔥🔥 MacOS AppleClang build failed!💥' >> $GITHUB_STEP_SUMMARY
            exit 1
          fi
      - name: Run Test
        run: |
          cd clio/build
          chmod +x ./clio_tests
          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

  test_clio:
    name: Test Clio
    runs-on: [self-hosted, Linux]
    needs: build_clio
    strategy:
      fail-fast: false
      matrix:
        suffix: [rpm, deb]
  test_linux:
    needs: build_linux
    runs-on: [self-hosted, x-heavy]
    steps:
      - uses: actions/checkout@v3

      - name: Get clio_tests artifact
        uses: actions/download-artifact@v3
      - uses: actions/download-artifact@v3
        with:
          name: clio_tests-${{ matrix.suffix }}

      - name: Run tests
        timeout-minutes: 10
        uses: ./.github/actions/test

  code_coverage:
    name: Build on Linux and code coverage
    needs: lint
    continue-on-error: false
    runs-on: ubuntu-22.04

    steps:
      - uses: actions/checkout@v3
        with:
          path: clio

      - name: Check Boost cache
        id: boost
        uses: actions/cache@v3
        with:
          path: boost
          key: ${{ runner.os }}-boost

      - name: Build boost
        if: steps.boost.outputs.cache-hit != 'true'
          name: clio_tests_linux
      - name: Run clio_tests
        run: |
          curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
          tar zxf boost_1_77_0.tar.gz
          mv boost_1_77_0 boost
          cd boost
          ./bootstrap.sh
          ./b2
      - name: install deps
        run: |
          sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential doxygen bison flex autoconf clang-format gcovr
      - name: Build clio
        run: |
          export BOOST_ROOT=$(pwd)/boost
          cd clio
          cmake -B build -DCODE_COVERAGE=on -DTEST_PARAMETER='--gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"'
          if ! cmake --build build -j$(nproc); then
            echo '# 🔥Ubuntu build🔥 failed!💥' >> $GITHUB_STEP_SUMMARY
            exit 1
          fi
          cd build
          make clio_tests-ccov
      - name: Code Coverage Summary Report
        uses: irongut/CodeCoverageSummary@v1.2.0
        with:
          filename: clio/build/clio_tests-gcc-cov/out.xml
          badge: true
          output: both
          format: markdown

      - name: Save PR number and ccov report
        run: |
          mkdir -p ./UnitTestCoverage
          echo ${{ github.event.number }} > ./UnitTestCoverage/NR
          cp clio/build/clio_tests-gcc-cov/report.html ./UnitTestCoverage/report.html
          cp code-coverage-results.md ./UnitTestCoverage/out.md
          cat code-coverage-results.md > $GITHUB_STEP_SUMMARY
      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3
        with:
          files: clio/build/clio_tests-gcc-cov/out.xml

      - uses: actions/upload-artifact@v3
        with:
          name: UnitTestCoverage
          path: UnitTestCoverage/

      - uses: actions/upload-artifact@v3
        with:
          name: code_coverage_report
          path: clio/build/clio_tests-gcc-cov/out.xml
          chmod +x ./clio_tests
          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
.gitignore (vendored)
@@ -1,6 +1,9 @@
*clio*.log
build*/
/build*/
.build
.cache
.vscode
.python-version
CMakeUserPresets.json
config.json
src/main/impl/Build.cpp
CMake/Ccache.cmake (new file)
@@ -0,0 +1,5 @@
find_program (CCACHE_PATH "ccache")
if (CCACHE_PATH)
  set (CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
  message (STATUS "Using ccache: ${CCACHE_PATH}")
endif ()
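Since the launcher is only injected when find_program locates ccache, a quick way to confirm the cache is actually being exercised is to compare its statistics around a rebuild (a sketch using standard ccache flags; "build" is an assumed build directory name):

    ccache -z                       # zero the statistics
    cmake --build build --parallel  # compile through the ccache launcher
    ccache -s                       # hit/miss counters should now have moved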
CMake/CheckCompiler.cmake (new file)
@@ -0,0 +1,42 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
    message (FATAL_ERROR "Clang 14+ required for building clio")
  endif ()
  set (is_clang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
    message (FATAL_ERROR "AppleClang 14+ required for building clio")
  endif ()
  set (is_appleclang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
    message (FATAL_ERROR "GCC 11+ required for building clio")
  endif ()
  set (is_gcc TRUE)
else ()
  message (FATAL_ERROR "Supported compilers: AppleClang 14+, Clang 14+, GCC 11+")
endif ()

if (san)
  string (TOLOWER ${san} san)
  set (SAN_FLAG "-fsanitize=${san}")
  set (SAN_LIB "")
  if (is_gcc)
    if (san STREQUAL "address")
      set (SAN_LIB "asan")
    elseif (san STREQUAL "thread")
      set (SAN_LIB "tsan")
    elseif (san STREQUAL "memory")
      set (SAN_LIB "msan")
    elseif (san STREQUAL "undefined")
      set (SAN_LIB "ubsan")
    endif ()
  endif ()
  set (_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
  set (CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
  CHECK_CXX_COMPILER_FLAG (${SAN_FLAG} COMPILER_SUPPORTS_SAN)
  set (CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
  if (NOT COMPILER_SUPPORTS_SAN)
    message (FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
  endif ()
endif ()
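The san variable checked here is the cache entry declared in CMakeLists.txt below (set (san "" CACHE STRING ...), with allowed values undefined, memory, address, and thread), so a sanitizer build would be configured roughly like this (illustrative invocation, not taken from the diff):

    cmake -B build -Dsan=address    # also accepts thread, memory or undefined
    cmake --build build --parallel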
CMake/ClioVersion.cmake
@@ -2,32 +2,38 @@
write version to source
#]===================================================================]

find_package(Git REQUIRED)
find_package (Git REQUIRED)

set(GIT_COMMAND rev-parse --short HEAD)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
set (GIT_COMMAND rev-parse --short HEAD)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
  OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)

set(GIT_COMMAND branch --show-current)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
set (GIT_COMMAND branch --show-current)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
  OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)

if(BRANCH STREQUAL "")
  set(BRANCH "dev")
endif()
if (BRANCH STREQUAL "")
  set (BRANCH "dev")
endif ()

if(NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-ref>
  execute_process(COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
  set(VERSION "${DATE}-${BRANCH}-${REV}")
else()
  set(GIT_COMMAND describe --tags)
  execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
  set(VERSION "${TAG_VERSION}-${REV}")
endif()
if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-rev>
  execute_process (COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
  set (VERSION "${DATE}-${BRANCH}-${REV}")
else ()
  set (GIT_COMMAND describe --tags)
  execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
    OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
  set (VERSION "${TAG_VERSION}-${REV}")
endif ()

if(CMAKE_BUILD_TYPE MATCHES Debug)
  set(VERSION "${VERSION}+DEBUG")
endif()
if (CMAKE_BUILD_TYPE MATCHES Debug)
  set (VERSION "${VERSION}+DEBUG")
endif ()

message(STATUS "Build version: ${VERSION}")
set(clio_version "${VERSION}")
message (STATUS "Build version: ${VERSION}")
set (clio_version "${VERSION}")

configure_file(CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)
configure_file (CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)
CMake/coverage.cmake
@@ -1,42 +1,45 @@
# call add_converage(module_name) to add coverage targets for the given module
function(add_converage module)
if("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang"
# call add_coverage(module_name) to add coverage targets for the given module
function (add_coverage module)
if ("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang"
    OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
  message("[Coverage] Building with llvm Code Coverage Tools")
  message ("[Coverage] Building with llvm Code Coverage Tools")
  # Using llvm gcov ; llvm install by xcode
  set(LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin)
  if(NOT EXISTS ${LLVM_COV_PATH}/llvm-cov)
    message(FATAL_ERROR "llvm-cov not found! Aborting.")
  endif()
  set (LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin)
  if (NOT EXISTS ${LLVM_COV_PATH}/llvm-cov)
    message (FATAL_ERROR "llvm-cov not found! Aborting.")
  endif ()

  # set Flags
  target_compile_options(${module} PRIVATE -fprofile-instr-generate
    -fcoverage-mapping)
  target_link_options(${module} PUBLIC -fprofile-instr-generate
    -fcoverage-mapping)
  target_compile_options (${module} PRIVATE
    -fprofile-instr-generate
    -fcoverage-mapping)

  target_link_options (${module} PUBLIC
    -fprofile-instr-generate
    -fcoverage-mapping)

  target_compile_options(clio PRIVATE -fprofile-instr-generate
    -fcoverage-mapping)
  target_link_options(clio PUBLIC -fprofile-instr-generate
    -fcoverage-mapping)
  target_compile_options (clio PRIVATE
    -fprofile-instr-generate
    -fcoverage-mapping)

  target_link_options (clio PUBLIC
    -fprofile-instr-generate
    -fcoverage-mapping)

  # llvm-cov
  add_custom_target(
    ${module}-ccov-preprocessing
  add_custom_target (${module}-ccov-preprocessing
    COMMAND LLVM_PROFILE_FILE=${module}.profraw $<TARGET_FILE:${module}>
    COMMAND ${LLVM_COV_PATH}/llvm-profdata merge -sparse ${module}.profraw -o
      ${module}.profdata
    DEPENDS ${module})

  add_custom_target(
    ${module}-ccov-show
  add_custom_target (${module}-ccov-show
    COMMAND ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
      -instr-profile=${module}.profdata -show-line-counts-or-regions
    DEPENDS ${module}-ccov-preprocessing)

  # add summary for CI parse
  add_custom_target(
    ${module}-ccov-report
  add_custom_target (${module}-ccov-report
    COMMAND
      ${LLVM_COV_PATH}/llvm-cov report $<TARGET_FILE:${module}>
      -instr-profile=${module}.profdata
@@ -45,8 +48,7 @@ function(add_converage module)
    DEPENDS ${module}-ccov-preprocessing)

  # exclude libs and unittests self
  add_custom_target(
    ${module}-ccov
  add_custom_target (${module}-ccov
    COMMAND
      ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
      -instr-profile=${module}.profdata -show-line-counts-or-regions
@@ -54,38 +56,36 @@ function(add_converage module)
      -ignore-filename-regex=".*_makefiles|.*unittests|.*_deps" > /dev/null 2>&1
    DEPENDS ${module}-ccov-preprocessing)

  add_custom_command(
  add_custom_command (
    TARGET ${module}-ccov
    POST_BUILD
    COMMENT
      "Open ${module}-llvm-cov/index.html in your browser to view the coverage report."
  )
elseif("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}"
  MATCHES "GNU")
  message("[Coverage] Building with Gcc Code Coverage Tools")
elseif ("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
  message ("[Coverage] Building with Gcc Code Coverage Tools")

  find_program(GCOV_PATH gcov)
  if(NOT GCOV_PATH)
    message(FATAL_ERROR "gcov not found! Aborting...")
  endif() # NOT GCOV_PATH
  find_program(GCOVR_PATH gcovr)
  if(NOT GCOVR_PATH)
    message(FATAL_ERROR "gcovr not found! Aborting...")
  endif() # NOT GCOVR_PATH
  find_program (GCOV_PATH gcov)
  if (NOT GCOV_PATH)
    message (FATAL_ERROR "gcov not found! Aborting...")
  endif () # NOT GCOV_PATH
  find_program (GCOVR_PATH gcovr)
  if (NOT GCOVR_PATH)
    message (FATAL_ERROR "gcovr not found! Aborting...")
  endif () # NOT GCOVR_PATH

  set(COV_OUTPUT_PATH ${module}-gcc-cov)
  target_compile_options(${module} PRIVATE -fprofile-arcs -ftest-coverage
  set (COV_OUTPUT_PATH ${module}-gcc-cov)
  target_compile_options (${module} PRIVATE -fprofile-arcs -ftest-coverage
    -fPIC)
  target_link_libraries(${module} PRIVATE gcov)
  target_link_libraries (${module} PRIVATE gcov)

  target_compile_options(clio PRIVATE -fprofile-arcs -ftest-coverage
  target_compile_options (clio PRIVATE -fprofile-arcs -ftest-coverage
    -fPIC)
  target_link_libraries(clio PRIVATE gcov)
  target_link_libraries (clio PRIVATE gcov)
  # this target is used for CI as well generate the summary out.xml will send
  # to github action to generate markdown, we can paste it to comments or
  # readme
  add_custom_target(
    ${module}-ccov
  add_custom_target (${module}-ccov
    COMMAND ${module} ${TEST_PARAMETER}
    COMMAND rm -rf ${COV_OUTPUT_PATH}
    COMMAND mkdir ${COV_OUTPUT_PATH}
@@ -102,8 +102,7 @@ function(add_converage module)
    COMMENT "Running gcovr to produce Cobertura code coverage report.")

  # generate the detail report
  add_custom_target(
    ${module}-ccov-report
  add_custom_target (${module}-ccov-report
    COMMAND ${module} ${TEST_PARAMETER}
    COMMAND rm -rf ${COV_OUTPUT_PATH}
    COMMAND mkdir ${COV_OUTPUT_PATH}
@@ -114,13 +113,13 @@ function(add_converage module)
    --exclude='${PROJECT_BINARY_DIR}/'
    WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
    COMMENT "Running gcovr to produce Cobertura code coverage report.")
  add_custom_command(
  add_custom_command (
    TARGET ${module}-ccov-report
    POST_BUILD
    COMMENT
      "Open ${COV_OUTPUT_PATH}/index.html in your browser to view the coverage report."
  )
else()
  message(FATAL_ERROR "Compiler not supported yet")
endif()
endfunction()
else ()
  message (FATAL_ERROR "Compiler not supported yet")
endif ()
endfunction ()
CMake/Docs.cmake (new file)
@@ -0,0 +1,11 @@
find_package (Doxygen REQUIRED)

set (DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile)
set (DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)

configure_file (${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
add_custom_target (docs
  COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
  COMMENT "Generating API documentation with Doxygen"
  VERBATIM)
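Because the Doxygen run is wrapped in an ordinary custom target named docs, regenerating the documentation is a single build-tool invocation once the project is configured (a sketch; it assumes the docs option from CMakeLists.txt below gates inclusion of this module and that the build directory is named build):

    cmake -B build -Ddocs=TRUE
    cmake --build build --target docs   # Doxygen output lands in the build tree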
CMake/Settings.cmake (new file)
@@ -0,0 +1,39 @@
set(COMPILER_FLAGS
  -Wall
  -Wcast-align
  -Wdouble-promotion
  -Wextra
  -Werror
  -Wformat=2
  -Wimplicit-fallthrough
  -Wmisleading-indentation
  -Wno-narrowing
  -Wno-deprecated-declarations
  -Wno-dangling-else
  -Wno-unused-but-set-variable
  -Wnon-virtual-dtor
  -Wnull-dereference
  -Wold-style-cast
  -pedantic
  -Wpedantic
  -Wunused
)

if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
  list(APPEND COMPILER_FLAGS
    -Wshadow # gcc is too aggressive with shadowing https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
  )
endif ()

if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  list(APPEND COMPILER_FLAGS
    -Wduplicated-branches
    -Wduplicated-cond
    -Wlogical-op
    -Wuseless-cast
  )
endif ()

# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for the flags description

target_compile_options (clio PUBLIC ${COMPILER_FLAGS})
CMake/SourceLocation.cmake (new file)
@@ -0,0 +1,11 @@
include (CheckIncludeFileCXX)

check_include_file_cxx ("source_location" SOURCE_LOCATION_AVAILABLE)
if (SOURCE_LOCATION_AVAILABLE)
  target_compile_definitions (clio PUBLIC "HAS_SOURCE_LOCATION")
endif ()

check_include_file_cxx ("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if (EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
  target_compile_definitions (clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif ()
CMake/deps/Boost.cmake
@@ -1,6 +1,11 @@
set(Boost_USE_STATIC_LIBS ON)
set(Boost_USE_STATIC_RUNTIME ON)
set (Boost_USE_STATIC_LIBS ON)
set (Boost_USE_STATIC_RUNTIME ON)

find_package(Boost 1.75 COMPONENTS filesystem log_setup log thread system REQUIRED)

target_link_libraries(clio PUBLIC ${Boost_LIBRARIES})
find_package (Boost 1.82 REQUIRED
  COMPONENTS
    program_options
    coroutine
    system
    log
    log_setup
)
CMake/deps/OpenSSL.cmake (new file)
@@ -0,0 +1,5 @@
find_package (OpenSSL 1.1.1 REQUIRED)

set_target_properties (OpenSSL::SSL PROPERTIES
  INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)
CMake/deps/Remove-bitset-operator.patch (deleted)
@@ -1,24 +0,0 @@
From 5cd9d09d960fa489a0c4379880cd7615b1c16e55 Mon Sep 17 00:00:00 2001
From: CJ Cobb <ccobb@ripple.com>
Date: Wed, 10 Aug 2022 12:30:01 -0400
Subject: [PATCH] Remove bitset operator !=

---
 src/ripple/protocol/Feature.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h
index b3ecb099b..6424be411 100644
--- a/src/ripple/protocol/Feature.h
+++ b/src/ripple/protocol/Feature.h
@@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset<detail::numFeatures>
 public:
     using base::bitset;
     using base::operator==;
-    using base::operator!=;

     using base::all;
     using base::any;
--
2.32.0
CMake/deps/SourceLocation.cmake (deleted; superseded by CMake/SourceLocation.cmake above)
@@ -1,11 +0,0 @@
include(CheckIncludeFileCXX)

check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE)
if(SOURCE_LOCATION_AVAILABLE)
  target_compile_definitions(clio PUBLIC "HAS_SOURCE_LOCATION")
endif()

check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if(EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
  target_compile_definitions(clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif()
CMake/deps/Threads.cmake (new file)
@@ -0,0 +1,2 @@
set (THREADS_PREFER_PTHREAD_FLAG ON)
find_package (Threads)
CMake/deps/cassandra.cmake
@@ -1,153 +1 @@
find_package(ZLIB REQUIRED)

find_library(cassandra NAMES cassandra)
if(NOT cassandra)
  message("System installed Cassandra cpp driver not found. Will build")
  find_library(zlib NAMES zlib1g-dev zlib-devel zlib z)
  if(NOT zlib)
    message("zlib not found. will build")
    add_library(zlib STATIC IMPORTED GLOBAL)
    ExternalProject_Add(zlib_src
      PREFIX ${nih_cache_path}
      GIT_REPOSITORY https://github.com/madler/zlib.git
      GIT_TAG v1.2.12
      INSTALL_COMMAND ""
      BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
    )
    ExternalProject_Get_Property (zlib_src SOURCE_DIR)
    ExternalProject_Get_Property (zlib_src BINARY_DIR)
    set (zlib_src_SOURCE_DIR "${SOURCE_DIR}")
    file (MAKE_DIRECTORY ${zlib_src_SOURCE_DIR}/include)
    set_target_properties (zlib PROPERTIES
      IMPORTED_LOCATION
        ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
      INTERFACE_INCLUDE_DIRECTORIES
        ${SOURCE_DIR}/include)
    add_dependencies(zlib zlib_src)
    file(TO_CMAKE_PATH "${zlib_src_SOURCE_DIR}" zlib_src_SOURCE_DIR)
  endif()
  find_library(krb5 NAMES krb5-dev libkrb5-dev)
  if(NOT krb5)
    message("krb5 not found. will build")
    add_library(krb5 STATIC IMPORTED GLOBAL)
    ExternalProject_Add(krb5_src
      PREFIX ${nih_cache_path}
      GIT_REPOSITORY https://github.com/krb5/krb5.git
      GIT_TAG krb5-1.20
      UPDATE_COMMAND ""
      CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared
      BUILD_IN_SOURCE 1
      BUILD_COMMAND make
      INSTALL_COMMAND ""
      BUILD_BYPRODUCTS <SOURCE_DIR>/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
    )
    message(${ep_lib_prefix}/krb5.a)
    message(${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a)
    ExternalProject_Get_Property (krb5_src SOURCE_DIR)
    ExternalProject_Get_Property (krb5_src BINARY_DIR)
    set (krb5_src_SOURCE_DIR "${SOURCE_DIR}")
    file (MAKE_DIRECTORY ${krb5_src_SOURCE_DIR}/include)
    set_target_properties (krb5 PROPERTIES
      IMPORTED_LOCATION
        ${SOURCE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
      INTERFACE_INCLUDE_DIRECTORIES
        ${SOURCE_DIR}/include)
    add_dependencies(krb5 krb5_src)
    file(TO_CMAKE_PATH "${krb5_src_SOURCE_DIR}" krb5_src_SOURCE_DIR)
  endif()

  find_library(libuv1 NAMES uv1 libuv1 liubuv1-dev libuv1:amd64)

  if(NOT libuv1)
    message("libuv1 not found, will build")
    add_library(libuv1 STATIC IMPORTED GLOBAL)
    ExternalProject_Add(libuv_src
      PREFIX ${nih_cache_path}
      GIT_REPOSITORY https://github.com/libuv/libuv.git
      GIT_TAG v1.44.1
      INSTALL_COMMAND ""
      BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
    )
    ExternalProject_Get_Property (libuv_src SOURCE_DIR)
    ExternalProject_Get_Property (libuv_src BINARY_DIR)
    set (libuv_src_SOURCE_DIR "${SOURCE_DIR}")
    file (MAKE_DIRECTORY ${libuv_src_SOURCE_DIR}/include)

    set_target_properties (libuv1 PROPERTIES
      IMPORTED_LOCATION
        ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
      INTERFACE_INCLUDE_DIRECTORIES
        ${SOURCE_DIR}/include)
    add_dependencies(libuv1 libuv_src)

    file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR)
  endif()
  add_library (cassandra STATIC IMPORTED GLOBAL)
  ExternalProject_Add(cassandra_src
    PREFIX ${nih_cache_path}
    GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
    GIT_TAG 2.16.2
    CMAKE_ARGS
      -DLIBUV_ROOT_DIR=${BINARY_DIR}
      -DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
      -DCASS_BUILD_STATIC=ON
      -DCASS_BUILD_SHARED=OFF
    INSTALL_COMMAND ""
    BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
  )

  ExternalProject_Get_Property (cassandra_src SOURCE_DIR)
  ExternalProject_Get_Property (cassandra_src BINARY_DIR)
  set (cassandra_src_SOURCE_DIR "${SOURCE_DIR}")
  file (MAKE_DIRECTORY ${cassandra_src_SOURCE_DIR}/include)

  set_target_properties (cassandra PROPERTIES
    IMPORTED_LOCATION
      ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
    INTERFACE_INCLUDE_DIRECTORIES
      ${SOURCE_DIR}/include)
  message("cass dirs")
  message(${BINARY_DIR})
  message(${SOURCE_DIR})
  message(${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a)
  add_dependencies(cassandra cassandra_src)

  if(NOT libuv1)
    ExternalProject_Add_StepDependencies(cassandra_src build libuv1)
    target_link_libraries(cassandra INTERFACE libuv1)
  else()
    target_link_libraries(cassandra INTERFACE ${libuv1})
  endif()
  if(NOT krb5)
    ExternalProject_Add_StepDependencies(cassandra_src build krb5)
    target_link_libraries(cassandra INTERFACE krb5)
  else()
    target_link_libraries(cassandra INTERFACE ${krb5})
  endif()

  if(NOT zlib)
    ExternalProject_Add_StepDependencies(cassandra_src build zlib)
    target_link_libraries(cassandra INTERFACE zlib)
  else()
    target_link_libraries(cassandra INTERFACE ${zlib})
  endif()
  set(OPENSSL_USE_STATIC_LIBS TRUE)
  find_package(OpenSSL REQUIRED)
  target_link_libraries(cassandra INTERFACE OpenSSL::SSL)

  file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR)
  target_link_libraries(clio PUBLIC cassandra)
else()
  message("Found system installed cassandra cpp driver")
  message(${cassandra})
  find_path(cassandra_includes NAMES cassandra.h REQUIRED)
  message(${cassandra_includes})
  get_filename_component(CASSANDRA_HEADER ${cassandra_includes}/cassandra.h REALPATH)
  get_filename_component(CASSANDRA_HEADER_DIR ${CASSANDRA_HEADER} DIRECTORY)
  target_link_libraries (clio PUBLIC ${cassandra})
  target_include_directories(clio PUBLIC ${CASSANDRA_HEADER_DIR})
endif()
find_package (cassandra-cpp-driver REQUIRED)
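The entire ExternalProject bootstrap above (zlib, krb5, libuv and the DataStax driver) collapses into a single find_package call, with the prebuilt driver now expected to come from Conan. One way to confirm the package is resolvable from the conan-non-prod remote configured by the setup_conan action (a sketch using a standard Conan 1.x command):

    conan search cassandra-cpp-driver -r conan-non-prod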
CMake/deps/gtest.cmake
@@ -1,22 +1,4 @@
FetchContent_Declare(
  googletest
  URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip
)
find_package (GTest REQUIRED)

FetchContent_GetProperties(googletest)

if(NOT googletest_POPULATED)
  FetchContent_Populate(googletest)
  add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio_tests PUBLIC clio gmock_main)
target_include_directories(clio_tests PRIVATE unittests)

enable_testing()

include(GoogleTest)

#increase timeout for tests discovery to 10 seconds, by default it is 5s. As more unittests added, we start to hit this issue
#https://github.com/google/googletest/issues/3475
gtest_discover_tests(clio_tests DISCOVERY_TIMEOUT 10)
enable_testing ()
include (GoogleTest)
CMake/deps/libfmt.cmake
@@ -1,14 +1 @@
FetchContent_Declare(
  libfmt
  URL https://github.com/fmtlib/fmt/releases/download/9.1.0/fmt-9.1.0.zip
)

FetchContent_GetProperties(libfmt)

if(NOT libfmt_POPULATED)
  FetchContent_Populate(libfmt)
  add_subdirectory(${libfmt_SOURCE_DIR} ${libfmt_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio PUBLIC fmt)
find_package (fmt REQUIRED)
CMake/deps/libxrpl.cmake (new file)
@@ -0,0 +1 @@
find_package (xrpl REQUIRED)
CMake/deps/rippled.cmake (deleted)
@@ -1,20 +0,0 @@
set(RIPPLED_REPO "https://github.com/ripple/rippled.git")
set(RIPPLED_BRANCH "1.9.2")
set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
set(patch_command ! grep operator!= src/ripple/protocol/Feature.h || git apply < ${CMAKE_CURRENT_SOURCE_DIR}/CMake/deps/Remove-bitset-operator.patch)
message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}")
FetchContent_Declare(rippled
  GIT_REPOSITORY "${RIPPLED_REPO}"
  GIT_TAG "${RIPPLED_BRANCH}"
  GIT_SHALLOW ON
  PATCH_COMMAND "${patch_command}"
)

FetchContent_GetProperties(rippled)
if(NOT rippled_POPULATED)
  FetchContent_Populate(rippled)
  add_subdirectory(${rippled_SOURCE_DIR} ${rippled_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio PUBLIC xrpl_core grpc_pbufs)
target_include_directories(clio PUBLIC ${rippled_SOURCE_DIR}/src ) # TODO: Seems like this shouldn't be needed?
@@ -1,16 +1,14 @@
set(CLIO_INSTALL_DIR "/opt/clio")
set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
set (CLIO_INSTALL_DIR "/opt/clio")
set (CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})

install(TARGETS clio_server DESTINATION bin)
# install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests?
install (TARGETS clio_server DESTINATION bin)

#install(FILES example-config.json DESTINATION etc RENAME config.json)
file(READ example-config.json config)
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
file (READ example-config.json config)
string (REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file (WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install (FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)

configure_file("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
configure_file ("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")

install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
install (FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
CMake/settings.cmake (deleted)
@@ -1,6 +0,0 @@
target_compile_options(clio
  PUBLIC -Wall
  -Werror
  -Wno-narrowing
  -Wno-deprecated-declarations
  -Wno-dangling-else)
CMakeLists.txt
@@ -1,56 +1,87 @@
cmake_minimum_required(VERSION 3.16.3)

project(clio)

if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
  message(FATAL_ERROR "GCC 11+ required for building clio")
endif()
# ========================================================================== #
# Options                                                                    #
# ========================================================================== #
option (verbose "Verbose build" FALSE)
option (tests "Build tests" FALSE)
option (docs "Generate doxygen docs" FALSE)
option (coverage "Build test coverage report" FALSE)
option (packaging "Create distribution packages" FALSE)
# ========================================================================== #
set (san "" CACHE STRING "Add sanitizer instrumentation")
set (CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
# ========================================================================== #

option(BUILD_TESTS "Build tests" TRUE)
# Include required modules
include (CMake/Ccache.cmake)
include (CheckCXXCompilerFlag)

option(VERBOSE "Verbose build" TRUE)
if(VERBOSE)
  set(CMAKE_VERBOSE_MAKEFILE TRUE)
  set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
endif()
if (verbose)
  set (CMAKE_VERBOSE_MAKEFILE TRUE)
endif ()

if (packaging)
  add_definitions (-DPKG=1)
endif ()

if(PACKAGING)
  add_definitions(-DPKG=1)
endif()
add_library (clio)

#c++20 removed std::result_of but boost 1.75 is still using it.
add_definitions(-DBOOST_ASIO_HAS_STD_INVOKE_RESULT=1)
# Clio tweaks and checks
include (CMake/CheckCompiler.cmake)
include (CMake/Settings.cmake)
include (CMake/ClioVersion.cmake)
include (CMake/SourceLocation.cmake)

add_library(clio)
target_compile_features(clio PUBLIC cxx_std_20)
target_include_directories(clio PUBLIC src)
# Clio deps
include (CMake/deps/libxrpl.cmake)
include (CMake/deps/Boost.cmake)
include (CMake/deps/OpenSSL.cmake)
include (CMake/deps/Threads.cmake)
include (CMake/deps/libfmt.cmake)
include (CMake/deps/cassandra.cmake)

include(FetchContent)
include(ExternalProject)
include(CMake/settings.cmake)
include(CMake/ClioVersion.cmake)
include(CMake/deps/rippled.cmake)
include(CMake/deps/libfmt.cmake)
include(CMake/deps/Boost.cmake)
include(CMake/deps/cassandra.cmake)
include(CMake/deps/SourceLocation.cmake)
# TODO: Include directory will be wrong when installed.
target_include_directories (clio PUBLIC src)
target_compile_features (clio PUBLIC cxx_std_20)

target_sources(clio PRIVATE
target_link_libraries (clio
  PUBLIC Boost::boost
  PUBLIC Boost::coroutine
  PUBLIC Boost::program_options
  PUBLIC Boost::system
  PUBLIC Boost::log
  PUBLIC Boost::log_setup
  PUBLIC cassandra-cpp-driver::cassandra-cpp-driver
  PUBLIC fmt::fmt
  PUBLIC OpenSSL::Crypto
  PUBLIC OpenSSL::SSL
  PUBLIC xrpl::libxrpl

  INTERFACE Threads::Threads
)

if (is_gcc)
  # FIXME: needed on gcc for now
  target_compile_definitions (clio PUBLIC BOOST_ASIO_DISABLE_CONCEPTS)
endif ()

target_sources (clio PRIVATE
  ## Main
  src/main/impl/Build.cpp
  ## Backend
  src/backend/BackendInterface.cpp
  src/backend/LedgerCache.cpp
  ## NextGen Backend
  src/backend/cassandra/impl/Future.cpp
  src/backend/cassandra/impl/Cluster.cpp
  src/backend/cassandra/impl/Batch.cpp
  src/backend/cassandra/impl/Result.cpp
  src/backend/cassandra/impl/Tuple.cpp
  src/backend/cassandra/impl/SslContext.cpp
  src/backend/cassandra/Handle.cpp
  src/backend/cassandra/SettingsProvider.cpp
  src/data/BackendInterface.cpp
  src/data/LedgerCache.cpp
  src/data/cassandra/impl/Future.cpp
  src/data/cassandra/impl/Cluster.cpp
  src/data/cassandra/impl/Batch.cpp
  src/data/cassandra/impl/Result.cpp
  src/data/cassandra/impl/Tuple.cpp
  src/data/cassandra/impl/SslContext.cpp
  src/data/cassandra/Handle.cpp
  src/data/cassandra/SettingsProvider.cpp
  ## ETL
  src/etl/Source.cpp
  src/etl/ProbingSource.cpp
@@ -58,8 +89,10 @@ target_sources(clio PRIVATE
  src/etl/ETLService.cpp
  src/etl/LoadBalancer.cpp
  src/etl/impl/ForwardCache.cpp
  ## Subscriptions
  src/subscriptions/SubscriptionManager.cpp
  ## Feed
  src/feed/SubscriptionManager.cpp
  ## Web
  src/web/IntervalSweepHandler.cpp
  ## RPC
  src/rpc/Errors.cpp
  src/rpc/Factories.cpp
@@ -68,9 +101,10 @@ target_sources(clio PRIVATE
  src/rpc/WorkQueue.cpp
  src/rpc/common/Specs.cpp
  src/rpc/common/Validators.cpp
  # RPC impl
  src/rpc/common/MetaProcessors.cpp
  src/rpc/common/impl/APIVersionParser.cpp
  src/rpc/common/impl/HandlerProvider.cpp
  ## RPC handler
  ## RPC handlers
  src/rpc/handlers/AccountChannels.cpp
  src/rpc/handlers/AccountCurrencies.cpp
  src/rpc/handlers/AccountInfo.cpp
@@ -81,6 +115,7 @@ target_sources(clio PRIVATE
  src/rpc/handlers/AccountTx.cpp
  src/rpc/handlers/BookChanges.cpp
  src/rpc/handlers/BookOffers.cpp
  src/rpc/handlers/DepositAuthorized.cpp
  src/rpc/handlers/GatewayBalances.cpp
  src/rpc/handlers/Ledger.cpp
  src/rpc/handlers/LedgerData.cpp
@@ -96,88 +131,142 @@ target_sources(clio PRIVATE
  src/rpc/handlers/TransactionEntry.cpp
  src/rpc/handlers/Tx.cpp
  ## Util
  src/config/Config.cpp
  src/log/Logger.cpp
  src/util/config/Config.cpp
  src/util/log/Logger.cpp
  src/util/Taggable.cpp)

add_executable(clio_server src/main/main.cpp)
target_link_libraries(clio_server PUBLIC clio)
# Clio server
add_executable (clio_server src/main/Main.cpp)
target_link_libraries (clio_server PRIVATE clio)
target_link_options(clio_server
  PRIVATE
    $<$<AND:$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:-static-libstdc++ -static-libgcc>
)

if(BUILD_TESTS)
  set(TEST_TARGET clio_tests)
  add_executable(${TEST_TARGET}
# Unittesting
if (tests)
  set (TEST_TARGET clio_tests)
  add_executable (${TEST_TARGET}
    # Common
    unittests/Main.cpp
    unittests/Playground.cpp
    unittests/Logger.cpp
    unittests/Config.cpp
    unittests/ProfilerTest.cpp
    unittests/DOSGuard.cpp
    unittests/SubscriptionTest.cpp
    unittests/SubscriptionManagerTest.cpp
    unittests/LoggerTests.cpp
    unittests/ConfigTests.cpp
    unittests/ProfilerTests.cpp
    unittests/JsonUtilTests.cpp
    unittests/DOSGuardTests.cpp
    unittests/SubscriptionTests.cpp
    unittests/SubscriptionManagerTests.cpp
    unittests/util/TestObject.cpp
    unittests/util/StringUtils.cpp
    # ETL
    unittests/etl/ExtractionDataPipeTest.cpp
    unittests/etl/ExtractorTest.cpp
    unittests/etl/TransformerTest.cpp
    unittests/etl/ExtractionDataPipeTests.cpp
    unittests/etl/ExtractorTests.cpp
    unittests/etl/TransformerTests.cpp
    unittests/etl/CacheLoaderTests.cpp
    unittests/etl/AmendmentBlockHandlerTests.cpp
    # RPC
    unittests/rpc/ErrorTests.cpp
    unittests/rpc/BaseTests.cpp
    unittests/rpc/RPCHelpersTest.cpp
    unittests/rpc/CountersTest.cpp
    unittests/rpc/AdminVerificationTest.cpp
    unittests/rpc/RPCHelpersTests.cpp
    unittests/rpc/CountersTests.cpp
    unittests/rpc/AdminVerificationTests.cpp
    unittests/rpc/APIVersionTests.cpp
    unittests/rpc/ForwardingProxyTests.cpp
    unittests/rpc/WorkQueueTests.cpp
    unittests/rpc/AmendmentsTests.cpp
    unittests/rpc/JsonBoolTests.cpp
    ## RPC handlers
    unittests/rpc/handlers/DefaultProcessorTests.cpp
    unittests/rpc/handlers/TestHandlerTests.cpp
    unittests/rpc/handlers/AccountCurrenciesTest.cpp
    unittests/rpc/handlers/AccountLinesTest.cpp
    unittests/rpc/handlers/AccountTxTest.cpp
    unittests/rpc/handlers/AccountOffersTest.cpp
    unittests/rpc/handlers/AccountInfoTest.cpp
    unittests/rpc/handlers/AccountChannelsTest.cpp
    unittests/rpc/handlers/AccountNFTsTest.cpp
    unittests/rpc/handlers/BookOffersTest.cpp
    unittests/rpc/handlers/GatewayBalancesTest.cpp
    unittests/rpc/handlers/TxTest.cpp
    unittests/rpc/handlers/TransactionEntryTest.cpp
    unittests/rpc/handlers/LedgerEntryTest.cpp
    unittests/rpc/handlers/LedgerRangeTest.cpp
    unittests/rpc/handlers/NoRippleCheckTest.cpp
    unittests/rpc/handlers/ServerInfoTest.cpp
    unittests/rpc/handlers/PingTest.cpp
    unittests/rpc/handlers/RandomTest.cpp
    unittests/rpc/handlers/NFTInfoTest.cpp
    unittests/rpc/handlers/NFTBuyOffersTest.cpp
    unittests/rpc/handlers/NFTSellOffersTest.cpp
    unittests/rpc/handlers/NFTHistoryTest.cpp
    unittests/rpc/handlers/SubscribeTest.cpp
    unittests/rpc/handlers/UnsubscribeTest.cpp
    unittests/rpc/handlers/LedgerDataTest.cpp
    unittests/rpc/handlers/AccountObjectsTest.cpp
    unittests/rpc/handlers/BookChangesTest.cpp
    unittests/rpc/handlers/LedgerTest.cpp
    unittests/rpc/handlers/AccountCurrenciesTests.cpp
    unittests/rpc/handlers/AccountLinesTests.cpp
    unittests/rpc/handlers/AccountTxTests.cpp
    unittests/rpc/handlers/AccountOffersTests.cpp
    unittests/rpc/handlers/AccountInfoTests.cpp
    unittests/rpc/handlers/AccountChannelsTests.cpp
    unittests/rpc/handlers/AccountNFTsTests.cpp
    unittests/rpc/handlers/BookOffersTests.cpp
    unittests/rpc/handlers/DepositAuthorizedTests.cpp
    unittests/rpc/handlers/GatewayBalancesTests.cpp
    unittests/rpc/handlers/TxTests.cpp
    unittests/rpc/handlers/TransactionEntryTests.cpp
    unittests/rpc/handlers/LedgerEntryTests.cpp
    unittests/rpc/handlers/LedgerRangeTests.cpp
    unittests/rpc/handlers/NoRippleCheckTests.cpp
    unittests/rpc/handlers/ServerInfoTests.cpp
    unittests/rpc/handlers/PingTests.cpp
    unittests/rpc/handlers/RandomTests.cpp
    unittests/rpc/handlers/NFTInfoTests.cpp
    unittests/rpc/handlers/NFTBuyOffersTests.cpp
    unittests/rpc/handlers/NFTSellOffersTests.cpp
    unittests/rpc/handlers/NFTHistoryTests.cpp
    unittests/rpc/handlers/SubscribeTests.cpp
    unittests/rpc/handlers/UnsubscribeTests.cpp
    unittests/rpc/handlers/LedgerDataTests.cpp
    unittests/rpc/handlers/AccountObjectsTests.cpp
    unittests/rpc/handlers/BookChangesTests.cpp
    unittests/rpc/handlers/LedgerTests.cpp
    unittests/rpc/handlers/VersionHandlerTests.cpp
    # Backend
    unittests/backend/BackendFactoryTest.cpp
    unittests/backend/cassandra/BaseTests.cpp
    unittests/backend/cassandra/BackendTests.cpp
    unittests/backend/cassandra/RetryPolicyTests.cpp
    unittests/backend/cassandra/SettingsProviderTests.cpp
    unittests/backend/cassandra/ExecutionStrategyTests.cpp
    unittests/backend/cassandra/AsyncExecutorTests.cpp
    unittests/webserver/ServerTest.cpp
    unittests/webserver/RPCExecutorTest.cpp)
  include(CMake/deps/gtest.cmake)
    unittests/data/BackendFactoryTests.cpp
    unittests/data/cassandra/BaseTests.cpp
    unittests/data/cassandra/BackendTests.cpp
    unittests/data/cassandra/RetryPolicyTests.cpp
    unittests/data/cassandra/SettingsProviderTests.cpp
    unittests/data/cassandra/ExecutionStrategyTests.cpp
    unittests/data/cassandra/AsyncExecutorTests.cpp
    # Webserver
    unittests/web/ServerTests.cpp
    unittests/web/RPCServerHandlerTests.cpp
    unittests/web/WhitelistHandlerTests.cpp
    unittests/web/SweepHandlerTests.cpp)

  # test for dwarf5 bug on ci
  target_compile_options(clio PUBLIC -gdwarf-4)
  include (CMake/deps/gtest.cmake)

  # if CODE_COVERAGE enable, add clio_test-ccov
  if(CODE_COVERAGE)
    include(CMake/coverage.cmake)
    add_converage(${TEST_TARGET})
  endif()
endif()
# See https://github.com/google/googletest/issues/3475
|
||||
gtest_discover_tests (clio_tests DISCOVERY_TIMEOUT 10)
|
||||
|
||||
include(CMake/install/install.cmake)
|
||||
if(PACKAGING)
|
||||
include(CMake/packaging.cmake)
|
||||
endif()
|
||||
# Fix for dwarf5 bug on ci
|
||||
target_compile_options (clio PUBLIC -gdwarf-4)
|
||||
|
||||
target_compile_definitions (${TEST_TARGET} PUBLIC UNITTEST_BUILD)
|
||||
target_include_directories (${TEST_TARGET} PRIVATE unittests)
|
||||
target_link_libraries (${TEST_TARGET} PUBLIC clio gtest::gtest)
|
||||
|
||||
# Generate `clio_tests-ccov` if coverage is enabled
|
||||
# Note: use `make clio_tests-ccov` to generate report
|
||||
if (coverage)
|
||||
target_compile_definitions(${TEST_TARGET} PRIVATE COVERAGE_ENABLED)
|
||||
include (CMake/Coverage.cmake)
|
||||
add_coverage (${TEST_TARGET})
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
# Enable selected sanitizer if enabled via `san`
|
||||
if (san)
|
||||
target_compile_options (clio
|
||||
PUBLIC
|
||||
# Sanitizers recommend minimum of -O1 for reasonable performance
|
||||
$<$<CONFIG:Debug>:-O1>
|
||||
${SAN_FLAG}
|
||||
-fno-omit-frame-pointer)
|
||||
target_compile_definitions (clio
|
||||
PUBLIC
|
||||
$<$<STREQUAL:${san},address>:SANITIZER=ASAN>
|
||||
$<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
|
||||
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN>
|
||||
$<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>)
|
||||
target_link_libraries (clio INTERFACE ${SAN_FLAG} ${SAN_LIB})
|
||||
endif ()
|
||||
|
||||
# Generate `docs` target for doxygen documentation if enabled
|
||||
# Note: use `make docs` to generate the documentation
|
||||
if (docs)
|
||||
include (CMake/Docs.cmake)
|
||||
endif ()
|
||||
|
||||
include (CMake/install/install.cmake)
|
||||
if (packaging)
|
||||
include (CMake/packaging.cmake) # This file exists only in build runner
|
||||
endif ()
|
||||
|
||||
17
Doxyfile
@@ -1,3 +1,16 @@
PROJECT_NAME = "Clio"
INPUT = src
RECURSIVE = YES
INPUT = ../src ../unittests
EXCLUDE_PATTERNS = *Test*.cpp *Test*.h
RECURSIVE = YES
HAVE_DOT = YES

QUIET = YES
WARNINGS = NO
WARN_NO_PARAMDOC = NO
WARN_IF_INCOMPLETE_DOC = NO
WARN_IF_UNDOCUMENTED = NO

GENERATE_LATEX = NO
GENERATE_HTML = YES

SORT_MEMBERS_CTORS_1ST = YES
125
README.md
@@ -1,51 +1,102 @@
# Clio
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC. Validated
historical ledger and transaction data are stored in a more space-efficient format,

Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC.
Validated historical ledger and transaction data are stored in a more space-efficient format,
using up to 4 times less space than rippled. Clio can be configured to store data in Apache Cassandra or ScyllaDB,
allowing for scalable read throughput. Multiple Clio nodes can share
access to the same dataset, allowing for a highly available cluster of Clio nodes,
without the need for redundant data storage or computation.
allowing for scalable read throughput. Multiple Clio nodes can share access to the same dataset,
allowing for a highly available cluster of Clio nodes, without the need for redundant data storage or computation.

Clio offers the full rippled API, with the caveat that Clio by default only returns validated data.
This means that `ledger_index` defaults to `validated` instead of `current` for all requests.
Other non-validated data is also not returned, such as information about queued transactions.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client.
To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
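
Below is a minimal client-side sketch (an editorial illustration, not part of this changeset) of what such a request could look like when built with Boost.JSON; the command and account shown are placeholders:

```cpp
// Sketch only: building a request that opts into non-validated data.
// The account is the well-known XRPL genesis account, used purely as a placeholder.
#include <boost/json.hpp>
#include <iostream>

int main()
{
    boost::json::object request;
    request["command"] = "account_info";
    request["account"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh";
    request["ledger_index"] = "current";  // asks Clio to forward to rippled

    // This serialized object is what a client would send over a WebSocket connection.
    std::cout << boost::json::serialize(request) << std::endl;
}
```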

Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.

## Requirements

1. Access to a Cassandra cluster or ScyllaDB cluster. Can be local or remote.

2. Access to one or more rippled nodes. Can be local or remote.

## Building

Clio is built with CMake. Clio requires at least GCC-11/clang-14.0.0 (C++20), and Boost 1.75.0.

Use these instructions to build a Clio executable from the source. These instructions were tested on Ubuntu 20.04 LTS.

```sh
# Install dependencies
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake clang-format
# Install gcovr to run code coverage
sudo apt-get -y install gcovr

# Compile Boost
wget -O $HOME/boost_1_75_0.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz
tar xvzf $HOME/boost_1_75_0.tar.gz
cd $HOME/boost_1_75_0
./bootstrap.sh
./b2 -j$(nproc)
echo "export BOOST_ROOT=$HOME/boost_1_75_0" >> $HOME/.profile && source $HOME/.profile

# Clone the Clio Git repository & build Clio
cd $HOME
git clone https://github.com/XRPLF/clio.git
cd $HOME/clio
cmake -B build && cmake --build build --parallel $(nproc)
```

Clio is built with CMake and uses Conan for managing dependencies.
It is written in C++20 and therefore requires a modern compiler.

## Prerequisites

### Minimum Requirements

- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55](https://conan.io/downloads.html)
- [CMake 3.16](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html) (needed for code coverage generation)

| Compiler    | Version |
|-------------|---------|
| GCC         | 11      |
| Clang       | 14      |
| Apple Clang | 14.0.3  |

### Conan configuration

Clio does not require anything but default settings in your Conan profile (`~/.conan/profiles/default`). It's best to have no extra flags specified.

> Mac example:
```
[settings]
os=Macos
os_build=Macos
arch=armv8
arch_build=armv8
compiler=apple-clang
compiler.version=14
compiler.libcxx=libc++
build_type=Release
compiler.cppstd=20
```
> Linux example:
```
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=11
compiler.libcxx=libstdc++11
build_type=Release
compiler.cppstd=20
```

### Artifactory

1. Make sure artifactory is set up with Conan

   ```sh
   conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
   ```

   Now you should be able to download a prebuilt `xrpl` package on some platforms.

2. Remove old packages you may have cached:

   ```sh
   conan remove -f xrpl
   conan remove -f cassandra-cpp-driver
   ```

## Building Clio

Navigate to Clio's root directory and run
```sh
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
If all goes well, `conan install` will find the required packages and `cmake` will do the rest. You should end up with `clio_server` and `clio_tests` in the `build` directory (the current directory).

> **Tip:** You can omit the `-o tests=True` option in the `conan install` command above if you don't want to build `clio_tests`.

> **Tip:** To generate a Code Coverage report, include `-o coverage=True` in the `conan install` command above, along with `-o tests=True` to enable tests. After running the `cmake` commands, execute `make clio_tests-ccov`. The coverage report will be found at `clio_tests-llvm-cov/index.html`.

## Running
```sh
@@ -96,12 +147,12 @@ The parameters `ssl_cert_file` and `ssl_key_file` can also be added to the top l
An example of how to specify `ssl_cert_file` and `ssl_key_file` in the config:

```json
"server":{
"server": {
    "ip": "0.0.0.0",
    "port": 51233
},
"ssl_cert_file" : "/full/path/to/cert.file",
"ssl_key_file" : "/full/path/to/key.file"
"ssl_cert_file": "/full/path/to/cert.file",
"ssl_key_file": "/full/path/to/key.file"
```

Once your config files are ready, start rippled and Clio. It doesn't matter which you
@@ -172,6 +223,20 @@ which can cause high latencies. A possible alternative to this is to just deploy
a database in each region, and the Clio nodes in each region use their region's database.
This is effectively two systems.

Clio supports API versioning as [described here](https://xrpl.org/request-formatting.html#api-versioning).
It's possible to configure `minimum`, `maximum` and `default` version like so:
```json
"api_version": {
    "min": 1,
    "max": 2,
    "default": 1
}
```
All of the above are optional.
Clio will fall back to hardcoded defaults when they are not specified in the config file, or when the configured values are outside
of the minimum and maximum supported versions hardcoded in `src/rpc/common/APIVersion.h`.
> **Note:** See `example-config.json` for more details.

## Developing against `rippled` in standalone mode

If you wish to develop against a `rippled` instance running in standalone
@@ -1,14 +1,22 @@
/*
 * This is an example configuration file. Please do not use without modifying to suit your needs.
 */
{
    "database": {
        "type": "cassandra",
        "cassandra": {
            // This option can be used to setup a secure connect bundle connection
            "secure_connect_bundle": "[path/to/zip. ignore if using contact_points]",
            // The following options are used only if using contact_points
            "contact_points": "[ip. ignore if using secure_connect_bundle]",
            "port": "[port. ignore if using_secure_connect_bundle]",
            "keyspace": "clio",
            // Authentication settings
            "username": "[username, if any]",
            "password": "[password, if any]",
            "max_requests_outstanding": 25000,
            // Other common settings
            "keyspace": "clio",
            "max_write_requests_outstanding": 25000,
            "max_read_requests_outstanding": 30000,
            "threads": 8
        }
    },
87
conanfile.py
Normal file
@@ -0,0 +1,87 @@
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
import re


class Clio(ConanFile):
    name = 'clio'
    license = 'ISC'
    author = 'Alex Kremer <akremer@ripple.com>, John Freeman <jfreeman@ripple.com>'
    url = 'https://github.com/xrplf/clio'
    description = 'Clio RPC server'
    settings = 'os', 'compiler', 'build_type', 'arch'
    options = {
        'fPIC': [True, False],
        'verbose': [True, False],
        'tests': [True, False],      # build unit tests; create `clio_tests` binary
        'docs': [True, False],       # doxygen API docs; create custom target 'docs'
        'packaging': [True, False],  # create distribution packages
        'coverage': [True, False],   # build for test coverage report; create custom target `clio_tests-ccov`
    }

    requires = [
        'boost/1.82.0',
        'cassandra-cpp-driver/2.16.2',
        'fmt/10.0.0',
        'grpc/1.50.1',
        'openssl/1.1.1u',
        'xrpl/1.12.0',
    ]

    default_options = {
        'fPIC': True,
        'verbose': False,
        'tests': False,
        'packaging': False,
        'coverage': False,
        'docs': False,

        'xrpl/*:tests': False,
        'cassandra-cpp-driver/*:shared': False,
        'date/*:header_only': True,
        'grpc/*:shared': False,
        'grpc/*:secure': True,
        'libpq/*:shared': False,
        'lz4/*:shared': False,
        'openssl/*:shared': False,
        'protobuf/*:shared': False,
        'protobuf/*:with_zlib': True,
        'snappy/*:shared': False,
        'gtest/*:no_main': True,
    }

    exports_sources = (
        'CMakeLists.txt', 'CMake/*', 'src/*'
    )

    def requirements(self):
        if self.options.tests:
            self.requires('gtest/1.13.0')

    def configure(self):
        if self.settings.compiler == 'apple-clang':
            self.options['boost'].visibility = 'global'

    def layout(self):
        cmake_layout(self)
        # Fix this setting to follow the default introduced in Conan 1.48
        # to align with our build instructions.
        self.folders.generators = 'build/generators'

    generators = 'CMakeDeps'

    def generate(self):
        tc = CMakeToolchain(self)
        tc.variables['verbose'] = self.options.verbose
        tc.variables['tests'] = self.options.tests
        tc.variables['coverage'] = self.options.coverage
        tc.variables['docs'] = self.options.docs
        tc.variables['packaging'] = self.options.packaging
        tc.generate()

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        cmake = CMake(self)
        cmake.install()
@@ -1,3 +1,6 @@
/*
 * This is an example configuration file. Please do not use without modifying to suit your needs.
 */
{
    "database": {
        "type": "cassandra",
@@ -9,7 +12,18 @@
        "table_prefix": "",
        "max_write_requests_outstanding": 25000,
        "max_read_requests_outstanding": 30000,
        "threads": 8
        "threads": 8,
        //
        // Advanced options. USE AT OWN RISK:
        // ---
        "core_connections_per_host": 1 // Defaults to 1
        //
        // Below options will use defaults from cassandra driver if left unspecified.
        // See https://docs.datastax.com/en/developer/cpp-driver/2.17/api/struct.CassCluster/ for details.
        //
        // "queue_size_io": 2
        //
        // ---
    }
},
"etl_sources": [
@@ -20,21 +34,24 @@
    }
],
"dos_guard": {
    // Comma-separated list of IPs to exclude from rate limiting
    "whitelist": [
        "127.0.0.1"
    ], // comma-separated list of ips to exclude from rate limiting
    /* The below values are the default values and are only specified here
     * for documentation purposes. The rate limiter currently limits
     * connections and bandwidth per ip. The rate limiter looks at the raw
     * ip of a client connection, and so requests routed through a load
     * balancer will all have the same ip and be treated as a single client
     */
    "max_fetches": 1000000, // max bytes per ip per sweep interval
    "max_connections": 20, // max connections per ip
    "max_requests": 20, // max connections per ip
    "sweep_interval": 1 // time in seconds before resetting bytes per ip count
    ],
    //
    // The below values are the default values and are only specified here
    // for documentation purposes. The rate limiter currently limits
    // connections and bandwidth per IP. The rate limiter looks at the raw
    // IP of a client connection, and so requests routed through a load
    // balancer will all have the same IP and be treated as a single client.
    //
    "max_fetches": 1000000, // Max bytes per IP per sweep interval
    "max_connections": 20, // Max connections per IP
    "max_requests": 20, // Max connections per IP per sweep interval
    "sweep_interval": 1 // Time in seconds before resetting max_fetches and max_requests
},
"cache": {
    // Comma-separated list of peer nodes that Clio can use to download cache from at startup
    "peers": [
        {
            "ip": "127.0.0.1",
@@ -45,11 +62,12 @@
"server": {
    "ip": "0.0.0.0",
    "port": 51233,
    /* Max number of requests to queue up before rejecting further requests.
     * Defaults to 0, which disables the limit
     */
    // Max number of requests to queue up before rejecting further requests.
    // Defaults to 0, which disables the limit.
    "max_queue_size": 500
},
// Overrides log level on a per logging channel.
// Defaults to global "log_level" for each unspecified channel.
"log_channels": [
    {
        "channel": "Backend",
@@ -77,17 +95,24 @@
    }
],
"log_level": "info",
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format
// Log format (this is the default format)
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%",
"log_to_console": true,
"log_directory": "./clio_log",
// Clio logs to file in the specified directory only if "log_directory" is set
// "log_directory": "./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"log_tag_style": "uint",
"extractor_threads": 8,
"read_only": false,
//"start_sequence": [integer] the ledger index to start from,
//"finish_sequence": [integer] the ledger index to finish at,
//"ssl_cert_file" : "/full/path/to/cert.file",
//"ssl_key_file" : "/full/path/to/key.file"
// "start_sequence": [integer] the ledger index to start from,
// "finish_sequence": [integer] the ledger index to finish at,
// "ssl_cert_file" : "/full/path/to/cert.file",
// "ssl_key_file" : "/full/path/to/key.file"
"api_version": {
    "min": 1, // Minimum API version supported (could be 1 or 2)
    "max": 2, // Maximum API version supported (could be 1 or 2, but >= min)
    "default": 1 // Clio behaves the same as rippled by default
}
}
87
external/cassandra/conanfile.py
vendored
Normal file
@@ -0,0 +1,87 @@
from conan import ConanFile, tools
from conan.tools.cmake import CMake, CMakeToolchain


class Cassandra(ConanFile):
    name = 'cassandra-cpp-driver'
    version = '2.16.2'
    license = 'Apache-2.0'
    url = 'https://github.com/conan-io/conan-center-index'
    homepage = 'https://docs.datastax.com/en/developer/cpp-driver/'
    description = 'Cassandra C++ Driver'
    topics = ('conan', 'cassandra', 'driver')

    settings = 'os', 'arch', 'compiler', 'build_type'
    options = {
        'shared': [True, False],
        'fPIC': [True, False],
        'install_header_in_subdir': [True, False],
        'use_atomic': [None, 'boost', 'std'],
        'with_openssl': [True, False],
        'with_zlib': [True, False],
        'with_kerberos': [True, False],
        'use_timerfd': [True, False],
    }
    default_options = {
        'shared': False,
        'fPIC': True,
        'install_header_in_subdir': False,
        'use_atomic': None,
        'with_openssl': True,
        'with_zlib': True,
        'with_kerberos': False,
        'use_timerfd': True,
    }

    def requirements(self):
        self.requires('libuv/1.44.1')
        self.requires('http_parser/2.9.4')
        if self.options.with_openssl:
            self.requires('openssl/1.1.1q')
        if self.options.with_zlib:
            self.requires('minizip/1.2.12')
            self.requires('zlib/1.2.13')
        if self.options.use_atomic == 'boost':
            self.requires('boost/1.79.0')

    exports_sources = ['CMakeLists.txt']

    def config_options(self):
        if self.settings.os == 'Windows':
            del self.options.fPIC

    def configure(self):
        self.options['libuv'].shared = self.options.shared

    def generate(self):
        tc = CMakeToolchain(self)
        if self.settings.get_safe('compiler.cppstd') == '20':
            tc.blocks['cppstd'].values['cppstd'] = 17
        tc.variables['CASS_BUILD_STATIC'] = not self.options.shared
        tc.variables['CASS_USE_STATIC_LIBS'] = not self.options.shared
        tc.variables['CASS_BUILD_SHARED'] = self.options.shared
        tc.variables['LIBUV_ROOT_DIR'] = self.deps_cpp_info['libuv'].rootpath
        if self.options.with_openssl:
            tc.variables['OPENSSL_ROOT_DIR'] = self.deps_cpp_info['openssl'].rootpath
        tc.generate()

    def source(self):
        tools.files.get(self, f'https://github.com/datastax/cpp-driver/archive/refs/tags/{self.version}.tar.gz', strip_root=True)

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        cmake = CMake(self)
        cmake.install()

    def package_info(self):
        if self.options.shared:
            self.cpp_info.libs = ['cassandra']
        else:
            self.cpp_info.libs = ['cassandra_static']
        if self.settings.os == 'Windows':
            self.cpp_info.libs.extend(['iphlpapi', 'psapi', 'wsock32', 'crypt32', 'ws2_32', 'userenv'])
        if not self.options.shared:
            self.cpp_info.defines = ['CASS_STATIC']
@@ -1,583 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/ledger/ReadView.h>
#include <backend/DBHelpers.h>
#include <backend/LedgerCache.h>
#include <backend/Types.h>
#include <config/Config.h>
#include <log/Logger.h>

#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <thread>
#include <type_traits>

namespace Backend {

/**
 * @brief Throws an error when database read time limit is exceeded.
 *
 * This class throws an error when the read time limit is exceeded but
 * is also paired with a separate class to retry the connection.
 */
class DatabaseTimeout : public std::exception
{
public:
    const char*
    what() const throw() override
    {
        return "Database read timed out. Please retry the request";
    }
};

/**
 * @brief Separate class that reattempts connection after time limit.
 *
 * @tparam F Represents a class of handlers for Cassandra database.
 * @param func Instance of Cassandra database handler class.
 * @param waitMs Is the arbitrary time limit of 500ms.
 * @return auto
 */
template <class F>
auto
retryOnTimeout(F func, size_t waitMs = 500)
{
    static clio::Logger log{"Backend"};

    while (true)
    {
        try
        {
            return func();
        }
        catch (DatabaseTimeout& t)
        {
            log.error() << "Database request timed out. Sleeping and retrying ... ";
            std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
        }
    }
}
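
For illustration (an editorial sketch, not part of the original header), this is how a caller might wrap a read in `retryOnTimeout`; the `readBaseFee` callable is hypothetical:

```cpp
// Sketch only: keep retrying a read until it stops throwing DatabaseTimeout.
// `readBaseFee` is a hypothetical callable standing in for any database read.
#include <cstdint>

std::uint64_t
exampleRetryingRead()
{
    auto readBaseFee = []() -> std::uint64_t {
        // ... perform a read here; it may throw Backend::DatabaseTimeout ...
        return 10u;
    };

    // On DatabaseTimeout, retryOnTimeout logs, sleeps ~500ms, and tries again.
    return Backend::retryOnTimeout(readBaseFee);
}
```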

/**
 * @brief Passes in serialized handlers in an asynchronous fashion.
 *
 * Note that the synchronous auto passes handlers critical to supporting
 * the Clio backend. The coroutine types are checked if same/different.
 *
 * @tparam F Represents a class of handlers for Cassandra database.
 * @param f R-value instance of Cassandra handler class.
 * @return auto
 */
template <class F>
auto
synchronous(F&& f)
{
    /** @brief Serialized handlers and their execution.
     *
     * The ctx class is converted into a serialized handler, also named
     * ctx, and is used to pass a stream of data into the method.
     */
    boost::asio::io_context ctx;
    boost::asio::io_context::strand strand(ctx);
    std::optional<boost::asio::io_context::work> work;

    /*! @brief Place the ctx within the vector of serialized handlers. */
    work.emplace(ctx);

    /**
     * @brief If/else statements regarding coroutine type matching.
     *
     * R is the currently executing coroutine that is about to get passed.
     * If coroutine types do not match, the current one's type is stored.
     */
    using R = typename boost::result_of<F(boost::asio::yield_context&)>::type;
    if constexpr (!std::is_same<R, void>::value)
    {
        /**
         * @brief When the coroutine type is the same
         *
         * The spawn function enables programs to implement asynchronous logic
         * in a synchronous manner. res stores the instance of the currently
         * executing coroutine, yield. The different type is returned.
         */
        R res;
        boost::asio::spawn(strand, [&f, &work, &res](boost::asio::yield_context yield) {
            res = f(yield);
            work.reset();
        });

        ctx.run();
        return res;
    }
    else
    {
        /*! @brief When the coroutine type is different, run as normal. */
        boost::asio::spawn(strand, [&f, &work](boost::asio::yield_context yield) {
            f(yield);
            work.reset();
        });

        ctx.run();
    }
}
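
Again for illustration (not part of the original header), a sketch of how `synchronous` lets plain, non-coroutine code run a coroutine-based read and block on its result; the `backend` reference stands in for a hypothetical concrete implementation:

```cpp
// Sketch only: run a coroutine-based fetch to completion from plain code.
// `backend` stands in for any concrete BackendInterface implementation.
std::optional<ripple::Fees>
exampleBlockingFetch(Backend::BackendInterface const& backend, std::uint32_t seq)
{
    // spawn() runs the lambda as a coroutine; ctx.run() inside synchronous()
    // blocks until the coroutine finishes and its result is captured.
    return Backend::synchronous(
        [&](boost::asio::yield_context yield) { return backend.fetchFees(seq, yield); });
}
```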

/**
 * @brief Reestablishes synchronous connection on timeout.
 *
 * @tparam F Represents a class of handlers for Cassandra database.
 * @param f R-value instance of Cassandra database handler class.
 * @return auto
 */
template <class F>
auto
synchronousAndRetryOnTimeout(F&& f)
{
    return retryOnTimeout([&]() { return synchronous(f); });
}

/*! @brief Handles ledger and transaction backend data. */
class BackendInterface
{
    /**
     * @brief Shared mutexes and a cache for the interface.
     *
     * rngMutex is a shared mutex. Shared mutexes prevent shared data
     * from being accessed by multiple threads and have two levels of
     * access: shared and exclusive.
     */
protected:
    mutable std::shared_mutex rngMtx_;
    std::optional<LedgerRange> range;
    LedgerCache cache_;

    /**
     * @brief Public read methods
     *
     * All of these read methods can throw DatabaseTimeout. When writing
     * code in an RPC handler, this exception does not need to be caught:
     * when an RPC results in a timeout, an error is returned to the client.
     */

public:
    BackendInterface() = default;
    virtual ~BackendInterface() = default;

    /**
     * @brief Cache that holds states of the ledger
     * @return Immutable cache
     */
    LedgerCache const&
    cache() const
    {
        return cache_;
    }

    /**
     * @brief Cache that holds states of the ledger
     * @return Mutable cache
     */
    LedgerCache&
    cache()
    {
        return cache_;
    }

    /*! @brief Fetches a specific ledger by sequence number. */
    virtual std::optional<ripple::LedgerInfo>
    fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context& yield) const = 0;

    /*! @brief Fetches a specific ledger by hash. */
    virtual std::optional<ripple::LedgerInfo>
    fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0;

    /*! @brief Fetches the latest ledger sequence. */
    virtual std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0;

    /*! @brief Fetches the current ledger range while locking that process */
    std::optional<LedgerRange>
    fetchLedgerRange() const
    {
        std::shared_lock lck(rngMtx_);
        return range;
    }

    /**
     * @brief Updates the range of sequences to be tracked.
     *
     * Function that continues updating the range sliding window or creates
     * a new sliding window once the maxSequence limit has been reached.
     *
     * @param newMax Unsigned 32-bit integer representing new max of range.
     */
    void
    updateRange(uint32_t newMax)
    {
        std::scoped_lock lck(rngMtx_);
        assert(!range || newMax >= range->maxSequence);
        if (!range)
            range = {newMax, newMax};
        else
            range->maxSequence = newMax;
    }

    /**
     * @brief Returns the fees for specific transactions.
     *
     * @param seq Unsigned 32-bit integer representing sequence.
     * @param yield The currently executing coroutine.
     * @return std::optional<ripple::Fees>
     */
    std::optional<ripple::Fees>
    fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const;

    /*! @brief TRANSACTION METHODS */
    /**
     * @brief Fetches a specific transaction.
     *
     * @param hash Unsigned 256-bit integer representing hash.
     * @param yield The currently executing coroutine.
     * @return std::optional<TransactionAndMetadata>
     */
    virtual std::optional<TransactionAndMetadata>
    fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches multiple transactions.
     *
     * @param hashes Unsigned integer value representing a hash.
     * @param yield The currently executing coroutine.
     * @return std::vector<TransactionAndMetadata>
     */
    virtual std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches all transactions for a specific account
     *
     * @param account A specific XRPL Account, specified by unique type
     * accountID.
     * @param limit Paging limit for how many transactions can be returned per
     * page.
     * @param forward Boolean whether paging happens forwards or backwards.
     * @param cursor Important metadata returned every time paging occurs.
     * @param yield Currently executing coroutine.
     * @return TransactionsAndCursor
     */
    virtual TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursor,
        boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches all transactions from a specific ledger.
     *
     * @param ledgerSequence Unsigned 32-bit integer for latest total
     * transactions.
     * @param yield Currently executing coroutine.
     * @return std::vector<TransactionAndMetadata>
     */
    virtual std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches all transaction hashes from a specific ledger.
     *
     * @param ledgerSequence Standard unsigned integer.
     * @param yield Currently executing coroutine.
     * @return std::vector<ripple::uint256>
     */
    virtual std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

    /*! @brief NFT methods */
    /**
     * @brief Fetches a specific NFT
     *
     * @param tokenID Unsigned 256-bit integer.
     * @param ledgerSequence Standard unsigned integer.
     * @param yield Currently executing coroutine.
     * @return std::optional<NFT>
     */
    virtual std::optional<NFT>
    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
        const = 0;

    /**
     * @brief Fetches all transactions for a specific NFT.
     *
     * @param tokenID Unsigned 256-bit integer.
     * @param limit Paging limit as to how many transactions return per page.
     * @param forward Boolean whether paging happens forwards or backwards.
     * @param cursorIn Represents transaction number and ledger sequence.
     * @param yield Currently executing coroutine is passed in as input.
     * @return TransactionsAndCursor
     */
    virtual TransactionsAndCursor
    fetchNFTTransactions(
        ripple::uint256 const& tokenID,
        std::uint32_t const limit,
        bool const forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context& yield) const = 0;

    /*! @brief STATE DATA METHODS */
    /**
     * @brief Fetches a specific ledger object: vector of unsigned chars
     *
     * @param key Unsigned 256-bit integer.
     * @param sequence Unsigned 32-bit integer.
     * @param yield Currently executing coroutine.
     * @return std::optional<Blob>
     */
    std::optional<Blob>
    fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
        const;

    /**
     * @brief Fetches all ledger objects: a vector of vectors of unsigned chars.
     *
     * @param keys Unsigned 256-bit integer.
     * @param sequence Unsigned 32-bit integer.
     * @param yield Currently executing coroutine.
     * @return std::vector<Blob>
     */
    std::vector<Blob>
    fetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const;

    /*! @brief Virtual function version of fetchLedgerObject */
    virtual std::optional<Blob>
    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
        const = 0;

    /*! @brief Virtual function version of fetchLedgerObjects */
    virtual std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Returns the difference between ledgers: vector of objects
     *
     * Objects are made of a key value, vector of unsigned chars (blob),
     * and a boolean detailing whether keys and blob match.
     *
     * @param ledgerSequence Standard unsigned integer.
     * @param yield Currently executing coroutine.
     * @return std::vector<LedgerObject>
     */
    virtual std::vector<LedgerObject>
    fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches a page of ledger objects, ordered by key/index.
     *
     * @param cursor Important metadata returned every time paging occurs.
     * @param ledgerSequence Standard unsigned integer.
     * @param limit Paging limit as to how many transactions returned per page.
     * @param outOfOrder Boolean on whether ledger page is out of order.
     * @param yield Currently executing coroutine.
     * @return LedgerPage
     */
    LedgerPage
    fetchLedgerPage(
        std::optional<ripple::uint256> const& cursor,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        bool outOfOrder,
        boost::asio::yield_context& yield) const;

    /*! @brief Fetches successor object from key/index. */
    std::optional<LedgerObject>
    fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
        const;

    /*! @brief Fetches successor key from key/index. */
    std::optional<ripple::uint256>
    fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const;

    /*! @brief Virtual function version of fetchSuccessorKey. */
    virtual std::optional<ripple::uint256>
    doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
        const = 0;

    /**
     * @brief Fetches book offers.
     *
     * @param book Unsigned 256-bit integer.
     * @param ledgerSequence Standard unsigned integer.
     * @param limit Paging limit as to how many transactions returned per page.
     * @param cursor Important metadata returned every time paging occurs.
     * @param yield Currently executing coroutine.
     * @return BookOffersPage
     */
    BookOffersPage
    fetchBookOffers(
        ripple::uint256 const& book,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        boost::asio::yield_context& yield) const;

    /**
     * @brief Returns a ledger range
     *
     * Ledger range is a struct of min and max sequence numbers. Due to
     * the use of [&], which denotes a special case of a lambda expression
     * where values found outside the scope are passed by reference, this
     * applies to the currently executing coroutine.
     *
     * @return std::optional<LedgerRange>
     */
    std::optional<LedgerRange>
    hardFetchLedgerRange() const
    {
        return synchronous([&](boost::asio::yield_context yield) { return hardFetchLedgerRange(yield); });
    }

    /*! @brief Virtual function equivalent of hardFetchLedgerRange. */
    virtual std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0;

    /*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
    std::optional<LedgerRange>
    hardFetchLedgerRangeNoThrow() const;
    /*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
    std::optional<LedgerRange>
    hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const;

    /**
     * @brief Writes to a specific ledger.
     *
     * @param ledgerInfo Const on ledger information.
     * @param ledgerHeader r-value string representing ledger header.
     */
    virtual void
    writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& ledgerHeader) = 0;

    /**
     * @brief Writes a new ledger object.
     *
     * The key and blob are r-value references and do NOT have memory addresses.
     *
     * @param key String represented as an r-value.
     * @param seq Unsigned integer representing a sequence.
     * @param blob r-value vector of unsigned characters (blob).
     */
    virtual void
    writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob);

    /**
     * @brief Writes a new transaction.
     *
     * @param hash r-value reference. No memory address.
     * @param seq Unsigned 32-bit integer.
     * @param date Unsigned 32-bit integer.
     * @param transaction r-value reference. No memory address.
     * @param metadata r-value reference. No memory address.
     */
    virtual void
    writeTransaction(
        std::string&& hash,
        std::uint32_t const seq,
        std::uint32_t const date,
        std::string&& transaction,
        std::string&& metadata) = 0;

    /**
     * @brief Write a new NFT.
     *
     * @param data Passed in as an r-value reference.
     */
    virtual void
    writeNFTs(std::vector<NFTsData>&& data) = 0;

    /**
     * @brief Write a new set of account transactions.
     *
     * @param data Passed in as an r-value reference.
     */
    virtual void
    writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;

    /**
     * @brief Write a new transaction for a specific NFT.
     *
     * @param data Passed in as an r-value reference.
     */
    virtual void
    writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;

    /**
     * @brief Write a new successor.
     *
     * @param key Passed in as an r-value reference.
     * @param seq Unsigned 32-bit integer.
     * @param successor Passed in as an r-value reference.
     */
    virtual void
    writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0;

    /*! @brief Tells database we will write data for a specific ledger. */
    virtual void
    startWrites() const = 0;

    /**
     * @brief Tells database we finished writing all data for a specific ledger.
     *
     * TODO: change the return value to represent different results:
     * Committed, write conflict, errored, successful but not committed
     *
     * @param ledgerSequence Const unsigned 32-bit integer on ledger sequence.
     * @return true
     * @return false
     */
    bool
    finishWrites(std::uint32_t const ledgerSequence);

    virtual bool
    isTooBusy() const = 0;

private:
    /**
     * @brief Private helper method to write ledger object
     *
     * @param key r-value string representing key.
     * @param seq Unsigned 32-bit integer representing sequence.
     * @param blob r-value vector of unsigned chars.
     */
    virtual void
    doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0;

    virtual bool
    doFinishWrites() = 0;
};

} // namespace Backend
using BackendInterface = Backend::BackendInterface;
@@ -19,19 +19,26 @@

#pragma once

#include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <data/BackendInterface.h>
#include <data/CassandraBackend.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <boost/algorithm/string.hpp>

namespace Backend {
namespace data {

/**
 * @brief A factory function that creates the backend based on a config.
 *
 * @param config The clio config to use
 * @return A shared_ptr<BackendInterface> with the selected implementation
 */
std::shared_ptr<BackendInterface>
make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
make_Backend(util::Config const& config)
{
    static clio::Logger log{"Backend"};
    log.info() << "Constructing BackendInterface";
    static util::Logger log{"Backend"};
    LOG(log.info()) << "Constructing BackendInterface";

    auto const readOnly = config.valueOr("read_only", false);

@@ -42,8 +49,7 @@ make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new"))
    {
        auto cfg = config.section("database." + type);
        backend =
            std::make_shared<Backend::Cassandra::CassandraBackend>(Backend::Cassandra::SettingsProvider{cfg}, readOnly);
        backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
    }

    if (!backend)
@@ -56,7 +62,7 @@ make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
        backend->updateRange(rng->maxSequence);
    }

    log.info() << "Constructed BackendInterface Successfully";
    LOG(log.info()) << "Constructed BackendInterface Successfully";
    return backend;
}
} // namespace Backend
} // namespace data
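
A short usage sketch (editorial, not part of this changeset) of the new factory signature; the config object is assumed to be loaded elsewhere, and the include path is inferred from this diff:

```cpp
// Sketch only: constructing the configured backend at startup.
#include <data/BackendFactory.h>  // assumed header path for the factory above

void
exampleStartup(util::Config const& config)
{
    // Selects the implementation from "database.type" (e.g. "cassandra")
    // and pre-populates the tracked ledger range if one already exists.
    auto backend = data::make_Backend(config);

    if (auto const range = backend->fetchLedgerRange(); range.has_value())
    {
        // Ledgers [range->minSequence, range->maxSequence] are available.
    }
}
```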
@@ -17,28 +17,26 @@
 */
//==============================================================================

#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <data/BackendInterface.h>
#include <util/log/Logger.h>

#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>

using namespace clio;

// local to compilation unit loggers
namespace {
clio::Logger gLog{"Backend"};
util::Logger gLog{"Backend"};
} // namespace

namespace Backend {
namespace data {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
    gLog.debug() << "Want finish writes for " << ledgerSequence;
    LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence;
    auto commitRes = doFinishWrites();
    if (commitRes)
    {
        gLog.debug() << "Successfully commited. Updating range now to " << ledgerSequence;
        LOG(gLog.debug()) << "Successfully commited. Updating range now to " << ledgerSequence;
        updateRange(ledgerSequence);
    }
    return commitRes;
@@ -50,27 +48,9 @@ BackendInterface::writeLedgerObject(std::string&& key, std::uint32_t const seq,
    doWriteLedgerObject(std::move(key), seq, std::move(blob));
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const
{
    gLog.trace() << "called";
    while (true)
    {
        try
        {
            return hardFetchLedgerRange(yield);
        }
        catch (DatabaseTimeout& t)
        {
            ;
        }
    }
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
    gLog.trace() << "called";
    return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}

@@ -79,22 +59,22 @@ std::optional<Blob>
BackendInterface::fetchLedgerObject(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield) const
{
    auto obj = cache_.get(key, sequence);
    if (obj)
    {
        gLog.trace() << "Cache hit - " << ripple::strHex(key);
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
        return *obj;
    }
    else
    {
        gLog.trace() << "Cache miss - " << ripple::strHex(key);
        LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
        auto dbObj = doFetchLedgerObject(key, sequence, yield);
        if (!dbObj)
            gLog.trace() << "Missed cache and missed in db";
            LOG(gLog.trace()) << "Missed cache and missed in db";
        else
            gLog.trace() << "Missed cache but found in db";
            LOG(gLog.trace()) << "Missed cache but found in db";
        return dbObj;
    }
}
@@ -103,7 +83,7 @@ std::vector<Blob>
BackendInterface::fetchLedgerObjects(
    std::vector<ripple::uint256> const& keys,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield) const
{
    std::vector<Blob> results;
    results.resize(keys.size());
@@ -116,7 +96,7 @@ BackendInterface::fetchLedgerObjects(
        else
            misses.push_back(keys[i]);
    }
    gLog.trace() << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
    LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();

    if (misses.size())
    {
@@ -138,13 +118,13 @@ std::optional<ripple::uint256>
BackendInterface::fetchSuccessorKey(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield) const
{
    auto succ = cache_.getSuccessor(key, ledgerSequence);
    if (succ)
        gLog.trace() << "Cache hit - " << ripple::strHex(key);
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
    else
        gLog.trace() << "Cache miss - " << ripple::strHex(key);
        LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
    return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}

@@ -152,7 +132,7 @@ std::optional<LedgerObject>
BackendInterface::fetchSuccessorObject(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield) const
{
    auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
    if (succ)
@@ -171,7 +151,7 @@ BackendInterface::fetchBookOffers(
    ripple::uint256 const& book,
    std::uint32_t const ledgerSequence,
    std::uint32_t const limit,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield) const
{
    // TODO try to speed this up. This can take a few seconds. The goal is
    // to get it down to a few hundred milliseconds.
@@ -194,7 +174,7 @@ BackendInterface::fetchBookOffers(
        succMillis += getMillis(mid2 - mid1);
        if (!offerDir || offerDir->key >= bookEnd)
        {
            gLog.trace() << "offerDir.has_value() " << offerDir.has_value() << " breaking";
            LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
            break;
        }
        uTipIndex = offerDir->key;
@@ -207,7 +187,7 @@ BackendInterface::fetchBookOffers(
            auto next = sle.getFieldU64(ripple::sfIndexNext);
            if (!next)
            {
                gLog.trace() << "Next is empty. breaking";
                LOG(gLog.trace()) << "Next is empty. breaking";
                break;
            }
            auto nextKey = ripple::keylet::page(uTipIndex, next);
@@ -223,32 +203,56 @@ BackendInterface::fetchBookOffers(
    auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
    for (size_t i = 0; i < keys.size() && i < limit; ++i)
    {
        gLog.trace() << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
                     << " ledgerSequence = " << ledgerSequence;
        LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
                          << " ledgerSequence = " << ledgerSequence;
        assert(objs[i].size());
        page.offers.push_back({keys[i], objs[i]});
    }
    auto end = std::chrono::system_clock::now();
    gLog.debug() << "Fetching " << std::to_string(keys.size()) << " offers took "
                 << std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
                 << std::to_string(succMillis) << " milliseonds. Fetched next dir " << std::to_string(numSucc)
                 << " times"
                 << " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds"
                 << ". num pages = " << std::to_string(numPages) << ". Fetching all objects took "
                 << std::to_string(getMillis(end - mid))
                 << " milliseconds. total time = " << std::to_string(getMillis(end - begin)) << " milliseconds"
                 << " book = " << ripple::strHex(book);
    LOG(gLog.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
                      << std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
                      << std::to_string(succMillis) << " milliseonds. Fetched next dir " << std::to_string(numSucc)
                      << " times"
                      << " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds"
                      << ". num pages = " << std::to_string(numPages) << ". Fetching all objects took "
                      << std::to_string(getMillis(end - mid))
                      << " milliseconds. total time = " << std::to_string(getMillis(end - begin)) << " milliseconds"
                      << " book = " << ripple::strHex(book);

    return page;
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRange() const
{
    return synchronous([this](auto yield) { return hardFetchLedgerRange(yield); });
}

std::optional<LedgerRange>
BackendInterface::fetchLedgerRange() const
{
    std::shared_lock lck(rngMtx_);
    return range;
}

void
BackendInterface::updateRange(uint32_t newMax)
{
    std::scoped_lock lck(rngMtx_);
    assert(!range || newMax >= range->maxSequence);
    if (!range)
        range = {newMax, newMax};
    else
        range->maxSequence = newMax;
}

LedgerPage
BackendInterface::fetchLedgerPage(
    std::optional<ripple::uint256> const& cursor,
    std::uint32_t const ledgerSequence,
    std::uint32_t const limit,
    bool outOfOrder,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield) const
{
    LedgerPage page;

@@ -272,14 +276,14 @@ BackendInterface::fetchLedgerPage(
            page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
        else if (!outOfOrder)
        {
            gLog.error() << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
                         << " - seq = " << ledgerSequence;
            LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
                              << " - seq = " << ledgerSequence;
            std::stringstream msg;
            for (size_t j = 0; j < objects.size(); ++j)
            {
                msg << " - " << ripple::strHex(keys[j]);
            }
            gLog.error() << msg.str();
            LOG(gLog.error()) << msg.str();
        }
    }
    if (keys.size() && !reachedEnd)
@@ -289,7 +293,7 @@ BackendInterface::fetchLedgerPage(
}

std::optional<ripple::Fees>
BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const
BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context yield) const
{
    ripple::Fees fees;

@@ -298,7 +302,7 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context&
    if (!bytes)
    {
        gLog.error() << "Could not find fees";
        LOG(gLog.error()) << "Could not find fees";
        return {};
    }

@@ -308,9 +312,6 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context&
    if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
        fees.base = sle.getFieldU64(ripple::sfBaseFee);

    if (sle.getFieldIndex(ripple::sfReferenceFeeUnits) != -1)
        fees.units = sle.getFieldU32(ripple::sfReferenceFeeUnits);

    if (sle.getFieldIndex(ripple::sfReserveBase) != -1)
        fees.reserve = sle.getFieldU32(ripple::sfReserveBase);

@@ -320,4 +321,4 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context&
    return fees;
||||
}
|
||||
|
||||
} // namespace Backend
|
||||
} // namespace data
|
||||
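Two mechanical changes recur throughout this diff: boost::asio::yield_context is now taken by value rather than by reference (the form Boost.Asio's spawn-based coroutines expect to copy around), and every logging statement is wrapped in a LOG(...) macro. The snippet below is a toy sketch of the usual motivation for such a wrapper: short-circuiting so the stream arguments are never evaluated when the severity is disabled. It is a hypothetical stand-in, not clio's actual util/log/Logger.h implementation.

#include <iostream>
#include <sstream>

// Toy pump: buffers a message and flushes it on scope exit when enabled.
struct LogPump
{
    bool enabled;
    std::ostringstream stream;
    ~LogPump()
    {
        if (enabled)
            std::clog << stream.str() << '\n';
    }
};

// The short-circuit idiom: when the pump is disabled, the else-branch (and
// therefore the whole `<<` chain) is skipped without evaluating its operands.
#define LOG(pump) \
    if (auto lp__ = (pump); !lp__.enabled) {} else lp__.stream

LogPump trace() { return {false, {}}; }  // trace severity disabled in this demo

int main()
{
    LOG(trace()) << "never formatted: " << 42;  // operands not evaluated
}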
568
src/data/BackendInterface.h
Normal file
568
src/data/BackendInterface.h
Normal file
@@ -0,0 +1,568 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <data/DBHelpers.h>
#include <data/LedgerCache.h>
#include <data/Types.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <ripple/protocol/Fees.h>
#include <ripple/protocol/LedgerHeader.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <thread>
#include <type_traits>

namespace data {

/**
* @brief Represents a database timeout error.
*/
class DatabaseTimeout : public std::exception
{
public:
const char*
what() const throw() override
{
return "Database read timed out. Please retry the request";
}
};

/**
* @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely.
*
* @tparam FnType The type of function object to execute
* @param func The function object to execute
* @param waitMs Delay between retry attempts
* @return auto The same as the return type of func
*/
template <class FnType>
auto
retryOnTimeout(FnType func, size_t waitMs = 500)
{
static util::Logger log{"Backend"};

while (true)
{
try
{
return func();
}
catch (DatabaseTimeout const&)
{
LOG(log.error()) << "Database request timed out. Sleeping and retrying ... ";
std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
}
}
}
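A caller wraps any database-touching callable in retryOnTimeout; on each DatabaseTimeout it sleeps waitMs milliseconds and tries again. A minimal usage sketch, where loadCheckpoint is a hypothetical callable that may throw DatabaseTimeout:

// Sketch only: `loadCheckpoint` is a placeholder for any callable that can
// throw data::DatabaseTimeout; the result type is whatever it returns.
auto checkpoint = data::retryOnTimeout([&] { return loadCheckpoint(); }, /* waitMs = */ 250);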

/**
* @brief Synchronously executes the given function object inside a coroutine.
*
* @tparam FnType The type of function object to execute
* @param func The function object to execute
* @return auto The same as the return type of func
*/
template <class FnType>
auto
synchronous(FnType&& func)
{
boost::asio::io_context ctx;

using R = typename boost::result_of<FnType(boost::asio::yield_context)>::type;
if constexpr (!std::is_same<R, void>::value)
{
R res;
boost::asio::spawn(
ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) { res = func(yield); });

ctx.run();
return res;
}
else
{
boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
ctx.run();
}
}

/**
* @brief Synchronously execute the given function object and retry until no DatabaseTimeout is thrown.
*
* @tparam FnType The type of function object to execute
* @param func The function object to execute
* @return auto The same as the return type of func
*/
template <class FnType>
auto
synchronousAndRetryOnTimeout(FnType&& func)
{
return retryOnTimeout([&]() { return synchronous(func); });
}
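Together these helpers bridge plain, non-coroutine call sites into the yield_context-based API: synchronous spins up a private io_context and runs the callable inside a spawned coroutine, and synchronousAndRetryOnTimeout additionally retries on DatabaseTimeout. A hedged sketch, assuming `backend` points at an implementation of the BackendInterface declared below:

// One-shot read from synchronous code:
auto range = data::synchronous(
    [&](boost::asio::yield_context yield) { return backend->hardFetchLedgerRange(yield); });

// Same read, but retried for as long as the database keeps timing out:
auto rangeRetried = data::synchronousAndRetryOnTimeout(
    [&](boost::asio::yield_context yield) { return backend->hardFetchLedgerRange(yield); });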

/**
* @brief The interface to the database used by Clio.
*/
class BackendInterface
{
protected:
mutable std::shared_mutex rngMtx_;
std::optional<LedgerRange> range;
LedgerCache cache_;

public:
BackendInterface() = default;
virtual ~BackendInterface() = default;

// TODO: Remove this hack. Cache should not be exposed through BackendInterface
/**
* @return Immutable cache
*/
LedgerCache const&
cache() const
{
return cache_;
}

/**
* @return Mutable cache
*/
LedgerCache&
cache()
{
return cache_;
}

/**
* @brief Fetches a specific ledger by sequence number.
*
* @param sequence The sequence number to fetch for
* @param yield The coroutine context
* @return The ripple::LedgerHeader if found; nullopt otherwise
*/
virtual std::optional<ripple::LedgerHeader>
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches a specific ledger by hash.
*
* @param hash The hash to fetch for
* @param yield The coroutine context
* @return The ripple::LedgerHeader if found; nullopt otherwise
*/
virtual std::optional<ripple::LedgerHeader>
fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches the latest ledger sequence.
*
* @param yield The coroutine context
* @return Latest sequence wrapped in an optional if found; nullopt otherwise
*/
virtual std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context yield) const = 0;

/**
* @brief Fetch the current ledger range.
*
* @return The current ledger range if populated; nullopt otherwise
*/
std::optional<LedgerRange>
fetchLedgerRange() const;

/**
* @brief Updates the range of sequences that are stored in the DB.
*
* @param newMax The new maximum sequence available
*/
void
updateRange(uint32_t newMax);

/**
* @brief Fetch the fees from a specific ledger sequence.
*
* @param seq The sequence to fetch for
* @param yield The coroutine context
* @return ripple::Fees if fees are found; nullopt otherwise
*/
std::optional<ripple::Fees>
fetchFees(std::uint32_t const seq, boost::asio::yield_context yield) const;

/**
* @brief Fetches a specific transaction.
*
* @param hash The hash of the transaction to fetch
* @param yield The coroutine context
* @return TransactionAndMetadata if transaction is found; nullopt otherwise
*/
virtual std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches multiple transactions.
*
* @param hashes A vector of hashes to fetch transactions for
* @param yield The coroutine context
* @return A vector of TransactionAndMetadata matching the given hashes
*/
virtual std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches all transactions for a specific account.
*
* @param account The account to fetch transactions for
* @param limit The maximum number of transactions per result page
* @param forward Whether to fetch the page forwards or backwards from the given cursor
* @param cursor The cursor to resume fetching from
* @param yield The coroutine context
* @return Results and a cursor to resume from
*/
virtual TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches all transactions from a specific ledger.
*
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return Results as a vector of TransactionAndMetadata
*/
virtual std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches all transaction hashes from a specific ledger.
*
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return Hashes as ripple::uint256 in a vector
*/
virtual std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches a specific NFT.
*
* @param tokenID The ID of the NFT
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return NFT object on success; nullopt otherwise
*/
virtual std::optional<NFT>
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const = 0;

/**
* @brief Fetches all transactions for a specific NFT.
*
* @param tokenID The ID of the NFT
* @param limit The maximum number of transactions per result page
* @param forward Whether to fetch the page forwards or backwards from the given cursor
* @param cursorIn The cursor to resume fetching from
* @param yield The coroutine context
* @return Results and a cursor to resume from
*/
virtual TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches a specific ledger object.
*
* Currently the real fetch happens in doFetchLedgerObject and fetchLedgerObject attempts to fetch from Cache first
* and only calls out to the real DB if a cache miss occurred.
*
* @param key The key of the object
* @param sequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The object as a Blob on success; nullopt otherwise
*/
std::optional<Blob>
fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield) const;

/**
* @brief Fetches all ledger objects by their keys.
*
* Currently the real fetch happens in doFetchLedgerObjects and fetchLedgerObjects attempts to fetch from Cache
* first and only calls out to the real DB for each of the keys that was not found in the cache.
*
* @param keys A vector with the keys of the objects to fetch
* @param sequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return A vector of ledger objects as Blobs
*/
std::vector<Blob>
fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context yield) const;
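The doc comments above describe a read-through cache. A simplified sketch of what that amounts to for a single key (the real logic lives in BackendInterface.cpp and also tracks deletions and metrics):

// Simplified read-through sketch; not the actual member implementation.
std::optional<data::Blob>
readThroughOnce(
    data::BackendInterface const& backend,
    ripple::uint256 const& key,
    std::uint32_t sequence,
    boost::asio::yield_context yield)
{
    if (auto cached = backend.cache().get(key, sequence); cached)
        return cached;  // served from LedgerCache, no DB round trip
    return backend.doFetchLedgerObject(key, sequence, yield);  // cache miss
}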

/**
* @brief The database-specific implementation for fetching a ledger object.
*
* @param key The key to fetch for
* @param sequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The object as a Blob on success; nullopt otherwise
*/
virtual std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
const = 0;

/**
* @brief The database-specific implementation for fetching ledger objects.
*
* @param keys The keys to fetch for
* @param sequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return A vector of Blobs representing each fetched object
*/
virtual std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context yield) const = 0;

/**
* @brief Returns the difference between ledgers.
*
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return A vector of LedgerObject representing the diff
*/
virtual std::vector<LedgerObject>
fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches a page of ledger objects, ordered by key/index.
*
* @param cursor The cursor to resume fetching from
* @param ledgerSequence The ledger sequence to fetch for
* @param limit The maximum number of transactions per result page
* @param outOfOrder If set to true, the maximum available sequence is used instead of ledgerSequence
* @param yield The coroutine context
* @return The ledger page
*/
LedgerPage
fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
bool outOfOrder,
boost::asio::yield_context yield) const;
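fetchLedgerPage is cursor-driven: each call returns up to limit objects plus a point to resume from. A usage sketch that walks an entire ledger; it assumes (as the cursor parameter suggests) that LedgerPage carries a resume cursor alongside its objects, which is not spelled out in this diff:

// Sketch: page through every object of one ledger, 256 keys at a time.
std::optional<ripple::uint256> cursor;
while (true)
{
    auto page = backend.fetchLedgerPage(cursor, ledgerSequence, 256u, false, yield);
    for (auto const& obj : page.objects)
        consume(obj);  // `consume` is a placeholder for the caller's logic
    if (!page.cursor)  // assumed field: no cursor means the end was reached
        break;
    cursor = page.cursor;
}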

/**
* @brief Fetches the successor object.
*
* @param key The key to fetch for
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The successor on success; nullopt otherwise
*/
std::optional<LedgerObject>
fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const;

/**
* @brief Fetches the successor key.
*
* The real fetch happens in doFetchSuccessorKey. This function will attempt to lookup the successor in the cache
* first and only if it's not found in the cache will it fetch from the actual DB.
*
* @param key The key to fetch for
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The successor key on success; nullopt otherwise
*/
std::optional<ripple::uint256>
fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const;

/**
* @brief Database-specific implementation of fetching the successor key.
*
* @param key The key to fetch for
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The successor on success; nullopt otherwise
*/
virtual std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const = 0;
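The successor table is what makes ordered iteration over ledger state possible: each key maps to the next existing key. A sketch of walking a key range via fetchSuccessorKey, where firstKey and lastKey are illustrative bounds supplied by the caller:

// Sketch: visit every key strictly between firstKey and lastKey.
auto current = firstKey;
while (auto next = backend.fetchSuccessorKey(current, ledgerSequence, yield))
{
    if (*next >= lastKey)
        break;
    visit(*next);  // `visit` is a placeholder consumer
    current = *next;
}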

/**
* @brief Fetches book offers.
*
* @param book Unsigned 256-bit integer.
* @param ledgerSequence The ledger sequence to fetch for
* @param limit The paging limit, i.e. how many offers are returned per page
* @param yield The coroutine context
* @return The book offers page
*/
BookOffersPage
fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
boost::asio::yield_context yield) const;
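Usage is a single call; BookOffersPage exposes the offers that fetchBookOffers collected (the implementation earlier in this diff pushes into page.offers). A sketch:

// Sketch: fetch up to 50 offers for one order book. `bookBase` would
// typically be derived via getBookBase(...) from data/DBHelpers.h.
auto page = backend.fetchBookOffers(bookBase, ledgerSequence, 50u, yield);
for (auto const& offer : page.offers)
    inspect(offer);  // `inspect` is a placeholder consumer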

/**
* @brief Synchronously fetches the ledger range from DB.
*
* This function just wraps hardFetchLedgerRange(boost::asio::yield_context) using synchronous(FnType&&).
*
* @return The ledger range if available; nullopt otherwise
*/
std::optional<LedgerRange>
hardFetchLedgerRange() const;

/**
* @brief Fetches the ledger range from DB.
*
* @return The ledger range if available; nullopt otherwise
*/
virtual std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context yield) const = 0;

/**
* @brief Fetches the ledger range from DB retrying until no DatabaseTimeout is thrown.
*
* @return The ledger range if available; nullopt otherwise
*/
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow() const;

/**
* @brief Writes to a specific ledger.
*
* @param ledgerHeader Ledger header.
* @param blob r-value string serialization of ledger header.
*/
virtual void
writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) = 0;

/**
* @brief Writes a new ledger object.
*
* @param key The key to write the ledger object under
* @param seq The ledger sequence to write for
* @param blob The data to write
*/
virtual void
writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob);

/**
* @brief Writes a new transaction.
*
* @param hash The hash of the transaction
* @param seq The ledger sequence to write for
* @param date The timestamp of the entry
* @param transaction The transaction data to write
* @param metadata The metadata to write
*/
virtual void
writeTransaction(
std::string&& hash,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata) = 0;

/**
* @brief Writes NFTs to the database.
*
* @param data A vector of NFTsData objects representing the NFTs
*/
virtual void
writeNFTs(std::vector<NFTsData>&& data) = 0;

/**
* @brief Write a new set of account transactions.
*
* @param data A vector of AccountTransactionsData objects representing the account transactions
*/
virtual void
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;

/**
* @brief Write NFTs transactions.
*
* @param data A vector of NFTTransactionsData objects
*/
virtual void
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;

/**
* @brief Write a new successor.
*
* @param key Key of the object that the passed successor will be the successor for
* @param seq The ledger sequence to write for
* @param successor The successor data to write
*/
virtual void
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0;

/**
* @brief Starts a write transaction with the DB. No-op for cassandra.
*
* Note: Can potentially be deprecated and removed.
*/
virtual void
startWrites() const = 0;

/**
* @brief Tells database we finished writing all data for a specific ledger.
*
* Uses doFinishWrites to synchronize with the pending writes.
*
* @param ledgerSequence The ledger sequence to finish writing for
* @return true on success; false otherwise
*/
bool
finishWrites(std::uint32_t const ledgerSequence);

/**
* @return true if database is overwhelmed; false otherwise
*/
virtual bool
isTooBusy() const = 0;

private:
virtual void
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0;

virtual bool
doFinishWrites() = 0;
};

} // namespace data
using BackendInterface = data::BackendInterface;
@@ -19,41 +19,41 @@

#pragma once

-#include <backend/BackendInterface.h>
-#include <backend/cassandra/Concepts.h>
-#include <backend/cassandra/Handle.h>
-#include <backend/cassandra/Schema.h>
-#include <backend/cassandra/SettingsProvider.h>
-#include <backend/cassandra/impl/ExecutionStrategy.h>
-#include <log/Logger.h>
+#include <data/BackendInterface.h>
+#include <data/cassandra/Concepts.h>
+#include <data/cassandra/Handle.h>
+#include <data/cassandra/Schema.h>
+#include <data/cassandra/SettingsProvider.h>
+#include <data/cassandra/impl/ExecutionStrategy.h>
+#include <util/LedgerUtils.h>
+#include <util/Profiler.h>
+#include <util/log/Logger.h>

#include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <ripple/protocol/LedgerHeader.h>
#include <ripple/protocol/nft.h>
#include <boost/asio/spawn.hpp>

-namespace Backend::Cassandra {
+namespace data::cassandra {

/**
-* @brief Implements @ref BackendInterface for Cassandra/Scylladb
+* @brief Implements @ref BackendInterface for Cassandra/ScyllaDB.
*
-* Note: this is a safer and more correct rewrite of the original implementation
-* of the backend. We deliberately did not change the interface for now so that
-* other parts such as ETL do not have to change at all.
-* Eventually we should change the interface so that it does not have to know
-* about yield_context.
+* Note: This is a safer and more correct rewrite of the original implementation of the backend.
+*
+* @tparam SettingsProviderType The settings provider type to use
+* @tparam ExecutionStrategyType The execution strategy type to use
*/
-template <SomeSettingsProvider SettingsProviderType, SomeExecutionStrategy ExecutionStrategy>
+template <SomeSettingsProvider SettingsProviderType, SomeExecutionStrategy ExecutionStrategyType>
class BasicCassandraBackend : public BackendInterface
{
-clio::Logger log_{"Backend"};
+util::Logger log_{"Backend"};

SettingsProviderType settingsProvider_;
Schema<SettingsProviderType> schema_;
Handle handle_;

// have to be mutable because BackendInterface constness :(
-mutable ExecutionStrategy executor_;
+mutable ExecutionStrategyType executor_;

std::atomic_uint32_t ledgerSequence_ = 0u;

@@ -61,7 +61,8 @@ public:
/**
* @brief Create a new cassandra/scylla backend instance.
*
-* @param settingsProvider
+* @param settingsProvider The settings provider to use
+* @param readOnly Whether the database should be in readonly mode
*/
BasicCassandraBackend(SettingsProviderType settingsProvider, bool readOnly)
: settingsProvider_{std::move(settingsProvider)}
@@ -92,11 +93,11 @@ public:
}
catch (std::runtime_error const& ex)
{
-log_.error() << "Failed to prepare the statements: " << ex.what() << "; readOnly: " << readOnly;
+LOG(log_.error()) << "Failed to prepare the statements: " << ex.what() << "; readOnly: " << readOnly;
throw;
}

-log_.info() << "Created (revamped) CassandraBackend";
+LOG(log_.info()) << "Created (revamped) CassandraBackend";
}

TransactionsAndCursor
@@ -105,7 +106,7 @@ public:
std::uint32_t const limit,
bool forward,
std::optional<TransactionsCursor> const& cursorIn,
-boost::asio::yield_context& yield) const override
+boost::asio::yield_context yield) const override
{
auto rng = fetchLedgerRange();
if (!rng)
@@ -122,8 +123,8 @@ public:
if (cursor)
{
statement.bindAt(1, cursor->asTuple());
-log_.debug() << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
-<< cursor->transactionIndex;
+LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
+<< cursor->transactionIndex;
}
else
{
@@ -131,7 +132,8 @@ public:
auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();

statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
-log_.debug() << "account = " << ripple::strHex(account) << " idx = " << seq << " tuple = " << placeHolder;
+LOG(log_.debug()) << "account = " << ripple::strHex(account) << " idx = " << seq
+<< " tuple = " << placeHolder;
}

// FIXME: Limit is a hack to support uint32_t properly for the time
@@ -142,20 +144,20 @@ public:
auto const& results = res.value();
if (not results.hasRows())
{
-log_.debug() << "No rows returned";
+LOG(log_.debug()) << "No rows returned";
return {};
}

std::vector<ripple::uint256> hashes = {};
auto numRows = results.numRows();
-log_.info() << "num_rows = " << numRows;
+LOG(log_.info()) << "num_rows = " << numRows;

for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results))
{
hashes.push_back(hash);
if (--numRows == 0)
{
-log_.debug() << "Setting cursor";
+LOG(log_.debug()) << "Setting cursor";
cursor = data;

// forward queries by ledger/tx sequence `>=`
@@ -166,11 +168,11 @@ public:
}

auto const txns = fetchTransactions(hashes, yield);
-log_.debug() << "Txns = " << txns.size();
+LOG(log_.debug()) << "Txns = " << txns.size();

if (txns.size() == limit)
{
-log_.debug() << "Returning cursor";
+LOG(log_.debug()) << "Returning cursor";
return {txns, cursor};
}

@@ -190,18 +192,18 @@ public:

if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1)))
{
-log_.warn() << "Update failed for ledger " << ledgerSequence_;
+LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
return false;
}

-log_.info() << "Committed ledger " << ledgerSequence_;
+LOG(log_.info()) << "Committed ledger " << ledgerSequence_;
return true;
}

void
-writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& header) override
+writeLedger(ripple::LedgerHeader const& ledgerInfo, std::string&& blob) override
{
-executor_.write(schema_->insertLedgerHeader, ledgerInfo.seq, std::move(header));
+executor_.write(schema_->insertLedgerHeader, ledgerInfo.seq, std::move(blob));

executor_.write(schema_->insertLedgerHash, ledgerInfo.hash, ledgerInfo.seq);

@@ -209,7 +211,7 @@ public:
}

std::optional<std::uint32_t>
-fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override
+fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res)
{
@@ -218,25 +220,23 @@ public:
if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
return maybeValue;

-log_.error() << "Could not fetch latest ledger - no rows";
+LOG(log_.error()) << "Could not fetch latest ledger - no rows";
return std::nullopt;
}

-log_.error() << "Could not fetch latest ledger - no result";
+LOG(log_.error()) << "Could not fetch latest ledger - no result";
}
else
{
-log_.error() << "Could not fetch latest ledger: " << res.error();
+LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
}

return std::nullopt;
}

-std::optional<ripple::LedgerInfo>
-fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context& yield) const override
+std::optional<ripple::LedgerHeader>
+fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call for seq " << sequence;

auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
if (res)
{
@@ -247,25 +247,23 @@ public:
return util::deserializeHeader(ripple::makeSlice(*maybeValue));
}

-log_.error() << "Could not fetch ledger by sequence - no rows";
+LOG(log_.error()) << "Could not fetch ledger by sequence - no rows";
return std::nullopt;
}

-log_.error() << "Could not fetch ledger by sequence - no result";
+LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
}
else
{
-log_.error() << "Could not fetch ledger by sequence: " << res.error();
+LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
}

return std::nullopt;
}

-std::optional<ripple::LedgerInfo>
-fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context& yield) const override
+std::optional<ripple::LedgerHeader>
+fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call";

if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res)
{
if (auto const& result = res.value(); result)
@@ -273,31 +271,29 @@ public:
if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
return fetchLedgerBySequence(*maybeValue, yield);

-log_.error() << "Could not fetch ledger by hash - no rows";
+LOG(log_.error()) << "Could not fetch ledger by hash - no rows";
return std::nullopt;
}

-log_.error() << "Could not fetch ledger by hash - no result";
+LOG(log_.error()) << "Could not fetch ledger by hash - no result";
}
else
{
-log_.error() << "Could not fetch ledger by hash: " << res.error();
+LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
}

return std::nullopt;
}

std::optional<LedgerRange>
-hardFetchLedgerRange(boost::asio::yield_context& yield) const override
+hardFetchLedgerRange(boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call";

if (auto const res = executor_.read(yield, schema_->selectLedgerRange); res)
{
auto const& results = res.value();
if (not results.hasRows())
{
-log_.debug() << "Could not fetch ledger range - no rows";
+LOG(log_.debug()) << "Could not fetch ledger range - no rows";
return std::nullopt;
}

@@ -319,44 +315,43 @@ public:
if (range.minSequence > range.maxSequence)
std::swap(range.minSequence, range.maxSequence);

-log_.debug() << "After hardFetchLedgerRange range is " << range.minSequence << ":" << range.maxSequence;
+LOG(log_.debug()) << "After hardFetchLedgerRange range is " << range.minSequence << ":"
+<< range.maxSequence;
return range;
}
else
{
-log_.error() << "Could not fetch ledger range: " << res.error();
+LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
}

return std::nullopt;
}

std::vector<TransactionAndMetadata>
-fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const override
+fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call";
auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
return fetchTransactions(hashes, yield);
}

std::vector<ripple::uint256>
-fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
+fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const override
{
-log_.trace() << __func__ << " call";
auto start = std::chrono::system_clock::now();
auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);

if (not res)
{
-log_.error() << "Could not fetch all transaction hashes: " << res.error();
+LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
return {};
}

auto const& result = res.value();
if (not result.hasRows())
{
-log_.error() << "Could not fetch all transaction hashes - no rows; ledger = "
-<< std::to_string(ledgerSequence);
+LOG(log_.error()) << "Could not fetch all transaction hashes - no rows; ledger = "
+<< std::to_string(ledgerSequence);
return {};
}

@@ -365,18 +360,17 @@ public:
hashes.push_back(std::move(hash));

auto end = std::chrono::system_clock::now();
-log_.debug() << "Fetched " << hashes.size() << " transaction hashes from Cassandra in "
-<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << " milliseconds";
+LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from Cassandra in "
+<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
+<< " milliseconds";

return hashes;
}

std::optional<NFT>
-fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
+fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const override
{
-log_.trace() << __func__ << " call";

auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
if (not res)
return std::nullopt;
@@ -407,7 +401,7 @@ public:
return result;
}

-log_.error() << "Could not fetch NFT - no rows";
+LOG(log_.error()) << "Could not fetch NFT - no rows";
return std::nullopt;
}

@@ -417,10 +411,8 @@ public:
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
-boost::asio::yield_context& yield) const override
+boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call";

auto rng = fetchLedgerRange();
if (!rng)
return {{}, {}};
@@ -436,8 +428,8 @@ public:
if (cursor)
{
statement.bindAt(1, cursor->asTuple());
-log_.debug() << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
-<< cursor->transactionIndex;
+LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
+<< cursor->transactionIndex;
}
else
{
@@ -445,7 +437,8 @@ public:
auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();

statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
-log_.debug() << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq << " tuple = " << placeHolder;
+LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq
+<< " tuple = " << placeHolder;
}

statement.bindAt(2, Limit{limit});
@@ -454,20 +447,20 @@ public:
auto const& results = res.value();
if (not results.hasRows())
{
-log_.debug() << "No rows returned";
+LOG(log_.debug()) << "No rows returned";
return {};
}

std::vector<ripple::uint256> hashes = {};
auto numRows = results.numRows();
-log_.info() << "num_rows = " << numRows;
+LOG(log_.info()) << "num_rows = " << numRows;

for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results))
{
hashes.push_back(hash);
if (--numRows == 0)
{
-log_.debug() << "Setting cursor";
+LOG(log_.debug()) << "Setting cursor";
cursor = data;

// forward queries by ledger/tx sequence `>=`
@@ -478,11 +471,11 @@ public:
}

auto const txns = fetchTransactions(hashes, yield);
-log_.debug() << "NFT Txns = " << txns.size();
+LOG(log_.debug()) << "NFT Txns = " << txns.size();

if (txns.size() == limit)
{
-log_.debug() << "Returning cursor";
+LOG(log_.debug()) << "Returning cursor";
return {txns, cursor};
}

@@ -490,10 +483,10 @@ public:
}

std::optional<Blob>
-doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
+doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
const override
{
-log_.debug() << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
+LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res)
{
if (auto const result = res->template get<Blob>(); result)
@@ -503,22 +496,20 @@ public:
}
else
{
-log_.debug() << "Could not fetch ledger object - no rows";
+LOG(log_.debug()) << "Could not fetch ledger object - no rows";
}
}
else
{
-log_.error() << "Could not fetch ledger object: " << res.error();
+LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
}

return std::nullopt;
}

std::optional<TransactionAndMetadata>
-fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context& yield) const override
+fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call";

if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res)
{
if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue)
@@ -528,23 +519,21 @@ public:
}
else
{
-log_.debug() << "Could not fetch transaction - no rows";
+LOG(log_.debug()) << "Could not fetch transaction - no rows";
}
}
else
{
-log_.error() << "Could not fetch transaction: " << res.error();
+LOG(log_.error()) << "Could not fetch transaction: " << res.error();
}

return std::nullopt;
}

std::optional<ripple::uint256>
-doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
+doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const override
{
-log_.trace() << __func__ << " call";

if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res)
{
if (auto const result = res->template get<ripple::uint256>(); result)
@@ -555,22 +544,20 @@ public:
}
else
{
-log_.debug() << "Could not fetch successor - no rows";
+LOG(log_.debug()) << "Could not fetch successor - no rows";
}
}
else
{
-log_.error() << "Could not fetch successor: " << res.error();
+LOG(log_.error()) << "Could not fetch successor: " << res.error();
}

return std::nullopt;
}

std::vector<TransactionAndMetadata>
-fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context& yield) const override
+fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call";

if (hashes.size() == 0)
return {};

@@ -581,7 +568,7 @@ public:
std::vector<Statement> statements;
statements.reserve(numHashes);

-auto const timeDiff = util::timed([this, &yield, &results, &hashes, &statements]() {
+auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
// TODO: seems like a job for "hash IN (list of hashes)" instead?
std::transform(
std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
@@ -602,7 +589,8 @@ public:
});

assert(numHashes == results.size());
-log_.debug() << "Fetched " << numHashes << " transactions from Cassandra in " << timeDiff << " milliseconds";
+LOG(log_.debug()) << "Fetched " << numHashes << " transactions from Cassandra in " << timeDiff
+<< " milliseconds";
return results;
}

@@ -610,15 +598,13 @@ public:
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
-boost::asio::yield_context& yield) const override
+boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call";

if (keys.size() == 0)
return {};

auto const numKeys = keys.size();
-log_.trace() << "Fetching " << numKeys << " objects";
+LOG(log_.trace()) << "Fetching " << numKeys << " objects";

std::vector<Blob> results;
results.reserve(numKeys);
@@ -641,42 +627,41 @@ public:
return {};
});

-log_.trace() << "Fetched " << numKeys << " objects";
+LOG(log_.trace()) << "Fetched " << numKeys << " objects";
return results;
}

std::vector<LedgerObject>
-fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const override
+fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
{
-log_.trace() << __func__ << " call";

-auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, &yield]() -> std::vector<ripple::uint256> {
+auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
if (not res)
{
-log_.error() << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
+LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
return {};
}

auto const& results = res.value();
if (not results)
{
-log_.error() << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
+LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
return {};
}

-std::vector<ripple::uint256> keys;
+std::vector<ripple::uint256> resultKeys;
for (auto [key] : extract<ripple::uint256>(results))
-keys.push_back(key);
+resultKeys.push_back(key);

-return keys;
+return resultKeys;
});

// one of the above errors must have happened
if (keys.empty())
return {};

-log_.debug() << "Fetched " << keys.size() << " diff hashes from Cassandra in " << timeDiff << " milliseconds";
+LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from Cassandra in " << timeDiff
+<< " milliseconds";

auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
std::vector<LedgerObject> results;
@@ -697,7 +682,7 @@ public:
void
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
{
-log_.trace() << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";
+LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";

if (range)
executor_.write(schema_->insertDiff, seq, key);
@@ -708,8 +693,8 @@ public:
void
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
{
-log_.trace() << "Writing successor. key = " << key.size() << " bytes. "
-<< " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
+LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
+<< " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
assert(key.size() != 0);
assert(successor.size() != 0);

@@ -761,7 +746,7 @@ public:
std::string&& transaction,
std::string&& metadata) override
{
-log_.trace() << "Writing txn to cassandra";
+LOG(log_.trace()) << "Writing txn to cassandra";

executor_.write(schema_->insertLedgerTransaction, seq, hash);
executor_.write(
@@ -819,13 +804,13 @@ private:
auto maybeSuccess = res->template get<bool>();
if (not maybeSuccess)
{
-log_.error() << "executeSyncUpdate - error getting result - no row";
+LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
return false;
}

if (not maybeSuccess.value())
{
-log_.warn() << "Update failed. Checking if DB state is what we expect";
+LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";

// error may indicate that another writer wrote something.
// in this case let's just compare the current state of things
@@ -841,4 +826,4 @@ private:

using CassandraBackend = BasicCassandraBackend<SettingsProvider, detail::DefaultExecutionStrategy<>>;

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
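The two template parameters are concept-constrained policies (SomeSettingsProvider, SomeExecutionStrategy), so the alias above is just one concrete wiring of BasicCassandraBackend. The same mechanism would let a test build substitute doubles; a hypothetical sketch (the Fake* types are not part of this diff):

// Hypothetical test wiring: any types satisfying the two concepts will do.
using TestBackend = data::cassandra::BasicCassandraBackend<
    FakeSettingsProvider,    // assumed test double for SettingsProvider
    FakeExecutionStrategy>;  // assumed test double for the executor policy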
@@ -17,21 +17,21 @@
*/
//==============================================================================

/** @file */
#pragma once

#include <ripple/basics/Log.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/ledger/ReadView.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/TxMeta.h>

#include <boost/container/flat_set.hpp>

-#include <backend/Types.h>
+#include <data/Types.h>

/**
-* @brief Struct used to keep track of what to write to account_transactions/account_tx tables
+* @brief Struct used to keep track of what to write to account_transactions/account_tx tables.
*/
struct AccountTransactionsData
{
@@ -40,7 +40,7 @@ struct AccountTransactionsData
std::uint32_t transactionIndex;
ripple::uint256 txHash;

-AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash, beast::Journal& j)
+AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
: accounts(meta.getAffectedAccounts())
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
@@ -52,7 +52,7 @@ struct AccountTransactionsData
};

/**
-* @brief Represents a link from a tx to an NFT that was targeted/modified/created by it
+* @brief Represents a link from a tx to an NFT that was targeted/modified/created by it.
*
* Gets written to nf_token_transactions table and the like.
*/
@@ -139,6 +139,12 @@ struct NFTsData
}
};

+/**
+* @brief Check whether the supplied object is an offer.
+*
+* @param object The object to check
+* @return true if the object is an offer; false otherwise
+*/
template <class T>
inline bool
isOffer(T const& object)
@@ -147,19 +153,28 @@ isOffer(T const& object)
return offer_bytes == 0x006f;
}

+/**
+* @brief Check whether the supplied hex represents an offer object.
+*
+* @param object The object to check
+* @return true if the object is an offer; false otherwise
+*/
template <class T>
inline bool
isOfferHex(T const& object)
{
auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4);
if (blob)
-{
-short offer_bytes = ((*blob)[1] << 8) | (*blob)[2];
-return offer_bytes == 0x006f;
-}
+return isOffer(*blob);
return false;
}
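Both checks rely on the serialized layout of ledger entries: bytes 1 and 2 of the blob hold the 16-bit LedgerEntryType, and 0x006f (ASCII 'o') identifies an offer. A tiny standalone demonstration of the same byte arithmetic, using fabricated bytes:

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    // Fabricated first four bytes of a serialized offer-like object.
    std::vector<std::uint8_t> blob = {0x11, 0x00, 0x6f, 0x00};
    short const type = (blob[1] << 8) | blob[2];  // same math as isOffer
    assert(type == 0x006f);  // 0x006f marks an offer entry
}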

+/**
+* @brief Check whether the supplied object is a dir node.
+*
+* @param object The object to check
+* @return true if the object is a dir node; false otherwise
+*/
template <class T>
inline bool
isDirNode(T const& object)
@@ -168,6 +183,13 @@ isDirNode(T const& object)
return spaceKey == 0x0064;
}

+/**
+* @brief Check whether the supplied object is a book dir.
+*
+* @param key The key into the object
+* @param object The object to check
+* @return true if the object is a book dir; false otherwise
+*/
template <class T, class R>
inline bool
isBookDir(T const& key, R const& object)
@@ -179,6 +201,12 @@ isBookDir(T const& key, R const& object)
return !sle[~ripple::sfOwner].has_value();
}

+/**
+* @brief Get the book out of an offer object.
+*
+* @param offer The offer to get the book for
+* @return Book as ripple::uint256
+*/
template <class T>
inline ripple::uint256
getBook(T const& offer)
@@ -186,26 +214,40 @@ getBook(T const& offer)
ripple::SerialIter it{offer.data(), offer.size()};
ripple::SLE sle{it, {}};
ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);

return book;
}

+/**
+* @brief Get the book base.
+*
+* @param key The key to get the book base out of
+* @return Book base as ripple::uint256
+*/
template <class T>
inline ripple::uint256
getBookBase(T const& key)
{
assert(key.size() == ripple::uint256::size());

ripple::uint256 ret;
for (size_t i = 0; i < 24; ++i)
{
ret.data()[i] = key.data()[i];
}

return ret;
}

+/**
+* @brief Stringify a ripple::uint256.
+*
+* @param input The input value
+* @return The input value as a string
+*/
inline std::string
-uint256ToString(ripple::uint256 const& uint)
+uint256ToString(ripple::uint256 const& input)
{
-return {reinterpret_cast<const char*>(uint.data()), uint.size()};
+return {reinterpret_cast<const char*>(input.data()), input.size()};
}

/** @brief The ripple epoch start timestamp. Midnight on 1st January 2000. */
static constexpr std::uint32_t rippleEpochStart = 946684800;
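rippleEpochStart pins the XRPL time origin: on-ledger timestamps count seconds since 2000-01-01T00:00:00Z, and 946684800 is exactly that instant expressed in Unix time. Converting is therefore a single addition; a sketch:

#include <cstdint>
#include <ctime>

// XRPL close times are seconds since 2000-01-01T00:00Z; Unix time is seconds
// since 1970-01-01T00:00Z. Adding the epoch offset converts between them.
inline std::time_t
rippleToUnixTime(std::uint32_t rippleSeconds)
{
    return static_cast<std::time_t>(rippleSeconds) + 946684800;
}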
@@ -17,9 +17,9 @@
*/
//==============================================================================

-#include <backend/LedgerCache.h>
+#include <data/LedgerCache.h>

-namespace Backend {
+namespace data {

uint32_t
LedgerCache::latestLedgerSequence() const
@@ -69,7 +69,7 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
-std::shared_lock{mtx_};
+std::shared_lock lck{mtx_};
successorReqCounter_++;
if (seq != latestSeq_)
return {};
@@ -146,7 +146,7 @@ LedgerCache::getObjectHitRate() const
{
if (!objectReqCounter_)
return 1;
-return ((float)objectHitCounter_) / objectReqCounter_;
+return static_cast<float>(objectHitCounter_) / objectReqCounter_;
}

float
@@ -154,7 +154,7 @@ LedgerCache::getSuccessorHitRate() const
{
if (!successorReqCounter_)
return 1;
-return ((float)successorHitCounter_) / successorReqCounter_;
+return static_cast<float>(successorHitCounter_) / successorReqCounter_;
}

-} // namespace Backend
+} // namespace data
@@ -21,15 +21,18 @@

#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
-#include <backend/Types.h>
+#include <data/Types.h>
#include <map>
#include <mutex>
#include <shared_mutex>
#include <utility>
#include <vector>

-namespace Backend {
+namespace data {

/**
 * @brief Cache for an entire ledger.
 */
class LedgerCache
{
    struct CacheEntry
@@ -57,42 +60,95 @@ class LedgerCache
    std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;

public:
-    // Update the cache with new ledger objects set isBackground to true when writing old data from a background thread
+    /**
+     * @brief Update the cache with new ledger objects.
+     *
+     * @param blobs The ledger objects to update cache with
+     * @param seq The sequence to update cache for
+     * @param isBackground Should be set to true when writing old data from a background thread
+     */
    void
    update(std::vector<LedgerObject> const& blobs, uint32_t seq, bool isBackground = false);

+    /**
+     * @brief Fetch a cached object by its key and sequence number.
+     *
+     * @param key The key to fetch for
+     * @param seq The sequence to fetch for
+     * @return If found in cache, will return the cached Blob; otherwise nullopt is returned
+     */
    std::optional<Blob>
    get(ripple::uint256 const& key, uint32_t seq) const;

-    // always returns empty optional if isFull() is false
+    /**
+     * @brief Gets a cached successor.
+     *
+     * Note: This function always returns std::nullopt when @ref isFull() returns false.
+     *
+     * @param key The key to fetch for
+     * @param seq The sequence to fetch for
+     * @return If found in cache, will return the cached successor; otherwise nullopt is returned
+     */
    std::optional<LedgerObject>
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const;

-    // always returns empty optional if isFull() is false
+    /**
+     * @brief Gets a cached predecessor.
+     *
+     * Note: This function always returns std::nullopt when @ref isFull() returns false.
+     *
+     * @param key The key to fetch for
+     * @param seq The sequence to fetch for
+     * @return If found in cache, will return the cached predecessor; otherwise nullopt is returned
+     */
    std::optional<LedgerObject>
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const;

+    /**
+     * @brief Disables the cache.
+     */
    void
    setDisabled();

+    /**
+     * @brief Sets the full flag to true.
+     *
+     * This is used when the cache is loaded in its entirety at startup of the application. This can be either loaded
+     * from DB, populated together with initial ledger download (on first run) or downloaded from a peer node
+     * (specified in config).
+     */
    void
    setFull();

+    /**
+     * @return The latest ledger sequence for which cache is available.
+     */
    uint32_t
    latestLedgerSequence() const;

-    // whether the cache has all data for the most recent ledger
+    /**
+     * @return true if the cache has all data for the most recent ledger; false otherwise
+     */
    bool
    isFull() const;

+    /**
+     * @return The total size of the cache.
+     */
    size_t
    size() const;

+    /**
+     * @return A number representing the success rate of hitting an object in the cache versus missing it.
+     */
    float
    getObjectHitRate() const;

+    /**
+     * @return A number representing the success rate of hitting a successor in the cache versus missing it.
+     */
    float
    getSuccessorHitRate() const;
};

-} // namespace Backend
+} // namespace data
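For orientation, here is a minimal sketch of how a consumer could drive the `LedgerCache` API declared in the hunk above. The key and blob values are invented for illustration; in Clio the cache is normally fed by the ETL pipeline.

```cpp
// Illustrative only: exercises update/get/getSuccessor as declared above.
#include <data/LedgerCache.h>

void
exampleCacheUsage(data::LedgerCache& cache, uint32_t seq)
{
    ripple::uint256 key{"0000000000000000000000000000000000000000000000000000000000000001"};
    data::Blob blob{0xDE, 0xAD, 0xBE, 0xEF};  // made-up ledger object payload

    cache.update({{key, blob}}, seq);  // write objects for this ledger sequence

    if (auto obj = cache.get(key, seq); obj.has_value())
    {
        // cache hit; reflected by getObjectHitRate()
    }

    cache.setFull();  // successor/predecessor lookups require a complete cache
    auto const next = cache.getSuccessor(key, seq);  // std::nullopt unless isFull()
}
```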
@@ -1,4 +1,5 @@
-# Clio Backend
+# Backend

+## Background
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. As of right now, Cassandra and ScyllaDB are the only supported databases that are production-ready. Support for other database types can be extended by creating new implementations that implement the virtual methods of `BackendInterface.h`. Then, use the Factory Object Design Pattern to add logic statements to `BackendFactory.h` that return the new database interface for a specific `type` in Clio's configuration file.
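A hypothetical sketch of what such a factory branch could look like. `PostgresBackend`, the `make_Backend` signature, and the config key are invented for illustration; Cassandra/ScyllaDB remain the only production-ready backends as noted above.

```cpp
// Hypothetical sketch only: adding a new engine to BackendFactory.h.
std::shared_ptr<BackendInterface>
make_Backend(util::Config const& config)
{
    auto const type = config.value<std::string>("database.type");

    if (type == "cassandra")  // also covers ScyllaDB
        return std::make_shared<CassandraBackend>(config);
    if (type == "postgres")   // new engine implementing BackendInterface's virtual methods
        return std::make_shared<PostgresBackend>(config);

    throw std::runtime_error("Unknown database type: " + type);
}
```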
@@ -21,20 +21,23 @@

#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>

#include <optional>
#include <string>
#include <vector>

-namespace Backend {
-
-// *** return types
+namespace data {

using Blob = std::vector<unsigned char>;

+/**
+ * @brief Represents an object in the ledger.
+ */
struct LedgerObject
{
    ripple::uint256 key;
    Blob blob;

    bool
    operator==(const LedgerObject& other) const
    {
@@ -42,16 +45,27 @@ struct LedgerObject
    }
};

+/**
+ * @brief Represents a page of LedgerObjects.
+ */
struct LedgerPage
{
    std::vector<LedgerObject> objects;
    std::optional<ripple::uint256> cursor;
};

+/**
+ * @brief Represents a page of book offer objects.
+ */
struct BookOffersPage
{
    std::vector<LedgerObject> offers;
    std::optional<ripple::uint256> cursor;
};

+/**
+ * @brief Represents a transaction and its metadata bundled together.
+ */
struct TransactionAndMetadata
{
    Blob transaction;
@@ -85,10 +99,13 @@ struct TransactionAndMetadata
    }
};

+/**
+ * @brief Represents a cursor into the transactions table.
+ */
struct TransactionsCursor
{
-    std::uint32_t ledgerSequence;
-    std::uint32_t transactionIndex;
+    std::uint32_t ledgerSequence = 0;
+    std::uint32_t transactionIndex = 0;

    TransactionsCursor() = default;
    TransactionsCursor(std::uint32_t ledgerSequence, std::uint32_t transactionIndex)
@@ -101,6 +118,8 @@ struct TransactionsCursor
    {
    }

+    TransactionsCursor(TransactionsCursor const&) = default;
+
    TransactionsCursor&
    operator=(TransactionsCursor const&) = default;

@@ -114,12 +133,18 @@ struct TransactionsCursor
    }
};

+/**
+ * @brief Represents a bundle of transactions with metadata and a cursor to the next page.
+ */
struct TransactionsAndCursor
{
    std::vector<TransactionAndMetadata> txns;
    std::optional<TransactionsCursor> cursor;
};

+/**
+ * @brief Represents an NFToken.
+ */
struct NFT
{
    ripple::uint256 tokenID;
@@ -143,9 +168,8 @@ struct NFT
    {
    }

-    // clearly two tokens are the same if they have the same ID, but this
-    // struct stores the state of a given token at a given ledger sequence, so
-    // we also need to compare with ledgerSequence
+    // clearly two tokens are the same if they have the same ID, but this struct stores the state of a given token at a
+    // given ledger sequence, so we also need to compare with ledgerSequence.
    bool
    operator==(NFT const& other) const
    {
@@ -153,12 +177,17 @@ struct NFT
    }
};

+/**
+ * @brief Stores a range of sequences as a min and max pair.
+ */
struct LedgerRange
{
-    std::uint32_t minSequence;
-    std::uint32_t maxSequence;
+    std::uint32_t minSequence = 0;
+    std::uint32_t maxSequence = 0;
};

constexpr ripple::uint256 firstKey{"0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 lastKey{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 hi192{"0000000000000000000000000000000000000000000000001111111111111111"};
-} // namespace Backend
+
+} // namespace data
@@ -19,7 +19,7 @@

#pragma once

-#include <backend/cassandra/Types.h>
+#include <data/cassandra/Types.h>

#include <boost/asio/spawn.hpp>

@@ -28,8 +28,11 @@
#include <optional>
#include <string>

-namespace Backend::Cassandra {
+namespace data::cassandra {

+/**
+ * @brief The requirements of a settings provider.
+ */
// clang-format off
template <typename T>
concept SomeSettingsProvider = requires(T a) {
@@ -41,6 +44,9 @@ concept SomeSettingsProvider = requires(T a) {
};
// clang-format on

+/**
+ * @brief The requirements of an execution strategy.
+ */
// clang-format off
template <typename T>
concept SomeExecutionStrategy = requires(
@@ -66,6 +72,9 @@ concept SomeExecutionStrategy = requires(
};
// clang-format on

+/**
+ * @brief The requirements of a retry policy.
+ */
// clang-format off
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
@@ -76,4 +85,4 @@ concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraEr
};
// clang-format on

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
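To illustrate how these concepts are consumed, a minimal sketch of a constrained template follows; `UsesProvider` is hypothetical, while `Schema` (further below in this diff) is the real consumer of `SomeSettingsProvider` in this change set. The concept bodies themselves are elided in the hunks above, so only the constraint syntax is shown here.

```cpp
// Sketch: a template that only instantiates for types satisfying the concept.
#include <data/cassandra/Concepts.h>

#include <functional>

template <data::cassandra::SomeSettingsProvider SettingsProviderType>
class UsesProvider
{
    std::reference_wrapper<SettingsProviderType const> settingsProvider_;

public:
    explicit UsesProvider(SettingsProviderType const& provider) : settingsProvider_{std::cref(provider)}
    {
    }
};
```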
@@ -23,10 +23,10 @@

#include <string>

-namespace Backend::Cassandra {
+namespace data::cassandra {

/**
- * @brief A simple container for both error message and error code
+ * @brief A simple container for both error message and error code.
 */
class CassandraError
{
@@ -67,18 +67,27 @@ public:
        return os;
    }

+    /**
+     * @return The final error message as a std::string
+     */
    std::string
    message() const
    {
        return message_;
    }

+    /**
+     * @return The error code
+     */
    uint32_t
    code() const
    {
        return code_;
    }

+    /**
+     * @return true if the wrapped error is considered a timeout; false otherwise
+     */
    bool
    isTimeout() const
    {
@@ -89,6 +98,9 @@ public:
        return false;
    }

+    /**
+     * @return true if the wrapped error is an invalid query; false otherwise
+     */
    bool
    isInvalidQuery() const
    {
@@ -96,4 +108,4 @@ public:
    }
};

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
@@ -17,9 +17,9 @@
 */
//==============================================================================

-#include <backend/cassandra/Handle.h>
+#include <data/cassandra/Handle.h>

-namespace Backend::Cassandra {
+namespace data::cassandra {

Handle::Handle(Settings clusterSettings) : cluster_{clusterSettings}
{
@@ -152,4 +152,4 @@ Handle::prepare(std::string_view query) const
    throw std::runtime_error(rc.error().message());
}

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
@@ -19,15 +19,15 @@

#pragma once

-#include <backend/cassandra/Error.h>
-#include <backend/cassandra/Types.h>
-#include <backend/cassandra/impl/Batch.h>
-#include <backend/cassandra/impl/Cluster.h>
-#include <backend/cassandra/impl/Future.h>
-#include <backend/cassandra/impl/ManagedObject.h>
-#include <backend/cassandra/impl/Result.h>
-#include <backend/cassandra/impl/Session.h>
-#include <backend/cassandra/impl/Statement.h>
+#include <data/cassandra/Error.h>
+#include <data/cassandra/Types.h>
+#include <data/cassandra/impl/Batch.h>
+#include <data/cassandra/impl/Cluster.h>
+#include <data/cassandra/impl/Future.h>
+#include <data/cassandra/impl/ManagedObject.h>
+#include <data/cassandra/impl/Result.h>
+#include <data/cassandra/impl/Session.h>
+#include <data/cassandra/impl/Statement.h>
#include <util/Expected.h>

#include <cassandra.h>

@@ -37,7 +37,7 @@
#include <iterator>
#include <vector>

-namespace Backend::Cassandra {
+namespace data::cassandra {

/**
 * @brief Represents a handle to the cassandra database cluster
@@ -57,28 +57,31 @@ public:
    using ResultType = Result;

    /**
-     * @brief Construct a new handle from a @ref Settings object
+     * @brief Construct a new handle from a @ref detail::Settings object.
+     *
+     * @param clusterSettings The settings to use
     */
    explicit Handle(Settings clusterSettings = Settings::defaultSettings());

    /**
-     * @brief Construct a new handle with default settings and only by setting
-     * the contact points
+     * @brief Construct a new handle with default settings and only by setting the contact points.
+     *
+     * @param contactPoints The contact points to use instead of settings
     */
    explicit Handle(std::string_view contactPoints);

    /**
-     * @brief Disconnects gracefully if possible
+     * @brief Disconnects gracefully if possible.
     */
    ~Handle();

    /**
-     * @brief Move is supported
+     * @brief Move is supported.
     */
    Handle(Handle&&) = default;

    /**
-     * @brief Connect to the cluster asynchronously
+     * @brief Connect to the cluster asynchronously.
     *
     * @return A future
     */
@@ -86,31 +89,37 @@ public:
    asyncConnect() const;

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
+     *
+     * See @ref asyncConnect() const for how this works.
     *
     * @return Possibly an error
     */
    [[nodiscard]] MaybeErrorType
    connect() const;

    /**
-     * @brief Connect to the specified keyspace asynchronously
+     * @brief Connect to the specified keyspace asynchronously.
     *
     * @param keyspace The keyspace to use
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncConnect(std::string_view keyspace) const;

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
+     *
+     * See @ref asyncConnect(std::string_view) const for how this works.
     *
     * @param keyspace The keyspace to use
     * @return Possibly an error
     */
    [[nodiscard]] MaybeErrorType
    connect(std::string_view keyspace) const;

    /**
-     * @brief Disconnect from the cluster asynchronously
+     * @brief Disconnect from the cluster asynchronously.
     *
     * @return A future
     */
@@ -118,32 +127,40 @@ public:
    asyncDisconnect() const;

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
+     *
+     * See @ref asyncDisconnect() const for how this works.
     *
     * @return Possibly an error
     */
    [[maybe_unused]] MaybeErrorType
    disconnect() const;

    /**
-     * @brief Reconnect to the specified keyspace asynchronously
+     * @brief Reconnect to the specified keyspace asynchronously.
     *
     * @param keyspace The keyspace to use
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncReconnect(std::string_view keyspace) const;

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
+     *
+     * See @ref asyncReconnect(std::string_view) const for how this works.
     *
     * @param keyspace The keyspace to use
     * @return Possibly an error
     */
    [[nodiscard]] MaybeErrorType
    reconnect(std::string_view keyspace) const;

    /**
-     * @brief Execute a simple query with optional args asynchronously
+     * @brief Execute a simple query with optional args asynchronously.
     *
     * @param query The query to execute
     * @param args The arguments to bind for execution
     * @return A future
     */
    template <typename... Args>
@@ -155,10 +172,13 @@ public:
    }

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
     *
-     * See @ref asyncExecute(std::string_view, Args&&...) const for how this
-     * works.
+     * See asyncExecute(std::string_view, Args&&...) const for how this works.
     *
     * @param query The query to execute
     * @param args The arguments to bind for execution
     * @return The result or an error
     */
    template <typename... Args>
    [[maybe_unused]] ResultOrErrorType
@@ -168,30 +188,34 @@ public:
    }

    /**
-     * @brief Execute each of the statements asynchronously
+     * @brief Execute each of the statements asynchronously.
     *
-     * Batched version is not always the right option. Especially since it only
-     * supports INSERT, UPDATE and DELETE statements.
-     * This can be used as an alternative when statements need to execute in
-     * bulk.
+     * Batched version is not always the right option.
+     * Especially since it only supports INSERT, UPDATE and DELETE statements.
+     * This can be used as an alternative when statements need to execute in bulk.
     *
     * @param statements The statements to execute
     * @return A vector of future objects
     */
    [[nodiscard]] std::vector<FutureType>
    asyncExecuteEach(std::vector<StatementType> const& statements) const;

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
     *
-     * See @ref asyncExecuteEach(std::vector<StatementType> const&) const for
-     * how this works.
+     * See @ref asyncExecuteEach(std::vector<StatementType> const&) const for how this works.
     *
     * @param statements The statements to execute
     * @return Possibly an error
     */
    [[maybe_unused]] MaybeErrorType
    executeEach(std::vector<StatementType> const& statements) const;

    /**
-     * @brief Execute a prepared statement with optional args asynchronously
+     * @brief Execute a prepared statement with optional args asynchronously.
     *
     * @param statement The prepared statement to execute
     * @param args The arguments to bind for execution
     * @return A future
     */
    template <typename... Args>
@@ -203,10 +227,13 @@ public:
    }

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
     *
-     * See @ref asyncExecute(std::vector<StatementType> const&, Args&&...) const
-     * for how this works.
+     * See asyncExecute(std::vector<StatementType> const&, Args&&...) const for how this works.
     *
     * @param statement The prepared statement to bind and execute
     * @param args The arguments to bind for execution
     * @return The result or an error
     */
    template <typename... Args>
    [[maybe_unused]] ResultOrErrorType
@@ -216,61 +243,70 @@ public:
    }

    /**
-     * @brief Execute one (bound or simple) statement asynchronously
+     * @brief Execute one (bound or simple) statement asynchronously.
     *
     * @param statement The statement to execute
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncExecute(StatementType const& statement) const;

    /**
-     * @brief Execute one (bound or simple) statement asynchronously with a
-     * callback
+     * @brief Execute one (bound or simple) statement asynchronously with a callback.
     *
     * @param statement The statement to execute
     * @param cb The callback to execute when data is ready
     * @return A future that holds onto the callback provided
     */
    [[nodiscard]] FutureWithCallbackType
    asyncExecute(StatementType const& statement, std::function<void(ResultOrErrorType)>&& cb) const;

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
     *
-     * See @ref asyncExecute(StatementType const&) const for how this
-     * works.
+     * See @ref asyncExecute(StatementType const&) const for how this works.
     *
     * @param statement The statement to execute
     * @return The result or an error
     */
    [[maybe_unused]] ResultOrErrorType
    execute(StatementType const& statement) const;

    /**
-     * @brief Execute a batch of (bound or simple) statements asynchronously
+     * @brief Execute a batch of (bound or simple) statements asynchronously.
     *
     * @param statements The statements to execute
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncExecute(std::vector<StatementType> const& statements) const;

    /**
-     * @brief Synchronous version of the above
+     * @brief Synchronous version of the above.
     *
-     * See @ref asyncExecute(std::vector<StatementType> const&) const for how
-     * this works.
+     * See @ref asyncExecute(std::vector<StatementType> const&) const for how this works.
     *
     * @param statements The statements to execute
     * @return Possibly an error
     */
    [[maybe_unused]] MaybeErrorType
    execute(std::vector<StatementType> const& statements) const;

    /**
-     * @brief Execute a batch of (bound or simple) statements asynchronously
-     * with a completion callback
+     * @brief Execute a batch of (bound or simple) statements asynchronously with a completion callback.
     *
     * @param statements The statements to execute
     * @param cb The callback to execute when data is ready
     * @return A future that holds onto the callback provided
     */
    [[nodiscard]] FutureWithCallbackType
    asyncExecute(std::vector<StatementType> const& statements, std::function<void(ResultOrErrorType)>&& cb) const;

    /**
-     * @brief Prepare a statement
+     * @brief Prepare a statement.
     *
-     * @return A @ref PreparedStatementType
+     * @param query
+     * @return A prepared statement
     * @throws std::runtime_error with underlying error description on failure
     */
    [[nodiscard]] PreparedStatementType
@@ -278,12 +314,13 @@ public:
};

/**
- * @brief Extracts the results into series of std::tuple<Types...> by creating a
- * simple wrapper with an STL input iterator inside.
+ * @brief Extracts the results into series of std::tuple<Types...> by creating a simple wrapper with an STL input
+ * iterator inside.
 *
 * You can call .begin() and .end() in order to iterate as usual.
- * This also means that you can use it in a range-based for or with some
- * algorithms.
+ * This also means that you can use it in a range-based for or with some algorithms.
 *
 * @param result The result to iterate
 */
template <typename... Types>
[[nodiscard]] detail::ResultExtractor<Types...>
@@ -292,4 +329,4 @@ extract(Handle::ResultType const& result)
    return {result};
}

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
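A minimal sketch of the `execute()`/`extract<>()` flow described in the doc comment above. The query text, column types, and the dereference of the `Expected` result (`*res`) are assumptions for the example, not part of this diff.

```cpp
// Illustrative sketch: synchronous execute() followed by extract<>() iteration.
#include <data/cassandra/Handle.h>

void
exampleQuery()
{
    data::cassandra::Handle handle{"127.0.0.1"};
    if (auto rc = handle.connect("clio"); not rc)
        throw std::runtime_error(rc.error().message());

    if (auto res = handle.execute("SELECT hash, seq_idx FROM account_tx LIMIT 10"); res)
    {
        // extract() wraps the result with an STL input iterator, so it works
        // directly in a range-based for over std::tuple<Types...>
        for (auto [hash, seqIdx] : data::cassandra::extract<std::string, int64_t>(*res))
        {
            // consume hash / seqIdx
        }
    }
}
```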
@@ -19,17 +19,17 @@

#pragma once

-#include <backend/cassandra/Concepts.h>
-#include <backend/cassandra/Handle.h>
-#include <backend/cassandra/SettingsProvider.h>
-#include <backend/cassandra/Types.h>
-#include <config/Config.h>
-#include <log/Logger.h>
+#include <data/cassandra/Concepts.h>
+#include <data/cassandra/Handle.h>
+#include <data/cassandra/SettingsProvider.h>
+#include <data/cassandra/Types.h>
+#include <util/Expected.h>
+#include <util/config/Config.h>
+#include <util/log/Logger.h>

#include <fmt/compile.h>

-namespace Backend::Cassandra {
+namespace data::cassandra {

template <SomeSettingsProvider SettingsProviderType>
[[nodiscard]] std::string inline qualifiedTableName(SettingsProviderType const& provider, std::string_view name)
@@ -38,17 +38,12 @@ template <SomeSettingsProvider SettingsProviderType>
}

/**
- * @brief Manages the DB schema and provides access to prepared statements
+ * @brief Manages the DB schema and provides access to prepared statements.
 */
template <SomeSettingsProvider SettingsProviderType>
class Schema
{
-    // Current schema version.
-    // Update this everytime you update the schema.
-    // Migrations will be ran automatically based on this value.
-    static constexpr uint16_t version = 1u;
-
-    clio::Logger log_{"Backend"};
+    util::Logger log_{"Backend"};
    std::reference_wrapper<SettingsProviderType const> settingsProvider_;

public:
@@ -261,7 +256,7 @@ public:
    }();

    /**
-     * @brief Prepared statements holder
+     * @brief Prepared statements holder.
     */
    class Statements
    {
@@ -528,21 +523,19 @@ public:
                SELECT hash, seq_idx
                FROM {}
                WHERE account = ?
-                AND seq_idx <= ?
+                AND seq_idx < ?
                LIMIT ?
            )",
            qualifiedTableName(settingsProvider_.get(), "account_tx")));
    }();

-    // the smallest transaction idx is 0, we use uint to store the transaction index, so we shall use ">=" to
-    // include it (the transaction with idx 0) in the result
    PreparedStatement selectAccountTxForward = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
                SELECT hash, seq_idx
                FROM {}
                WHERE account = ?
-                AND seq_idx >= ?
+                AND seq_idx > ?
                ORDER BY seq_idx ASC
                LIMIT ?
            )",
@@ -643,18 +636,18 @@ public:
    };

    /**
-     * @brief Recreates the prepared statements
+     * @brief Recreates the prepared statements.
     */
    void
    prepareStatements(Handle const& handle)
    {
-        log_.info() << "Preparing cassandra statements";
+        LOG(log_.info()) << "Preparing cassandra statements";
        statements_ = std::make_unique<Statements>(settingsProvider_, handle);
-        log_.info() << "Finished preparing statements";
+        LOG(log_.info()) << "Finished preparing statements";
    }

    /**
-     * @brief Provides access to statements
+     * @brief Provides access to statements.
     */
    std::unique_ptr<Statements> const&
    operator->() const
@@ -666,4 +659,4 @@ private:
    std::unique_ptr<Statements> statements_{nullptr};
};

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
@@ -17,17 +17,18 @@
 */
//==============================================================================

-#include <backend/cassandra/SettingsProvider.h>
-#include <backend/cassandra/impl/Cluster.h>
-#include <backend/cassandra/impl/Statement.h>
-#include <config/Config.h>
+#include <data/cassandra/SettingsProvider.h>
+#include <data/cassandra/impl/Cluster.h>
+#include <data/cassandra/impl/Statement.h>
+#include <util/config/Config.h>

#include <boost/json.hpp>

#include <fstream>
#include <string>
#include <thread>

-namespace Backend::Cassandra {
+namespace data::cassandra {

namespace detail {
inline Settings::ContactPoints
@@ -38,7 +39,7 @@ tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::valu
        "Feed entire Cassandra section to parse "
        "Settings::ContactPoints instead");

-    clio::Config obj{value};
+    util::Config obj{value};
    Settings::ContactPoints out;

    out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
@@ -56,7 +57,7 @@ tag_invoke(boost::json::value_to_tag<Settings::SecureConnectionBundle>, boost::j
}
} // namespace detail

-SettingsProvider::SettingsProvider(clio::Config const& cfg, uint16_t ttl)
+SettingsProvider::SettingsProvider(util::Config const& cfg, uint16_t ttl)
    : config_{cfg}
    , keyspace_{cfg.valueOr<std::string>("keyspace", "clio")}
    , tablePrefix_{cfg.maybeValue<std::string>("table_prefix")}
@@ -115,6 +116,19 @@ SettingsProvider::parseSettings() const
        config_.valueOr<uint32_t>("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding);
    settings.maxReadRequestsOutstanding =
        config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
+    settings.coreConnectionsPerHost =
+        config_.valueOr<uint32_t>("core_connections_per_host", settings.coreConnectionsPerHost);
+
+    settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
+
+    auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
+    if (connectTimeoutSecond)
+        settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * 1000};
+
+    auto const requestTimeoutSecond = config_.maybeValue<uint32_t>("request_timeout");
+    if (requestTimeoutSecond)
+        settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * 1000};

    settings.certificate = parseOptionalCertificate();
    settings.username = config_.maybeValue<std::string>("username");
    settings.password = config_.maybeValue<std::string>("password");
@@ -122,4 +136,4 @@ SettingsProvider::parseSettings() const
    return settings;
}

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
@@ -19,20 +19,20 @@

#pragma once

-#include <backend/cassandra/Handle.h>
-#include <backend/cassandra/Types.h>
-#include <config/Config.h>
-#include <log/Logger.h>
+#include <data/cassandra/Handle.h>
+#include <data/cassandra/Types.h>
+#include <util/Expected.h>
+#include <util/config/Config.h>
+#include <util/log/Logger.h>

-namespace Backend::Cassandra {
+namespace data::cassandra {

/**
- * @brief Provides settings for @ref CassandraBackend
+ * @brief Provides settings for @ref BasicCassandraBackend.
 */
class SettingsProvider
{
-    clio::Config config_;
+    util::Config config_;

    std::string keyspace_;
    std::optional<std::string> tablePrefix_;
@@ -41,34 +41,50 @@ class SettingsProvider
    Settings settings_;

public:
-    explicit SettingsProvider(clio::Config const& cfg, uint16_t ttl = 0);
+    /**
+     * @brief Create a settings provider from the specified config.
+     *
+     * @param cfg The config of Clio to use
+     * @param ttl Time to live setting
+     */
+    explicit SettingsProvider(util::Config const& cfg, uint16_t ttl = 0);

-    /*! Get the cluster settings */
+    /**
+     * @return The cluster settings
+     */
    [[nodiscard]] Settings
    getSettings() const;

-    /*! Get the specified keyspace */
+    /**
+     * @return The specified keyspace
+     */
    [[nodiscard]] inline std::string
    getKeyspace() const
    {
        return keyspace_;
    }

-    /*! Get an optional table prefix to use in all queries */
+    /**
+     * @return The optional table prefix to use in all queries
+     */
    [[nodiscard]] inline std::optional<std::string>
    getTablePrefix() const
    {
        return tablePrefix_;
    }

-    /*! Get the replication factor */
+    /**
+     * @return The replication factor
+     */
    [[nodiscard]] inline uint16_t
    getReplicationFactor() const
    {
        return replicationFactor_;
    }

-    /*! Get the default time to live to use in all `create` queries */
+    /**
+     * @return The default time to live to use in all `create` queries
+     */
    [[nodiscard]] inline uint16_t
    getTtl() const
    {
@@ -83,4 +99,4 @@ private:
    parseSettings() const;
};

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
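A small usage sketch tying the provider to its config source. Building `util::Config` from a `boost::json` value mirrors the `tag_invoke` helpers in the .cpp above; the key names come from `parseSettings()`, while the concrete values are invented for the example.

```cpp
// Illustrative: constructing a SettingsProvider from a JSON config fragment.
#include <data/cassandra/SettingsProvider.h>

#include <boost/json.hpp>

auto const json = boost::json::parse(R"({
    "contact_points": "127.0.0.1",
    "keyspace": "clio",
    "max_write_requests_outstanding": 10000,
    "core_connections_per_host": 1,
    "connect_timeout": 10,
    "request_timeout": 0
})");

util::Config cfg{json};
data::cassandra::SettingsProvider provider{cfg, /* ttl = */ 0};
auto const settings = provider.getSettings();  // feeds Cluster/Handle setup
```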
@@ -23,7 +23,7 @@

#include <string>

-namespace Backend::Cassandra {
+namespace data::cassandra {

namespace detail {
struct Settings;
@@ -64,4 +64,4 @@ using MaybeError = util::Expected<void, CassandraError>;
using ResultOrError = util::Expected<Result, CassandraError>;
using Error = util::Unexpected<CassandraError>;

-} // namespace Backend::Cassandra
+} // namespace data::cassandra
@@ -19,19 +19,19 @@

#pragma once

-#include <backend/cassandra/Concepts.h>
-#include <backend/cassandra/Handle.h>
-#include <backend/cassandra/Types.h>
-#include <backend/cassandra/impl/RetryPolicy.h>
-#include <log/Logger.h>
+#include <data/cassandra/Concepts.h>
+#include <data/cassandra/Handle.h>
+#include <data/cassandra/Types.h>
+#include <data/cassandra/impl/RetryPolicy.h>
+#include <util/Expected.h>
+#include <util/log/Logger.h>

#include <boost/asio.hpp>

#include <functional>
#include <memory>

-namespace Backend::Cassandra::detail {
+namespace data::cassandra::detail {

/**
 * @brief A query executor with a changeable retry policy
@@ -53,7 +53,7 @@ class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<Statemen
    using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
    using CallbackType = std::function<void(typename HandleType::ResultOrErrorType)>;

-    clio::Logger log_{"Backend"};
+    util::Logger log_{"Backend"};

    StatementType data_;
    RetryPolicyType retryPolicy_;
@@ -116,4 +116,4 @@ private:
    }
};

-} // namespace Backend::Cassandra::detail
+} // namespace data::cassandra::detail
@@ -17,9 +17,9 @@
 */
//==============================================================================

-#include <backend/cassandra/Error.h>
-#include <backend/cassandra/impl/Batch.h>
-#include <backend/cassandra/impl/Statement.h>
+#include <data/cassandra/Error.h>
+#include <data/cassandra/impl/Batch.h>
+#include <data/cassandra/impl/Statement.h>
#include <util/Expected.h>

#include <exception>
@@ -29,10 +29,9 @@ namespace {
static constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
};

-namespace Backend::Cassandra::detail {
+namespace data::cassandra::detail {

-// todo: use an appropritae value instead of CASS_BATCH_TYPE_LOGGED for
-// different use cases
+// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
Batch::Batch(std::vector<Statement> const& statements)
    : ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), batchDeleter}
{
@@ -53,4 +52,4 @@ Batch::add(Statement const& statement)
    return {};
}

-} // namespace Backend::Cassandra::detail
+} // namespace data::cassandra::detail
@@ -19,12 +19,12 @@

#pragma once

-#include <backend/cassandra/Types.h>
-#include <backend/cassandra/impl/ManagedObject.h>
+#include <data/cassandra/Types.h>
+#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

-namespace Backend::Cassandra::detail {
+namespace data::cassandra::detail {

struct Batch : public ManagedObject<CassBatch>
{
@@ -34,4 +34,4 @@ struct Batch : public ManagedObject<CassBatch>
    add(Statement const& statement);
};

-} // namespace Backend::Cassandra::detail
+} // namespace data::cassandra::detail
@@ -17,11 +17,13 @@
 */
//==============================================================================

-#include <backend/cassandra/impl/Cluster.h>
-#include <backend/cassandra/impl/SslContext.h>
-#include <backend/cassandra/impl/Statement.h>
+#include <data/cassandra/impl/Cluster.h>
+#include <data/cassandra/impl/SslContext.h>
+#include <data/cassandra/impl/Statement.h>
#include <util/Expected.h>

+#include <fmt/core.h>
+
#include <exception>
#include <vector>

@@ -39,7 +41,7 @@ template <class... Ts>
overloadSet(Ts...) -> overloadSet<Ts...>;
}; // namespace

-namespace Backend::Cassandra::detail {
+namespace data::cassandra::detail {

Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), clusterDeleter}
{
@@ -48,44 +50,40 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), c
    cass_cluster_set_token_aware_routing(*this, cass_true);
    if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK)
    {
-        throw std::runtime_error(std::string{"Error setting cassandra protocol version to v4: "} + cass_error_desc(rc));
+        throw std::runtime_error(
+            fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc)));
    }

    if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK)
    {
        throw std::runtime_error(
-            std::string{"Error setting cassandra io threads to "} + to_string(settings.threads) + ": " +
-            cass_error_desc(rc));
+            fmt::format("Error setting cassandra io threads to {}: {}", settings.threads, cass_error_desc(rc)));
    }

    cass_log_set_level(settings.enableLog ? CASS_LOG_TRACE : CASS_LOG_DISABLED);
    cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
    cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

    // TODO: other options to experiment with and consider later:
    // cass_cluster_set_max_concurrent_requests_threshold(*this, 10000);
    // cass_cluster_set_queue_size_event(*this, 100000);
    // cass_cluster_set_queue_size_io(*this, 100000);
    // cass_cluster_set_write_bytes_high_water_mark(*this, 16 * 1024 * 1024);  // 16mb
    // cass_cluster_set_write_bytes_low_water_mark(*this, 8 * 1024 * 1024);  // half of allowance
    // cass_cluster_set_pending_requests_high_water_mark(*this, 5000);
    // cass_cluster_set_pending_requests_low_water_mark(*this, 2500);  // half
    // cass_cluster_set_max_requests_per_flush(*this, 1000);
    // cass_cluster_set_max_concurrent_creation(*this, 8);
    // cass_cluster_set_max_connections_per_host(*this, 6);
    // cass_cluster_set_core_connections_per_host(*this, 4);
    // cass_cluster_set_constant_speculative_execution_policy(*this, 1000, 1024);

-    if (auto const rc = cass_cluster_set_queue_size_io(
-            *this, settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
+    if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
        rc != CASS_OK)
    {
-        throw std::runtime_error(std::string{"Could not set queue size for IO per host: "} + cass_error_desc(rc));
+        throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));
    }

+    auto const queueSize =
+        settings.queueSizeIO.value_or(settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
+    if (auto const rc = cass_cluster_set_queue_size_io(*this, queueSize); rc != CASS_OK)
+    {
+        throw std::runtime_error(fmt::format("Could not set queue size for IO per host: {}", cass_error_desc(rc)));
+    }

    setupConnection(settings);
    setupCertificate(settings);
    setupCredentials(settings);

+    LOG(log_.info()) << "Threads: " << settings.threads;
+    LOG(log_.info()) << "Core connections per host: " << settings.coreConnectionsPerHost;
+    LOG(log_.info()) << "IO queue size: " << queueSize;
}

void
@@ -104,11 +102,12 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
    using std::to_string;
    auto throwErrorIfNeeded = [](CassError rc, std::string const& label, std::string const& value) {
        if (rc != CASS_OK)
-            throw std::runtime_error("Cassandra: Error setting " + label + " [" + value + "]: " + cass_error_desc(rc));
+            throw std::runtime_error(
+                fmt::format("Cassandra: Error setting {} [{}]: {}", label, value, cass_error_desc(rc)));
    };

    {
-        log_.debug() << "Attempt connection using contact points: " << points.contactPoints;
+        LOG(log_.debug()) << "Attempt connection using contact points: " << points.contactPoints;
        auto const rc = cass_cluster_set_contact_points(*this, points.contactPoints.data());
        throwErrorIfNeeded(rc, "contact_points", points.contactPoints);
    }
@@ -123,10 +122,10 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
void
Cluster::setupSecureBundle(Settings::SecureConnectionBundle const& bundle)
{
-    log_.debug() << "Attempt connection using secure bundle";
+    LOG(log_.debug()) << "Attempt connection using secure bundle";
    if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK)
    {
-        throw std::runtime_error("Failed to connect using secure connection bundle" + bundle.bundle);
+        throw std::runtime_error("Failed to connect using secure connection bundle " + bundle.bundle);
    }
}

@@ -136,7 +135,7 @@ Cluster::setupCertificate(Settings const& settings)
    if (not settings.certificate)
        return;

-    log_.debug() << "Configure SSL context";
+    LOG(log_.debug()) << "Configure SSL context";
    SslContext context = SslContext(*settings.certificate);
    cass_cluster_set_ssl(*this, context);
}

@@ -147,8 +146,8 @@ Cluster::setupCredentials(Settings const& settings)
    if (not settings.username || not settings.password)
        return;

-    log_.debug() << "Set credentials; username: " << settings.username.value();
+    LOG(log_.debug()) << "Set credentials; username: " << settings.username.value();
    cass_cluster_set_credentials(*this, settings.username.value().c_str(), settings.password.value().c_str());
}

-} // namespace Backend::Cassandra::detail
+} // namespace data::cassandra::detail
@@ -19,8 +19,8 @@

#pragma once

-#include <backend/cassandra/impl/ManagedObject.h>
-#include <log/Logger.h>
+#include <data/cassandra/impl/ManagedObject.h>
+#include <util/log/Logger.h>

#include <cassandra.h>

@@ -31,32 +31,71 @@
#include <thread>
#include <variant>

-namespace Backend::Cassandra::detail {
+namespace data::cassandra::detail {

+// TODO: move Settings to public interface, not detail
+
+/**
+ * @brief Bundles all cassandra settings in one place.
+ */
struct Settings
{
+    /**
+     * @brief Represents the configuration of contact points for cassandra.
+     */
    struct ContactPoints
    {
        std::string contactPoints = "127.0.0.1";  // defaults to localhost
-        std::optional<uint16_t> port;
+        std::optional<uint16_t> port = {};
    };

+    /**
+     * @brief Represents the configuration of a secure connection bundle.
+     */
    struct SecureConnectionBundle
    {
        std::string bundle;  // no meaningful default
    };

+    /** @brief Enables or disables cassandra driver logger */
    bool enableLog = false;

+    /** @brief Connect timeout specified in milliseconds */
    std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{10000};

+    /** @brief Request timeout specified in milliseconds */
    std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0};  // no timeout at all

+    /** @brief Connection information; either ContactPoints or SecureConnectionBundle */
    std::variant<ContactPoints, SecureConnectionBundle> connectionInfo = ContactPoints{};

+    /** @brief The number of threads for the driver to pool */
    uint32_t threads = std::thread::hardware_concurrency();
-    uint32_t maxWriteRequestsOutstanding = 10'000;
-    uint32_t maxReadRequestsOutstanding = 100'000;

+    /** @brief The maximum number of outstanding write requests at any given moment */
+    uint32_t maxWriteRequestsOutstanding = 10'000u;
+
+    /** @brief The maximum number of outstanding read requests at any given moment */
+    uint32_t maxReadRequestsOutstanding = 100'000u;
+
+    /** @brief The number of connections per host to always have active */
+    uint32_t coreConnectionsPerHost = 1u;
+
+    /** @brief Size of the IO queue */
+    std::optional<uint32_t> queueSizeIO;
+
+    /** @brief SSL certificate */
    std::optional<std::string> certificate;  // ssl context

+    /** @brief Username/login */
    std::optional<std::string> username;

+    /** @brief Password to match the `username` */
    std::optional<std::string> password;

+    /**
+     * @brief Creates a new Settings object as a copy of the current one with overridden contact points.
+     */
    Settings
    withContactPoints(std::string_view contactPoints)
    {
@@ -65,6 +104,9 @@ struct Settings
        return tmp;
    }

+    /**
+     * @brief Returns the default settings.
+     */
    static Settings
    defaultSettings()
    {
@@ -74,7 +116,7 @@ struct Settings

class Cluster : public ManagedObject<CassCluster>
{
-    clio::Logger log_{"Backend"};
+    util::Logger log_{"Backend"};

public:
    Cluster(Settings const& settings);
@@ -96,4 +138,4 @@ private:
    setupCredentials(Settings const& settings);
};

-} // namespace Backend::Cassandra::detail
+} // namespace data::cassandra::detail
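A minimal sketch of how these settings feed a `Handle`. `defaultSettings()` and `withContactPoints()` come from the struct above; the keyspace name, the concrete values, and the `Settings` alias being reachable at namespace scope are assumptions for the example.

```cpp
// Illustrative: overriding contact points on defaultSettings() and handing
// the result to a Handle.
using namespace data::cassandra;

auto settings = detail::Settings::defaultSettings().withContactPoints("10.0.0.5");
settings.coreConnectionsPerHost = 2;  // keep two connections per host active
settings.queueSizeIO = 50'000;        // explicit IO queue size instead of the derived default

Handle handle{settings};
if (auto rc = handle.connect("clio"); not rc)
    throw std::runtime_error(rc.error().message());
```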
||||
@@ -19,13 +19,13 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <backend/cassandra/Handle.h>
|
||||
#include <backend/cassandra/Types.h>
|
||||
#include <backend/cassandra/impl/AsyncExecutor.h>
|
||||
#include <log/Logger.h>
|
||||
#include <data/cassandra/Handle.h>
|
||||
#include <data/cassandra/Types.h>
|
||||
#include <data/cassandra/impl/AsyncExecutor.h>
|
||||
#include <util/Expected.h>
|
||||
#include <util/log/Logger.h>
|
||||
|
||||
#include <boost/asio/async_result.hpp>
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <atomic>
|
||||
@@ -36,19 +36,20 @@
|
||||
#include <optional>
|
||||
#include <thread>
|
||||
|
||||
namespace Backend::Cassandra::detail {
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
// TODO: this could probably be also moved out of detail and into the main cassandra namespace.
|
||||
|
||||
/**
|
||||
* @brief Implements async and sync querying against the cassandra DB with
|
||||
* support for throttling.
|
||||
* @brief Implements async and sync querying against the cassandra DB with support for throttling.
|
||||
*
|
||||
* Note: A lot of the code that uses yield is repeated below. This is ok for now
|
||||
* because we are hopefully going to be getting rid of it entirely later on.
|
||||
* Note: A lot of the code that uses yield is repeated below.
|
||||
* This is ok for now because we are hopefully going to be getting rid of it entirely later on.
|
||||
*/
|
||||
template <typename HandleType = Handle>
|
||||
class DefaultExecutionStrategy
|
||||
{
|
||||
clio::Logger log_{"Backend"};
|
||||
util::Logger log_{"Backend"};
|
||||
|
||||
std::uint32_t maxWriteRequestsOutstanding_;
|
||||
std::atomic_uint32_t numWriteRequestsOutstanding_ = 0;
|
||||
@@ -75,21 +76,21 @@ public:
|
||||
using FutureType = typename HandleType::FutureType;
|
||||
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
|
||||
using ResultType = typename HandleType::ResultType;
|
||||
|
||||
using CompletionTokenType = boost::asio::yield_context;
|
||||
using FunctionType = void(boost::system::error_code);
|
||||
using AsyncResultType = boost::asio::async_result<CompletionTokenType, FunctionType>;
|
||||
using HandlerType = typename AsyncResultType::completion_handler_type;
|
||||
|
||||
DefaultExecutionStrategy(Settings settings, HandleType const& handle)
|
||||
/**
|
||||
* @param settings The settings to use
|
||||
* @param handle A handle to the cassandra database
|
||||
*/
|
||||
DefaultExecutionStrategy(Settings const& settings, HandleType const& handle)
|
||||
: maxWriteRequestsOutstanding_{settings.maxWriteRequestsOutstanding}
|
||||
, maxReadRequestsOutstanding_{settings.maxReadRequestsOutstanding}
|
||||
, work_{ioc_}
|
||||
, handle_{std::cref(handle)}
|
||||
, thread_{[this]() { ioc_.run(); }}
|
||||
{
|
||||
log_.info() << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
|
||||
<< "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
|
||||
LOG(log_.info()) << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
|
||||
<< "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
|
||||
}
|
||||
|
||||
~DefaultExecutionStrategy()
|
||||
@@ -100,17 +101,20 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Wait for all async writes to finish before unblocking
|
||||
* @brief Wait for all async writes to finish before unblocking.
|
||||
*/
|
||||
void
|
||||
sync()
|
||||
{
|
||||
log_.debug() << "Waiting to sync all writes...";
|
||||
LOG(log_.debug()) << "Waiting to sync all writes...";
|
||||
std::unique_lock<std::mutex> lck(syncMutex_);
|
||||
syncCv_.wait(lck, [this]() { return finishedAllWriteRequests(); });
|
||||
log_.debug() << "Sync done.";
|
||||
LOG(log_.debug()) << "Sync done.";
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if outstanding read requests allowance is exhausted; false otherwise
|
||||
*/
|
||||
bool
|
||||
isTooBusy() const
|
||||
{
|
||||
@@ -118,7 +122,7 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Blocking query execution used for writing data
|
||||
* @brief Blocking query execution used for writing data.
|
||||
*
|
||||
* Retries forever sleeping for 5 milliseconds between attempts.
|
||||
*/
|
||||
@@ -133,14 +137,14 @@ public:
|
||||
}
|
||||
else
|
||||
{
|
||||
log_.warn() << "Cassandra sync write error, retrying: " << res.error();
|
||||
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Blocking query execution used for writing data
|
||||
* @brief Blocking query execution used for writing data.
|
||||
*
|
||||
* Retries forever sleeping for 5 milliseconds between attempts.
|
||||
*/
|
||||
@@ -152,11 +156,11 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Non-blocking query execution used for writing data
|
||||
* @brief Non-blocking query execution used for writing data.
|
||||
*
|
||||
* Retries forever with retry policy specified by @ref AsyncExecutor
|
||||
*
|
||||
* @param prepradeStatement Statement to prepare and execute
|
||||
* @param preparedStatement Statement to prepare and execute
|
||||
* @param args Args to bind to the prepared statement
|
||||
* @throw DatabaseTimeout on timeout
|
||||
*/
|
||||
@@ -173,7 +177,7 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Non-blocking batched query execution used for writing data
|
||||
* @brief Non-blocking batched query execution used for writing data.
|
||||
*
|
||||
* Retries forever with retry policy specified by @ref AsyncExecutor.
|
||||
*
|
||||
@@ -199,7 +203,7 @@ public:
|
||||
* Retries forever until successful or throws an exception on timeout.
|
||||
*
|
||||
* @param token Completion token (yield_context)
|
||||
* @param prepradeStatement Statement to prepare and execute
|
||||
* @param preparedStatement Statement to prepare and execute
|
||||
* @param args Args to bind to the prepared statement
|
||||
* @throw DatabaseTimeout on timeout
|
||||
* @return ResultType or error wrapped in Expected
|
||||
@@ -224,35 +228,35 @@ public:
|
||||
[[maybe_unused]] ResultOrErrorType
|
||||
read(CompletionTokenType token, std::vector<StatementType> const& statements)
|
||||
{
|
||||
auto handler = HandlerType{token};
|
||||
auto result = AsyncResultType{handler};
|
||||
auto const numStatements = statements.size();
|
||||
std::optional<FutureWithCallbackType> future;
|
||||
|
||||
// todo: perhaps use policy instead
|
||||
while (true)
|
||||
{
|
||||
numReadRequestsOutstanding_ += numStatements;
|
||||
|
||||
auto const future = handle_.get().asyncExecute(statements, [handler](auto&&) mutable {
|
||||
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
|
||||
handler(boost::system::error_code{});
|
||||
});
|
||||
});
|
||||
auto init = [this, &statements, &future]<typename Self>(Self& self) {
|
||||
auto sself = std::make_shared<Self>(std::move(self));
|
||||
|
||||
// suspend coroutine until completion handler is called
|
||||
result.get();
|
||||
future.emplace(handle_.get().asyncExecute(statements, [sself](auto&& res) mutable {
|
||||
boost::asio::post(
|
||||
boost::asio::get_associated_executor(*sself),
|
||||
[sself, res = std::move(res)]() mutable { sself->complete(std::move(res)); });
|
||||
}));
|
||||
};
|
||||
|
||||
auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
|
||||
init, token, boost::asio::get_associated_executor(token));
|
||||
numReadRequestsOutstanding_ -= numStatements;
|
||||
|
||||
// it's safe to call blocking get on future here as we already
|
||||
// waited for the coroutine to resume above.
|
||||
if (auto res = future.get(); res)
|
||||
if (res)
|
||||
{
|
||||
return res;
|
||||
}
|
||||
else
|
||||
{
|
||||
log_.error() << "Failed batch read in coroutine: " << res.error();
|
||||
LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();
|
||||
throwErrorIfNeeded(res.error());
|
||||
}
|
||||
}
|
||||
@@ -271,34 +275,33 @@ public:
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, StatementType const& statement)
{
auto handler = HandlerType{token};
auto result = AsyncResultType{handler};
std::optional<FutureWithCallbackType> future;

// todo: perhaps use policy instead
while (true)
{
++numReadRequestsOutstanding_;
auto init = [this, &statement, &future]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));

auto const future = handle_.get().asyncExecute(statement, [handler](auto const&) mutable {
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
handler(boost::system::error_code{});
});
});

// suspend coroutine until completion handler is called
result.get();
future.emplace(handle_.get().asyncExecute(statement, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::move(res)]() mutable { sself->complete(std::move(res)); });
}));
};

auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
init, token, boost::asio::get_associated_executor(token));
--numReadRequestsOutstanding_;

// it's safe to call blocking get on future here as we already
// waited for the coroutine to resume above.
if (auto res = future.get(); res)
if (res)
{
return res;
}
else
{
log_.error() << "Failed read in coroutine: " << res.error();
LOG(log_.error()) << "Failed read in coroutine: " << res.error();
throwErrorIfNeeded(res.error());
}
}
@@ -318,9 +321,6 @@ public:
std::vector<ResultType>
readEach(CompletionTokenType token, std::vector<StatementType> const& statements)
{
auto handler = HandlerType{token};
auto result = AsyncResultType{handler};

std::atomic_bool hadError = false;
std::atomic_int numOutstanding = statements.size();
numReadRequestsOutstanding_ += statements.size();
@@ -328,29 +328,29 @@ public:
auto futures = std::vector<FutureWithCallbackType>{};
futures.reserve(numOutstanding);

// used as the handler for each async statement individually
auto executionHandler = [handler, &hadError, &numOutstanding](auto const& res) mutable {
if (not res)
hadError = true;
auto init = [this, &statements, &futures, &hadError, &numOutstanding]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));
auto executionHandler = [&hadError, &numOutstanding, sself](auto const& res) mutable {
if (not res)
hadError = true;

// when all async operations complete unblock the result
if (--numOutstanding == 0)
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
handler(boost::system::error_code{});
// when all async operations complete unblock the result
if (--numOutstanding == 0)
boost::asio::post(
boost::asio::get_associated_executor(*sself), [sself]() mutable { sself->complete(); });
};

std::transform(
std::cbegin(statements),
std::cend(statements),
std::back_inserter(futures),
[this, &executionHandler](auto const& statement) {
return handle_.get().asyncExecute(statement, executionHandler);
});
};

std::transform(
std::cbegin(statements),
std::cend(statements),
std::back_inserter(futures),
[this, &executionHandler](auto const& statement) {
return handle_.get().asyncExecute(statement, executionHandler);
});

// suspend coroutine until completion handler is called
result.get();

boost::asio::async_compose<CompletionTokenType, void()>(
init, token, boost::asio::get_associated_executor(token));
numReadRequestsOutstanding_ -= statements.size();

if (hadError)
@@ -359,8 +359,7 @@ public:
std::vector<ResultType> results;
results.reserve(futures.size());

// it's safe to call blocking get on futures here as we already
// waited for the coroutine to resume above.
// it's safe to call blocking get on futures here as we already waited for the coroutine to resume above.
std::transform(
std::make_move_iterator(std::begin(futures)),
std::make_move_iterator(std::end(futures)),
@@ -384,8 +383,8 @@ private:
std::unique_lock<std::mutex> lck(throttleMutex_);
if (!canAddWriteRequest())
{
log_.trace() << "Max outstanding requests reached. "
<< "Waiting for other requests to finish";
LOG(log_.trace()) << "Max outstanding requests reached. "
<< "Waiting for other requests to finish";
throttleCv_.wait(lck, [this]() { return canAddWriteRequest(); });
}
}
@@ -440,4 +439,4 @@ private:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -17,9 +17,9 @@
*/
//==============================================================================

#include <backend/cassandra/Error.h>
#include <backend/cassandra/impl/Future.h>
#include <backend/cassandra/impl/Result.h>
#include <data/cassandra/Error.h>
#include <data/cassandra/impl/Future.h>
#include <data/cassandra/impl/Result.h>

#include <exception>
#include <vector>
@@ -28,7 +28,7 @@ namespace {
static constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, futureDeleter}
{
@@ -73,7 +73,10 @@ void
invokeHelper(CassFuture* ptr, void* cbPtr)
{
// Note: can't use Future{ptr}.get() because double free will occur :/
auto* cb = static_cast<FutureWithCallback::fn_t*>(cbPtr);
// Note2: we are moving/copying it locally as a workaround for an issue we are seeing from asio recently.
// stackoverflow.com/questions/77004137/boost-asio-async-compose-gets-stuck-under-load
auto* cb = static_cast<FutureWithCallback::FnType*>(cbPtr);
auto local = std::make_unique<FutureWithCallback::FnType>(std::move(*cb));
if (auto const rc = cass_future_error_code(ptr); rc)
{
auto const errMsg = [&ptr](std::string const& label) {
@@ -82,16 +85,16 @@ invokeHelper(CassFuture* ptr, void* cbPtr)
cass_future_error_message(ptr, &message, &len);
return label + ": " + std::string{message, len};
}("invokeHelper");
(*cb)(Error{CassandraError{errMsg, rc}});
(*local)(Error{CassandraError{errMsg, rc}});
}
else
{
(*cb)(Result{cass_future_get_result(ptr)});
(*local)(Result{cass_future_get_result(ptr)});
}
}

/* implicit */ FutureWithCallback::FutureWithCallback(CassFuture* ptr, fn_t&& cb)
: Future{ptr}, cb_{std::make_unique<fn_t>(std::move(cb))}
/* implicit */ FutureWithCallback::FutureWithCallback(CassFuture* ptr, FnType&& cb)
: Future{ptr}, cb_{std::make_unique<FnType>(std::move(cb))}
{
// Instead of passing `this` as the userdata void*, we pass the address of
// the callback itself which will survive std::move of the
@@ -99,4 +102,4 @@ invokeHelper(CassFuture* ptr, void* cbPtr)
cass_future_set_callback(*this, &invokeHelper, cb_.get());
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,12 +19,12 @@

#pragma once

#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

struct Future : public ManagedObject<CassFuture>
{
@@ -43,16 +43,16 @@ invokeHelper(CassFuture* ptr, void* self);
class FutureWithCallback : public Future
{
public:
using fn_t = std::function<void(ResultOrError)>;
using fn_ptr_t = std::unique_ptr<fn_t>;
using FnType = std::function<void(ResultOrError)>;
using FnPtrType = std::unique_ptr<FnType>;

/* implicit */ FutureWithCallback(CassFuture* ptr, fn_t&& cb);
/* implicit */ FutureWithCallback(CassFuture* ptr, FnType&& cb);
FutureWithCallback(FutureWithCallback const&) = delete;
FutureWithCallback(FutureWithCallback&&) = default;

private:
/*! Wrapped in a unique_ptr so it can survive std::move :/ */
fn_ptr_t cb_;
/** Wrapped in a unique_ptr so it can survive std::move :/ */
FnPtrType cb_;
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
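Two things are going on in this file: the userdata handed to the C driver is the address of the heap-allocated callback (stable across moves of the owning object), and invokeHelper now moves the callback into a local before invoking it, per the linked stackoverflow workaround. A condensed sketch of that ownership scheme (generic names; the registerCallback call is hypothetical):

#include <functional>
#include <memory>
#include <utility>

struct AsyncHandle
{
    using Fn = std::function<void()>;

    std::unique_ptr<Fn> cb_;  // heap allocation keeps the address stable across std::move

    explicit AsyncHandle(Fn&& cb) : cb_{std::make_unique<Fn>(std::move(cb))}
    {
        // registerCallback(&AsyncHandle::invoke, cb_.get());  // hypothetical C API call
    }

    static void
    invoke(void* userdata)
    {
        auto* cb = static_cast<Fn*>(userdata);
        // Move to a local first so the call survives even if the handler
        // destroys the AsyncHandle (and with it cb_) while running.
        auto local = std::make_unique<Fn>(std::move(*cb));
        (*local)();
    }
};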
@@ -21,7 +21,7 @@

#include <memory>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

template <typename Managed>
class ManagedObject
@@ -38,10 +38,10 @@ public:
}
ManagedObject(ManagedObject&&) = default;

operator Managed* const() const
operator Managed*() const
{
return ptr_.get();
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
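The operator change is a small correctness cleanup: a top-level const on a returned pointer type ("Managed* const") is ignored by the language and typically only draws -Wignored-qualifiers, so "operator Managed*() const" is the intended spelling. A reduced sketch of the wrapper (clio's real class is move-only and deleter-parameterized; this uses shared_ptr purely for brevity):

#include <memory>
#include <utility>

template <typename Managed>
class ManagedObjectSketch
{
    std::shared_ptr<Managed> ptr_;

public:
    template <typename Deleter>
    ManagedObjectSketch(Managed* rawPtr, Deleter&& deleter) : ptr_{rawPtr, std::forward<Deleter>(deleter)}
    {
    }

    // const member function returning a plain pointer to the managed C object
    operator Managed*() const
    {
        return ptr_.get();
    }
};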
@@ -17,14 +17,14 @@
*/
//==============================================================================

#include <backend/cassandra/impl/Result.h>
#include <data/cassandra/impl/Result.h>

namespace {
static constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
static constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/* implicit */ Result::Result(CassResult const* ptr) : ManagedObject{ptr, resultDeleter}
{
@@ -66,4 +66,4 @@ ResultIterator::hasMore() const
return hasMore_;
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,8 +19,8 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Tuple.h>
#include <data/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/Tuple.h>
#include <util/Expected.h>

#include <ripple/basics/base_uint.h>
@@ -31,7 +31,7 @@
#include <iterator>
#include <tuple>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

template <typename>
static constexpr bool unsupported_v = false;
@@ -51,11 +51,11 @@ extractColumn(CassRow const* row, std::size_t idx)
}
};

using decayed_t = std::decay_t<Type>;
using uint_tuple_t = std::tuple<uint32_t, uint32_t>;
using uchar_vector_t = std::vector<unsigned char>;
using DecayedType = std::decay_t<Type>;
using UintTupleType = std::tuple<uint32_t, uint32_t>;
using UCharVectorType = std::vector<unsigned char>;

if constexpr (std::is_same_v<decayed_t, ripple::uint256>)
if constexpr (std::is_same_v<DecayedType, ripple::uint256>)
{
cass_byte_t const* buf;
std::size_t bufSize;
@@ -63,7 +63,7 @@ extractColumn(CassRow const* row, std::size_t idx)
throwErrorIfNeeded(rc, "Extract ripple::uint256");
output = ripple::uint256::fromVoid(buf);
}
else if constexpr (std::is_same_v<decayed_t, ripple::AccountID>)
else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>)
{
cass_byte_t const* buf;
std::size_t bufSize;
@@ -71,20 +71,20 @@ extractColumn(CassRow const* row, std::size_t idx)
throwErrorIfNeeded(rc, "Extract ripple::AccountID");
output = ripple::AccountID::fromVoid(buf);
}
else if constexpr (std::is_same_v<decayed_t, uchar_vector_t>)
else if constexpr (std::is_same_v<DecayedType, UCharVectorType>)
{
cass_byte_t const* buf;
std::size_t bufSize;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract vector<unsigned char>");
output = uchar_vector_t{buf, buf + bufSize};
output = UCharVectorType{buf, buf + bufSize};
}
else if constexpr (std::is_same_v<decayed_t, uint_tuple_t>)
else if constexpr (std::is_same_v<DecayedType, UintTupleType>)
{
auto const* tuple = cass_row_get_column(row, idx);
output = TupleIterator::fromTuple(tuple).extract<uint32_t, uint32_t>();
}
else if constexpr (std::is_convertible_v<decayed_t, std::string>)
else if constexpr (std::is_convertible_v<DecayedType, std::string>)
{
char const* value;
std::size_t len;
@@ -92,7 +92,7 @@ extractColumn(CassRow const* row, std::size_t idx)
throwErrorIfNeeded(rc, "Extract string");
output = std::string{value, len};
}
else if constexpr (std::is_same_v<decayed_t, bool>)
else if constexpr (std::is_same_v<DecayedType, bool>)
{
cass_bool_t flag;
auto const rc = cass_value_get_bool(cass_row_get_column(row, idx), &flag);
@@ -100,17 +100,17 @@ extractColumn(CassRow const* row, std::size_t idx)
output = flag ? true : false;
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
int64_t out;
auto const rc = cass_value_get_int64(cass_row_get_column(row, idx), &out);
throwErrorIfNeeded(rc, "Extract int64");
output = static_cast<decayed_t>(out);
output = static_cast<DecayedType>(out);
}
else
{
// type not supported for extraction
static_assert(unsupported_v<decayed_t>);
static_assert(unsupported_v<DecayedType>);
}

return output;
@@ -254,4 +254,4 @@ public:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
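The extractColumn dispatch relies on the unsupported_v<T> idiom declared near the top of the file: a dependent false makes the static_assert fire only when the fallback branch is actually instantiated, whereas a plain static_assert(false) would reject every instantiation. A self-contained illustration:

#include <cstdint>
#include <string>
#include <type_traits>

template <typename>
constexpr bool unsupported_v = false;  // dependent false

template <typename Type>
void
dispatch()
{
    using DecayedType = std::decay_t<Type>;
    if constexpr (std::is_convertible_v<DecayedType, std::int64_t>)
    {
        // integral-ish extraction path
    }
    else if constexpr (std::is_convertible_v<DecayedType, std::string>)
    {
        // string-ish extraction path
    }
    else
    {
        // only fails for the concrete Type that actually reaches this branch
        static_assert(unsupported_v<DecayedType>, "type not supported");
    }
}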
@@ -19,10 +19,10 @@

#pragma once

#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <log/Logger.h>
#include <data/cassandra/Handle.h>
#include <data/cassandra/Types.h>
#include <util/Expected.h>
#include <util/log/Logger.h>

#include <boost/asio.hpp>

@@ -30,14 +30,14 @@
#include <chrono>
#include <cmath>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/**
* @brief A retry policy that employs exponential backoff
*/
class ExponentialBackoffRetryPolicy
{
clio::Logger log_{"Backend"};
util::Logger log_{"Backend"};

boost::asio::steady_timer timer_;
uint32_t attempt_ = 0u;
@@ -46,7 +46,7 @@ public:
/**
* @brief Create a new retry policy instance with the io_context provided
*/
ExponentialBackoffRetryPolicy(boost::asio::io_context& ioc) : timer_{ioc}
ExponentialBackoffRetryPolicy(boost::asio::io_context& ioc) : timer_{boost::asio::make_strand(ioc)}
{
}

@@ -59,8 +59,8 @@ public:
shouldRetry([[maybe_unused]] CassandraError err)
{
auto const delay = calculateDelay(attempt_);
log_.error() << "Cassandra write error: " << err << ", current retries " << attempt_ << ", retrying in "
<< delay.count() << " milliseconds";
LOG(log_.error()) << "Cassandra write error: " << err << ", current retries " << attempt_ << ", retrying in "
<< delay.count() << " milliseconds";

return true; // keep retrying forever
}
@@ -91,4 +91,4 @@ public:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
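Beyond the logging change, note the constructor now builds the timer on boost::asio::make_strand(ioc), which serializes the retry timer's handlers even when the io_context runs on several threads. calculateDelay itself is outside this hunk; one plausible shape, offered only as an assumption rather than clio's exact formula, doubles the wait per attempt and caps it:

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>

// hypothetical helper: 1ms, 2ms, 4ms, ... capped at 1024ms
std::chrono::milliseconds
calculateDelaySketch(uint32_t attempt)
{
    return std::chrono::milliseconds{std::lround(std::pow(2, std::min(10u, attempt)))};
}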
@@ -19,11 +19,11 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

class Session : public ManagedObject<CassSession>
{
@@ -35,4 +35,4 @@ public:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -17,13 +17,13 @@
*/
//==============================================================================

#include <backend/cassandra/impl/SslContext.h>
#include <data/cassandra/impl/SslContext.h>

namespace {
static constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), contextDeleter}
{
@@ -34,4 +34,4 @@ SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_
}
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,17 +19,17 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

#include <string>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

struct SslContext : public ManagedObject<CassSsl>
{
explicit SslContext(std::string const& certificate);
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,9 +19,9 @@

#pragma once

#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Tuple.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/Tuple.h>
#include <util/Expected.h>

#include <ripple/basics/base_uint.h>
@@ -33,7 +33,7 @@
#include <compare>
#include <iterator>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

class Statement : public ManagedObject<CassStatement>
{
@@ -44,7 +44,7 @@ class Statement : public ManagedObject<CassStatement>

public:
/**
* @brief Construct a new statement with optionally provided arguments
* @brief Construct a new statement with optionally provided arguments.
*
* Note: it's up to the user to make sure the bound parameters match
* the format of the query (e.g. amount of '?' matches count of args).
@@ -66,6 +66,11 @@ public:

Statement(Statement&&) = default;

/**
* @brief Binds the given arguments to the statement.
*
* @param args Arguments to bind
*/
template <typename... Args>
void
bind(Args&&... args) const
@@ -74,6 +79,12 @@ public:
(this->bindAt<Args>(idx++, std::forward<Args>(args)), ...);
}

/**
* @brief Binds an argument to a specific index.
*
* @param idx The index of the argument
* @param value The value to bind it to
*/
template <typename Type>
void
bindAt(std::size_t const idx, Type&& value) const
@@ -88,48 +99,48 @@ public:
return cass_statement_bind_bytes(*this, idx, static_cast<cass_byte_t const*>(data), size);
};

using decayed_t = std::decay_t<Type>;
using uchar_vec_t = std::vector<unsigned char>;
using uint_tuple_t = std::tuple<uint32_t, uint32_t>;
using DecayedType = std::decay_t<Type>;
using UCharVectorType = std::vector<unsigned char>;
using UintTupleType = std::tuple<uint32_t, uint32_t>;

if constexpr (std::is_same_v<decayed_t, ripple::uint256>)
if constexpr (std::is_same_v<DecayedType, ripple::uint256>)
{
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind ripple::uint256");
}
else if constexpr (std::is_same_v<decayed_t, ripple::AccountID>)
else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>)
{
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind ripple::AccountID");
}
else if constexpr (std::is_same_v<decayed_t, uchar_vec_t>)
else if constexpr (std::is_same_v<DecayedType, UCharVectorType>)
{
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind vector<unsigned char>");
}
else if constexpr (std::is_convertible_v<decayed_t, std::string>)
else if constexpr (std::is_convertible_v<DecayedType, std::string>)
{
// reinterpret_cast is needed here :'(
auto const rc = bindBytes(reinterpret_cast<unsigned char const*>(value.data()), value.size());
throwErrorIfNeeded(rc, "Bind string (as bytes)");
}
else if constexpr (std::is_same_v<decayed_t, uint_tuple_t>)
else if constexpr (std::is_same_v<DecayedType, UintTupleType>)
{
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::move(value)});
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32>");
}
else if constexpr (std::is_same_v<decayed_t, bool>)
else if constexpr (std::is_same_v<DecayedType, bool>)
{
auto const rc = cass_statement_bind_bool(*this, idx, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}
else if constexpr (std::is_same_v<decayed_t, Limit>)
else if constexpr (std::is_same_v<DecayedType, Limit>)
{
auto const rc = cass_statement_bind_int32(*this, idx, value.limit);
throwErrorIfNeeded(rc, "Bind limit (int32)");
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
auto const rc = cass_statement_bind_int64(*this, idx, value);
throwErrorIfNeeded(rc, "Bind int64");
@@ -137,11 +148,16 @@ public:
else
{
// type not supported for binding
static_assert(unsupported_v<decayed_t>);
static_assert(unsupported_v<DecayedType>);
}
}
};

/**
* @brief Represents a prepared statement on the DB side.
*
* This is used to produce Statement objects that can be executed.
*/
class PreparedStatement : public ManagedObject<CassPrepared const>
{
static constexpr auto deleter = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };
@@ -151,6 +167,12 @@ public:
{
}

/**
* @brief Bind the given arguments and produce a ready to execute Statement.
*
* @param args The arguments to bind
* @return A bound and ready to execute Statement object
*/
template <typename... Args>
Statement
bind(Args&&... args) const
@@ -161,4 +183,4 @@ public:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
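The variadic bind above expands through a comma fold, which guarantees left-to-right evaluation, so the post-incremented index always matches the argument's position. A self-contained illustration of that idiom:

#include <cstddef>
#include <iostream>
#include <utility>

template <typename... Args>
void
bindAll(Args&&... args)
{
    std::size_t idx = 0;
    auto bindAt = [](std::size_t i, auto&& value) { std::cout << i << ": " << value << '\n'; };
    (bindAt(idx++, std::forward<Args>(args)), ...);  // sequenced left to right
}

// bindAll("seq", 42, true) prints "0: seq", "1: 42", "2: 1"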
@@ -17,14 +17,14 @@
*/
//==============================================================================

#include <backend/cassandra/impl/Tuple.h>
#include <data/cassandra/impl/Tuple.h>

namespace {
static constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
static constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/* implicit */ Tuple::Tuple(CassTuple* ptr) : ManagedObject{ptr, tupleDeleter}
{
@@ -40,4 +40,4 @@ TupleIterator::fromTuple(CassValue const* value)
return {cass_iterator_from_tuple(value)};
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,7 +19,7 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

@@ -28,7 +28,7 @@
#include <string_view>
#include <tuple>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

class Tuple : public ManagedObject<CassTuple>
{
@@ -68,15 +68,15 @@ public:
}
};

using decayed_t = std::decay_t<Type>;
using DecayedType = std::decay_t<Type>;

if constexpr (std::is_same_v<decayed_t, bool>)
if constexpr (std::is_same_v<DecayedType, bool>)
{
auto const rc = cass_tuple_set_bool(*this, idx, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
auto const rc = cass_tuple_set_int64(*this, idx, value);
throwErrorIfNeeded(rc, "Bind int64");
@@ -84,7 +84,7 @@ public:
else
{
// type not supported for binding
static_assert(unsupported_v<decayed_t>);
static_assert(unsupported_v<DecayedType>);
}
}
};
@@ -126,24 +126,24 @@ private:
}
};

using decayed_t = std::decay_t<Type>;
using DecayedType = std::decay_t<Type>;

// clio only uses bigint (int64_t) so we convert any incoming type
if constexpr (std::is_convertible_v<decayed_t, int64_t>)
if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
int64_t out;
auto const rc = cass_value_get_int64(cass_iterator_get_value(*this), &out);
throwErrorIfNeeded(rc, "Extract int64 from tuple");
output = static_cast<decayed_t>(out);
output = static_cast<DecayedType>(out);
}
else
{
// type not supported for extraction
static_assert(unsupported_v<decayed_t>);
static_assert(unsupported_v<DecayedType>);
}

return output;
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -17,6 +17,7 @@
*/
//==============================================================================

/** @file */
#pragma once

#include <ripple/basics/base_uint.h>
@@ -26,6 +27,7 @@
#include <queue>
#include <sstream>

namespace etl {
/**
* @brief This datastructure is used to keep track of the sequence of the most recent ledger validated by the network.
*
@@ -43,6 +45,9 @@ class NetworkValidatedLedgers
std::condition_variable cv_;

public:
/**
* @brief A factory function for NetworkValidatedLedgers.
*/
static std::shared_ptr<NetworkValidatedLedgers>
make_ValidatedLedgers()
{
@@ -50,9 +55,9 @@ public:
}

/**
* @brief Notify the datastructure that idx has been validated by the network
* @brief Notify the datastructure that idx has been validated by the network.
*
* @param idx sequence validated by network
* @param idx Sequence validated by network
*/
void
push(uint32_t idx)
@@ -68,7 +73,7 @@ public:
*
* If no ledgers are known to have been validated, this function waits until the next ledger is validated
*
* @return sequence of most recently validated ledger. empty optional if the datastructure has been stopped
* @return Sequence of most recently validated ledger. empty optional if the datastructure has been stopped
*/
std::optional<uint32_t>
getMostRecent()
@@ -79,9 +84,9 @@ public:
}

/**
* @brief Waits for the sequence to be validated by the network
* @brief Waits for the sequence to be validated by the network.
*
* @param sequence to wait for
* @param sequence The sequence to wait for
* @return true if sequence was validated, false otherwise a return value of false means the datastructure has been
* stopped
*/
@@ -100,7 +105,7 @@ public:

// TODO: does the note make sense? lockfree queues provide the same blocking behaviour just without mutex, don't they?
/**
* @brief Generic thread-safe queue with a max capacity
* @brief Generic thread-safe queue with a max capacity.
*
* @note (original note) We can't use a lockfree queue here, since we need the ability to wait for an element to be
* added or removed from the queue. These waits are blocking calls.
@@ -116,21 +121,21 @@ class ThreadSafeQueue

public:
/**
* @brief Create an instance of the queue
* @brief Create an instance of the queue.
*
* @param maxSize maximum size of the queue. Calls that would cause the queue to exceed this size will block until
* free space is available
* free space is available.
*/
ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize)
{
}

/**
* @brief Push element onto the queue
* @brief Push element onto the queue.
*
* Note: This method will block until free space is available
* Note: This method will block until free space is available.
*
* @param elt element to push onto queue
* @param elt Element to push onto queue
*/
void
push(T const& elt)
@@ -142,11 +147,11 @@ public:
}

/**
* @brief Push element onto the queue
* @brief Push element onto the queue.
*
* Note: This method will block until free space is available
*
* @param elt element to push onto queue. elt is moved from
* @param elt Element to push onto queue. Ownership is transferred
*/
void
push(T&& elt)
@@ -158,11 +163,11 @@ public:
}

/**
* @brief Pop element from the queue
* @brief Pop element from the queue.
*
* Note: Will block until queue is non-empty
* Note: Will block until queue is non-empty.
*
* @return element popped from queue
* @return Element popped from queue
*/
T
pop()
@@ -178,9 +183,9 @@ public:
}

/**
* @brief Attempt to pop an element
* @brief Attempt to pop an element.
*
* @return element popped from queue or empty optional if queue was empty
* @return Element popped from queue or empty optional if queue was empty
*/
std::optional<T>
tryPop()
@@ -200,7 +205,7 @@ public:
/**
* @brief Parititions the uint256 keyspace into numMarkers partitions, each of equal size.
*
* @param numMarkers total markers to partition for
* @param numMarkers Total markers to partition for
*/
inline std::vector<ripple::uint256>
getMarkers(size_t numMarkers)
@@ -219,3 +224,4 @@ getMarkers(size_t numMarkers)
}
return markers;
}
} // namespace etl
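The ThreadSafeQueue documented above pairs one mutex with a condition variable so push() can block while the queue is full and pop() while it is empty, which is exactly why a lockfree queue is ruled out in the note. A condensed sketch of that design (stop handling and tryPop omitted; not the full clio implementation):

#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <queue>

template <typename T>
class BoundedQueueSketch
{
    std::mutex m_;
    std::condition_variable cv_;
    std::queue<T> q_;
    std::size_t maxSize_;

public:
    explicit BoundedQueueSketch(std::size_t maxSize) : maxSize_{maxSize}
    {
    }

    void
    push(T elt)
    {
        std::unique_lock lck{m_};
        cv_.wait(lck, [this] { return q_.size() < maxSize_; });  // block while full
        q_.push(std::move(elt));
        cv_.notify_all();
    }

    T
    pop()
    {
        std::unique_lock lck{m_};
        cv_.wait(lck, [this] { return !q_.empty(); });  // block while empty
        T ret = std::move(q_.front());
        q_.pop();
        cv_.notify_all();
        return ret;
    }
};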
@@ -19,16 +19,17 @@

#include <etl/ETLService.h>

using namespace clio;
#include <ripple/protocol/LedgerHeader.h>

namespace etl {
// Database must be populated when this starts
std::optional<uint32_t>
ETLService::runETLPipeline(uint32_t startSequence, int numExtractors)
ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
{
if (finishSequence_ && startSequence > *finishSequence_)
return {};

log_.debug() << "Starting etl pipeline";
LOG(log_.debug()) << "Starting etl pipeline";
state_.isWriting = true;

auto rng = backend_->hardFetchLedgerRangeNoThrow();
@@ -46,7 +47,8 @@ ETLService::runETLPipeline(uint32_t startSequence, int numExtractors)
extractors.push_back(std::make_unique<ExtractorType>(
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_));

auto transformer = TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, startSequence, state_};
auto transformer =
TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, startSequence, state_};
transformer.waitTillFinished(); // suspend current thread until exit condition is met
pipe.cleanup(); // TODO: this should probably happen automatically using destructor

@@ -56,12 +58,12 @@ ETLService::runETLPipeline(uint32_t startSequence, int numExtractors)

auto const end = std::chrono::system_clock::now();
auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
log_.debug() << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
<< ((end - begin).count()) / 1000000000.0;
LOG(log_.debug()) << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
<< ((end - begin).count()) / 1000000000.0;

state_.isWriting = false;

log_.debug() << "Stopping etl pipeline";
LOG(log_.debug()) << "Stopping etl pipeline";
return lastPublishedSeq;
}

@@ -79,35 +81,39 @@ ETLService::monitor()
auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng)
{
log_.info() << "Database is empty. Will download a ledger "
"from the network.";
std::optional<ripple::LedgerInfo> ledger;
LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
std::optional<ripple::LedgerHeader> ledger;

if (startSequence_)
try
{
log_.info() << "ledger sequence specified in config. "
<< "Will begin ETL process starting with ledger " << *startSequence_;
ledger = ledgerLoader_.loadInitialLedger(*startSequence_);
}
else
{
log_.info() << "Waiting for next ledger to be validated by network...";
std::optional<uint32_t> mostRecentValidated = networkValidatedLedgers_->getMostRecent();

if (mostRecentValidated)
if (startSequence_)
{
log_.info() << "Ledger " << *mostRecentValidated << " has been validated. "
<< "Downloading...";
ledger = ledgerLoader_.loadInitialLedger(*mostRecentValidated);
LOG(log_.info()) << "ledger sequence specified in config. "
<< "Will begin ETL process starting with ledger " << *startSequence_;
ledger = ledgerLoader_.loadInitialLedger(*startSequence_);
}
else
{
log_.info() << "The wait for the next validated "
<< "ledger has been aborted. "
<< "Exiting monitor loop";
return;
LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
std::optional<uint32_t> mostRecentValidated = networkValidatedLedgers_->getMostRecent();

if (mostRecentValidated)
{
LOG(log_.info()) << "Ledger " << *mostRecentValidated << " has been validated. Downloading...";
ledger = ledgerLoader_.loadInitialLedger(*mostRecentValidated);
}
else
{
LOG(log_.info()) << "The wait for the next validated ledger has been aborted. Exiting monitor loop";
return;
}
}
}
catch (std::runtime_error const& e)
{
LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
return amendmentBlockHandler_.onAmendmentBlock();
}

if (ledger)
{
@@ -115,86 +121,101 @@ ETLService::monitor()
}
else
{
log_.error() << "Failed to load initial ledger. Exiting monitor loop";
LOG(log_.error()) << "Failed to load initial ledger. Exiting monitor loop";
return;
}
}
else
{
if (startSequence_)
log_.warn() << "start sequence specified but db is already populated";
LOG(log_.warn()) << "start sequence specified but db is already populated";

log_.info() << "Database already populated. Picking up from the tip of history";
LOG(log_.info()) << "Database already populated. Picking up from the tip of history";
cacheLoader_.load(rng->maxSequence);
}

assert(rng);
uint32_t nextSequence = rng->maxSequence + 1;

log_.debug() << "Database is populated. "
<< "Starting monitor loop. sequence = " << nextSequence;
LOG(log_.debug()) << "Database is populated. "
<< "Starting monitor loop. sequence = " << nextSequence;

while (true)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence)
nextSequence = publishNextSequence(nextSequence);
}
}

uint32_t
ETLService::publishNextSequence(uint32_t nextSequence)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence)
{
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
}
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
{
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";

// Attempt to take over responsibility of ETL writer after 10 failed
// attempts to publish the ledger. publishLedger() fails if the
// ledger that has been validated by the network is not found in the
// database after the specified number of attempts. publishLedger()
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);

if (!success)
{
LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";

// returns the most recent sequence published empty optional if no sequence was published
std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
LOG(log_.info()) << "Aborting ETL. Falling back to publishing";

// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
}
else
{
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
}
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
{
log_.info() << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";

// Attempt to take over responsibility of ETL writer after 10 failed
// attempts to publish the ledger. publishLedger() fails if the
// ledger that has been validated by the network is not found in the
// database after the specified number of attempts. publishLedger()
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);

if (!success)
{
log_.warn() << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";

// returns the most recent sequence published empty optional if no sequence was published
std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
log_.info() << "Aborting ETL. Falling back to publishing";

// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
}
else
{
++nextSequence;
}
}
}
return nextSequence;
}

void
ETLService::monitorReadOnly()
{
log_.debug() << "Starting reporting in strict read only mode";
LOG(log_.debug()) << "Starting reporting in strict read only mode";

auto rng = backend_->hardFetchLedgerRangeNoThrow();
uint32_t latestSequence;
const auto latestSequenceOpt = [this]() -> std::optional<uint32_t> {
auto rng = backend_->hardFetchLedgerRangeNoThrow();

if (!rng)
{
if (auto net = networkValidatedLedgers_->getMostRecent())
latestSequence = *net;
if (!rng)
{
if (auto net = networkValidatedLedgers_->getMostRecent())
return *net;
else
return std::nullopt;
}
else
return;
}
else
{
return rng->maxSequence;
}
}();

if (!latestSequenceOpt.has_value())
{
latestSequence = rng->maxSequence;
return;
}

uint32_t latestSequence = *latestSequenceOpt;

cacheLoader_.load(latestSequence);
latestSequence++;

@@ -217,7 +238,7 @@ ETLService::monitorReadOnly()
void
ETLService::run()
{
log_.info() << "Starting reporting etl";
LOG(log_.info()) << "Starting reporting etl";
state_.isStopping = false;

doWork();
@@ -227,7 +248,7 @@ void
ETLService::doWork()
{
worker_ = std::thread([this]() {
beast::setCurrentThreadName("rippled: ETLService worker");
beast::setCurrentThreadName("ETLService worker");

if (state_.isReadOnly)
monitorReadOnly();
@@ -237,7 +258,7 @@ ETLService::doWork()
}

ETLService::ETLService(
clio::Config const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
@@ -250,6 +271,7 @@ ETLService::ETLService(
, ledgerFetcher_(backend, balancer)
, ledgerLoader_(backend, balancer, ledgerFetcher_, state_)
, ledgerPublisher_(ioc, backend, subscriptions, state_)
, amendmentBlockHandler_(ioc, state_)
{
startSequence_ = config.maybeValue<uint32_t>("start_sequence");
finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
@@ -257,3 +279,4 @@ ETLService::ETLService(
extractorThreads_ = config.valueOr<uint32_t>("extractor_threads", extractorThreads_);
txnThreshold_ = config.valueOr<size_t>("txn_threshold", txnThreshold_);
}
} // namespace etl

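The recurring rewrite of log_.info() into LOG(log_.info()) across this file suggests a macro that guards evaluation of the stream arguments. One common shape for such a macro, offered here as an assumption since clio's real definition lives in util/log/Logger.h and may differ:

// assumes the pump object returned by log_.info() etc. is contextually
// convertible to bool (severity enabled?) and supports operator<<
#define LOG(pump)              \
    if (auto p_ = (pump); !p_) \
    {                          \
    }                          \
    else                       \
        p_

// LOG(log_.debug()) << "expensive: " << buildReport();
// buildReport() is evaluated only when debug severity is enabled.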
@@ -19,11 +19,12 @@

#pragma once

#include <backend/BackendInterface.h>
#include <backend/LedgerCache.h>
#include <data/BackendInterface.h>
#include <data/LedgerCache.h>
#include <etl/LoadBalancer.h>
#include <etl/Source.h>
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>
#include <etl/impl/CacheLoader.h>
#include <etl/impl/ExtractionDataPipe.h>
#include <etl/impl/Extractor.h>
@@ -31,10 +32,11 @@
#include <etl/impl/LedgerLoader.h>
#include <etl/impl/LedgerPublisher.h>
#include <etl/impl/Transformer.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include <feed/SubscriptionManager.h>
#include <util/log/Logger.h>

#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/asio/steady_timer.hpp>
#include <grpcpp/grpcpp.h>

#include <memory>
@@ -42,7 +44,14 @@
struct AccountTransactionsData;
struct NFTTransactionsData;
struct NFTsData;
namespace feed {
class SubscriptionManager;
}

/**
* @brief This namespace contains everything to do with the ETL and ETL sources.
*/
namespace etl {

/**
* @brief This class is responsible for continuously extracting data from a p2p node, and writing that data to the
@@ -60,18 +69,20 @@ class SubscriptionManager;
class ETLService
{
// TODO: make these template parameters in ETLService
using SubscriptionManagerType = SubscriptionManager;
using SubscriptionManagerType = feed::SubscriptionManager;
using LoadBalancerType = LoadBalancer;
using NetworkValidatedLedgersType = NetworkValidatedLedgers;
using DataPipeType = clio::detail::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
using CacheLoaderType = clio::detail::CacheLoader<Backend::LedgerCache>;
using LedgerFetcherType = clio::detail::LedgerFetcher<LoadBalancerType>;
using ExtractorType = clio::detail::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
using LedgerLoaderType = clio::detail::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = clio::detail::LedgerPublisher<SubscriptionManagerType>;
using TransformerType = clio::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType>;
using DataPipeType = etl::detail::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
using CacheLoaderType = etl::detail::CacheLoader<data::LedgerCache>;
using LedgerFetcherType = etl::detail::LedgerFetcher<LoadBalancerType>;
using ExtractorType = etl::detail::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
using LedgerLoaderType = etl::detail::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = etl::detail::LedgerPublisher<SubscriptionManagerType>;
using AmendmentBlockHandlerType = etl::detail::AmendmentBlockHandler<>;
using TransformerType =
etl::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;

clio::Logger log_{"ETL"};
util::Logger log_{"ETL"};

std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<LoadBalancerType> loadBalancer_;
@@ -84,6 +95,7 @@ class ETLService
LedgerFetcherType ledgerFetcher_;
LedgerLoaderType ledgerLoader_;
LedgerPublisherType ledgerPublisher_;
AmendmentBlockHandlerType amendmentBlockHandler_;

SystemState state_;

@@ -94,7 +106,7 @@ class ETLService

public:
/**
* @brief Create an instance of ETLService
* @brief Create an instance of ETLService.
*
* @param config The configuration to use
* @param ioc io context to run on
@@ -104,16 +116,28 @@ public:
* @param ledgers The network validated ledgers datastructure
*/
ETLService(
clio::Config const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers);

/**
* @brief A factory function to spawn new ETLService instances.
*
* Creates and runs the ETL service.
*
* @param config The configuration to use
* @param ioc io context to run on
* @param backend BackendInterface implementation
* @param subscriptions Subscription manager
* @param balancer Load balancer to use
* @param ledgers The network validated ledgers datastructure
*/
static std::shared_ptr<ETLService>
make_ETLService(
clio::Config const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
@@ -127,12 +151,12 @@ public:
}

/**
* @brief Stops components and joins worker thread
* @brief Stops components and joins worker thread.
*/
~ETLService()
{
log_.info() << "onStop called";
log_.debug() << "Stopping Reporting ETL";
LOG(log_.info()) << "onStop called";
LOG(log_.debug()) << "Stopping Reporting ETL";

state_.isStopping = true;
cacheLoader_.stop();
@@ -140,11 +164,11 @@ public:
if (worker_.joinable())
worker_.join();

log_.debug() << "Joined ETLService worker thread";
LOG(log_.debug()) << "Joined ETLService worker thread";
}

/**
* @brief Get time passed since last ledger close, in seconds
* @brief Get time passed since last ledger close, in seconds.
*/
std::uint32_t
lastCloseAgeSeconds() const
@@ -152,6 +176,17 @@ public:
return ledgerPublisher_.lastCloseAgeSeconds();
}

/**
* @brief Check for the amendment blocked state.
*
* @return true if currently amendment blocked; false otherwise
*/
bool
isAmendmentBlocked() const
{
return state_.isAmendmentBlocked;
}

/**
* @brief Get state of ETL as a JSON object
*/
@@ -177,10 +212,11 @@ private:
* @note database must already be populated when this function is called
*
* @param startSequence the first ledger to extract
* @param numExtractors number of extractors to use
* @return the last ledger written to the database, if any
*/
std::optional<uint32_t>
runETLPipeline(uint32_t startSequence, int offset);
runETLPipeline(uint32_t startSequence, uint32_t numExtractors);

/**
* @brief Monitor the network for newly validated ledgers.
@@ -194,6 +230,15 @@ private:
void
monitor();

/**
* @brief Monitor the network for newly validated ledgers and publish them to the ledgers stream
*
* @param nextSequence the ledger sequence to publish
* @return the next ledger sequence to publish
*/
uint32_t
publishNextSequence(uint32_t nextSequence);

/**
* @brief Monitor the database for newly written ledgers.
*
@@ -213,7 +258,7 @@ private:
}

/**
* @brief Get the number of markers to use during the initial ledger download
* @brief Get the number of markers to use during the initial ledger download.
*
* This is equivelent to the degree of parallelism during the initial ledger download.
*
@@ -226,14 +271,15 @@ private:
}

/**
* @brief Start all components to run ETL service
* @brief Start all components to run ETL service.
*/
void
run();

/**
* @brief Spawn the worker thread and start monitoring
* @brief Spawn the worker thread and start monitoring.
*/
void
doWork();
};
} // namespace etl

@@ -17,14 +17,14 @@
*/
//==============================================================================

#include <backend/DBHelpers.h>
#include <data/DBHelpers.h>
#include <etl/ETLService.h>
#include <etl/NFTHelpers.h>
#include <etl/ProbingSource.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>
#include <util/log/Logger.h>

#include <ripple/beast/net/IPEndpoint.h>
#include <ripple/protocol/STLedgerEntry.h>
@@ -36,20 +36,20 @@

#include <thread>

using namespace clio;
using namespace util;

namespace etl {

std::unique_ptr<Source>
LoadBalancer::make_Source(
clio::Config const& config,
boost::asio::io_context& ioContext,
Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
LoadBalancer& balancer)
{
auto src =
std::make_unique<ProbingSource>(config, ioContext, backend, subscriptions, networkValidatedLedgers, balancer);

auto src = std::make_unique<ProbingSource>(config, ioc, backend, subscriptions, validatedLedgers, balancer);
src->run();

return src;
@@ -57,21 +57,21 @@ LoadBalancer::make_Source(

std::shared_ptr<LoadBalancer>
LoadBalancer::make_LoadBalancer(
clio::Config const& config,
Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
{
return std::make_shared<LoadBalancer>(config, ioc, backend, subscriptions, validatedLedgers);
}

LoadBalancer::LoadBalancer(
clio::Config const& config,
boost::asio::io_context& ioContext,
Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl)
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
{
if (auto value = config.maybeValue<uint32_t>("num_markers"); value)
downloadRanges_ = std::clamp(*value, 1u, 256u);
@@ -80,13 +80,18 @@ LoadBalancer::LoadBalancer(

for (auto const& entry : config.array("etl_sources"))
{
std::unique_ptr<Source> source = make_Source(entry, ioContext, backend, subscriptions, nwvl, *this);
std::unique_ptr<Source> source = make_Source(entry, ioc, backend, subscriptions, validatedLedgers, *this);

sources_.push_back(std::move(source));
log_.info() << "Added etl source - " << sources_.back()->toString();
LOG(log_.info()) << "Added etl source - " << sources_.back()->toString();
}
}

LoadBalancer::~LoadBalancer()
{
sources_.clear();
}

std::pair<std::vector<std::string>, bool>
LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
{
@@ -96,8 +101,8 @@ LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);

if (!res)
log_.error() << "Failed to download initial ledger."
<< " Sequence = " << sequence << " source = " << source->toString();
LOG(log_.error()) << "Failed to download initial ledger."
<< " Sequence = " << sequence << " source = " << source->toString();
else
response = std::move(data);

@@ -117,15 +122,15 @@ LoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObje
response = std::move(data);
if (status.ok() && response.validated())
{
log.info() << "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source->toString();
LOG(log.info()) << "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source->toString();
return true;
}
else
{
log.warn() << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
LOG(log.warn()) << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
return false;
}
},
@@ -140,11 +145,11 @@ std::optional<boost::json::object>
LoadBalancer::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
boost::asio::yield_context yield) const
{
srand((unsigned)time(0));
srand(static_cast<unsigned>(time(0)));
auto sourceIdx = rand() % sources_.size();
auto numAttempts = 0;
auto numAttempts = 0u;

while (numAttempts < sources_.size())
{
@@ -188,7 +193,7 @@ template <class Func>
|
||||
bool
|
||||
LoadBalancer::execute(Func f, uint32_t ledgerSequence)
|
||||
{
|
||||
srand((unsigned)time(0));
|
||||
srand(static_cast<unsigned>(time(0)));
|
||||
auto sourceIdx = rand() % sources_.size();
|
||||
auto numAttempts = 0;
|
||||
|
||||
@@ -196,8 +201,8 @@ LoadBalancer::execute(Func f, uint32_t ledgerSequence)
|
||||
{
|
||||
auto& source = sources_[sourceIdx];
|
||||
|
||||
log_.debug() << "Attempting to execute func. ledger sequence = " << ledgerSequence
|
||||
<< " - source = " << source->toString();
|
||||
LOG(log_.debug()) << "Attempting to execute func. ledger sequence = " << ledgerSequence
|
||||
<< " - source = " << source->toString();
|
||||
// Originally, it was (source->hasLedger(ledgerSequence) || true)
|
||||
/* Sometimes rippled has ledger but doesn't actually know. However,
|
||||
but this does NOT happen in the normal case and is safe to remove
|
||||
@@ -207,29 +212,31 @@ LoadBalancer::execute(Func f, uint32_t ledgerSequence)
|
||||
bool res = f(source);
|
||||
if (res)
|
||||
{
|
||||
log_.debug() << "Successfully executed func at source = " << source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
LOG(log_.debug()) << "Successfully executed func at source = " << source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
log_.warn() << "Failed to execute func at source = " << source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
LOG(log_.warn()) << "Failed to execute func at source = " << source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
log_.warn() << "Ledger not present at source = " << source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
LOG(log_.warn()) << "Ledger not present at source = " << source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
}
|
||||
sourceIdx = (sourceIdx + 1) % sources_.size();
|
||||
numAttempts++;
|
||||
if (numAttempts % sources_.size() == 0)
|
||||
{
|
||||
log_.info() << "Ledger sequence " << ledgerSequence << " is not yet available from any configured sources. "
|
||||
<< "Sleeping and trying again";
|
||||
LOG(log_.info()) << "Ledger sequence " << ledgerSequence
|
||||
<< " is not yet available from any configured sources. "
|
||||
<< "Sleeping and trying again";
|
||||
std::this_thread::sleep_for(std::chrono::seconds(2));
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
} // namespace etl
|
||||
|
||||
@@ -19,22 +19,29 @@

#pragma once

#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <data/BackendInterface.h>
#include <etl/ETLHelpers.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include <feed/SubscriptionManager.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/asio.hpp>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

namespace etl {
class Source;
class ProbingSource;
} // namespace etl

namespace feed {
class SubscriptionManager;
} // namespace feed

namespace etl {

/**
 * @brief This class is used to manage connections to transaction processing processes
 * @brief This class is used to manage connections to transaction processing processes.
 *
 * This class spawns a listener for each etl source, which listens to messages on the ledgers stream (to keep track of
 * which ledgers have been validated by the network, and the range of ledgers each etl source has). This class also
@@ -48,66 +55,84 @@ public:
    using OptionalGetLedgerResponseType = std::optional<GetLedgerResponseType>;

private:
    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};
    std::vector<std::unique_ptr<Source>> sources_;
    std::uint32_t downloadRanges_ = 16;
    std::uint32_t downloadRanges_ = 16; /*< The number of markers to use when downloading initial ledger */

public:
    /**
     * @brief Create an instance of the load balancer
     * @brief Create an instance of the load balancer.
     *
     * @param config The configuration to use
     * @param ioContext io context to run on
     * @param ioc The io_context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param nwvl The network validated ledgers datastructure
     * @param validatedLedgers The network validated ledgers datastructure
     */
    LoadBalancer(
        clio::Config const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl);

    static std::shared_ptr<LoadBalancer>
    make_LoadBalancer(
        clio::Config const& config,
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers);

    static std::unique_ptr<Source>
    make_Source(
        clio::Config const& config,
        boost::asio::io_context& ioContext,
    /**
     * @brief A factory function for the load balancer.
     *
     * @param config The configuration to use
     * @param ioc The io_context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param validatedLedgers The network validated ledgers datastructure
     */
    static std::shared_ptr<LoadBalancer>
    make_LoadBalancer(
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
        LoadBalancer& balancer);

    ~LoadBalancer()
    {
        sources_.clear();
    }
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers);

    /**
     * @brief Load the initial ledger, writing data to the queue
     * @brief A factory function for the ETL source.
     *
     * @param sequence sequence of ledger to download
     * @param config The configuration to use
     * @param ioc The io_context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param validatedLedgers The network validated ledgers datastructure
     * @param balancer The load balancer
     */
    static std::unique_ptr<Source>
    make_Source(
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
        LoadBalancer& balancer);

    ~LoadBalancer();

    /**
     * @brief Load the initial ledger, writing data to the queue.
     *
     * @param sequence Sequence of ledger to download
     * @param cacheOnly Whether to only write to cache and not to the DB; defaults to false
     */
    std::pair<std::vector<std::string>, bool>
    loadInitialLedger(uint32_t sequence, bool cacheOnly = false);

    /**
     * @brief Fetch data for a specific ledger
     * @brief Fetch data for a specific ledger.
     *
     * This function will continuously try to fetch data for the specified ledger until the fetch succeeds, the ledger
     * is found in the database, or the server is shutting down.
     *
     * @param ledgerSequence sequence of ledger to fetch data for
     * @param getObjects if true, fetch diff between specified ledger and previous
     * @return the extracted data, if extraction was successful. If the ledger was found in the database or the server
     * @param ledgerSequence Sequence of the ledger to fetch
     * @param getObjects Whether to get the account state diff between this ledger and the prior one
     * @param getObjectNeighbors Whether to request object neighbors
     * @return The extracted data, if extraction was successful. If the ledger was found in the database or the server
     * is shutting down, the optional will be empty
     */
    OptionalGetLedgerResponseType
@@ -127,30 +152,32 @@ public:
    shouldPropagateTxnStream(Source* in) const;

    /**
     * @return JSON representation of the state of this load balancer
     * @return JSON representation of the state of this load balancer.
     */
    boost::json::value
    toJson() const;

    /**
     * @brief Forward a JSON RPC request to a randomly selected rippled node
     * @brief Forward a JSON RPC request to a randomly selected rippled node.
     *
     * @param request JSON-RPC request
     * @return response received from rippled node
     * @param request JSON-RPC request to forward
     * @param clientIp The IP address of the peer
     * @param yield The coroutine context
     * @return Response received from rippled node as JSON object on success; nullopt on failure
     */
    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield)
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
        const;

private:
    /**
     * @brief Execute a function on a randomly selected source
     * @brief Execute a function on a randomly selected source.
     *
     * @note f is a function that takes a Source as an argument and returns a bool.
     * Attempt to execute f for one randomly chosen Source that has the specified ledger. If f returns false, another
     * randomly chosen Source is used. The process repeats until f returns true.
     *
     * @param f function to execute. This function takes the ETL source as an argument, and returns a bool.
     * @param f Function to execute. This function takes the ETL source as an argument, and returns a bool
     * @param ledgerSequence f is executed for each Source that has this ledger
     * @return true if f was eventually executed successfully. false if the ledger was found in the database or the
     * server is shutting down
@@ -159,3 +186,4 @@ private:
    bool
    execute(Func f, uint32_t ledgerSequence);
};
} // namespace etl
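To make the factory-plus-forwarding flow above concrete, here is a minimal, hypothetical call-site sketch. Only the signatures are taken from the declarations above; `config`, `ioc`, `backend`, `subscriptions`, and `validatedLedgers` are assumed to already exist, and the spawn overload shown is the classic `boost::asio::spawn(io_context&, ...)`. Note that `yield_context` is taken by value, matching the signature change in this diff.

```cpp
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <iostream>

// Hypothetical wiring; arguments are placeholders for objects built elsewhere.
auto balancer = etl::LoadBalancer::make_LoadBalancer(config, ioc, backend, subscriptions, validatedLedgers);

boost::asio::spawn(ioc, [&balancer](boost::asio::yield_context yield) {
    boost::json::object const request{{"command", "server_info"}};

    // Forwards to a randomly selected rippled node; nullopt means all sources failed.
    if (auto res = balancer->forwardToRippled(request, "127.0.0.1", yield); res)
        std::cout << boost::json::serialize(*res) << '\n';
});
```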
@@ -22,9 +22,12 @@
#include <ripple/protocol/TxMeta.h>
#include <vector>

#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <backend/Types.h>
#include <data/BackendInterface.h>
#include <data/DBHelpers.h>
#include <data/Types.h>
#include <fmt/core.h>

namespace etl {

std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
@@ -95,21 +98,20 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)

    std::sort(finalIDs.begin(), finalIDs.end());
    std::sort(prevIDs.begin(), prevIDs.end());
    std::vector<ripple::uint256> tokenIDResult;
    std::set_difference(
        finalIDs.begin(),
        finalIDs.end(),
        prevIDs.begin(),
        prevIDs.end(),
        std::inserter(tokenIDResult, tokenIDResult.begin()));
    if (tokenIDResult.size() == 1 && owner)
        return {
            {NFTTransactionsData(tokenIDResult.front(), txMeta, sttx.getTransactionID())},
            NFTsData(tokenIDResult.front(), *owner, sttx.getFieldVL(ripple::sfURI), txMeta)};

    std::stringstream msg;
    msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID();
    throw std::runtime_error(msg.str());
    // Find the first NFT ID that doesn't match. We're looking for an
    // added NFT, so the one we want will be the mismatch in finalIDs.
    auto const diff = std::mismatch(finalIDs.begin(), finalIDs.end(), prevIDs.begin(), prevIDs.end());

    // There should always be a difference so the returned finalIDs
    // iterator should never be end(). But better safe than sorry.
    if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner)
        throw std::runtime_error(
            fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID())));

    return {
        {NFTTransactionsData(*diff.first, txMeta, sttx.getTransactionID())},
        NFTsData(*diff.first, *owner, sttx.getFieldVL(ripple::sfURI), txMeta)};
}

std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
@@ -338,3 +340,4 @@ getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string c

    return nfts;
}
} // namespace etl
@@ -17,21 +17,35 @@
*/
//==============================================================================

/** @file */
#pragma once

#include <backend/DBHelpers.h>
#include <data/DBHelpers.h>

#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>

namespace etl {

/**
 * @brief Pull NFT data from TX via ETLService
 * @brief Pull NFT data from TX via ETLService.
 *
 * @param txMeta Transaction metadata
 * @param sttx The transaction
 * @return NFT transactions data as a pair of transactions and optional NFTsData
 */
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);

/**
 * @brief Pull NFT data from ledger object via loadInitialLedger
 * @brief Pull NFT data from ledger object via loadInitialLedger.
 *
 * @param seq The ledger sequence to pull for
 * @param key The owner key
 * @param blob Object data as blob
 * @return The NFT data as a vector
 */
std::vector<NFTsData>
getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob);

} // namespace etl
@@ -18,15 +18,14 @@
//==============================================================================

#include <etl/ProbingSource.h>
#include <log/Logger.h>

using namespace clio;
namespace etl {

ProbingSource::ProbingSource(
    clio::Config const& config,
    util::Config const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<feed::SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> nwvl,
    LoadBalancer& balancer,
    boost::asio::ssl::context sslCtx)
@@ -106,26 +105,26 @@ ProbingSource::token() const
}

std::pair<std::vector<std::string>, bool>
ProbingSource::loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly)
ProbingSource::loadInitialLedger(std::uint32_t sequence, std::uint32_t numMarkers, bool cacheOnly)
{
    if (!currentSrc_)
        return {{}, false};
    return currentSrc_->loadInitialLedger(ledgerSequence, numMarkers, cacheOnly);
    return currentSrc_->loadInitialLedger(sequence, numMarkers, cacheOnly);
}

std::pair<grpc::Status, ProbingSource::GetLedgerResponseType>
ProbingSource::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
ProbingSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNeighbors)
{
    if (!currentSrc_)
        return {};
    return currentSrc_->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
    return currentSrc_->fetchLedger(sequence, getObjects, getObjectNeighbors);
}

std::optional<boost::json::object>
ProbingSource::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield) const
{
    if (!currentSrc_)
        return {};
@@ -136,7 +135,7 @@ std::optional<boost::json::object>
ProbingSource::requestFromRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield) const
{
    if (!currentSrc_)
        return {};
@@ -156,12 +155,12 @@ ProbingSource::make_SSLHooks() noexcept
            {
                plainSrc_->pause();
                currentSrc_ = sslSrc_;
                log_.info() << "Selected WSS as the main source: " << currentSrc_->toString();
                LOG(log_.info()) << "Selected WSS as the main source: " << currentSrc_->toString();
            }
            return SourceHooks::Action::PROCEED;
        },
        // onDisconnected
        [this](auto ec) {
        [this](auto /* ec */) {
            std::lock_guard lck(mtx_);
            if (currentSrc_)
            {
@@ -185,12 +184,12 @@ ProbingSource::make_PlainHooks() noexcept
            {
                sslSrc_->pause();
                currentSrc_ = plainSrc_;
                log_.info() << "Selected Plain WS as the main source: " << currentSrc_->toString();
                LOG(log_.info()) << "Selected Plain WS as the main source: " << currentSrc_->toString();
            }
            return SourceHooks::Action::PROCEED;
        },
        // onDisconnected
        [this](auto ec) {
        [this](auto /* ec */) {
            std::lock_guard lck(mtx_);
            if (currentSrc_)
            {
@@ -199,4 +198,5 @@ ProbingSource::make_PlainHooks() noexcept
            }
            return SourceHooks::Action::STOP;
        }};
}
};
} // namespace etl
@@ -19,9 +19,9 @@

#pragma once

#include <config/Config.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
@@ -31,6 +31,8 @@

#include <mutex>

namespace etl {

/**
 * @brief This Source implementation attempts to connect over both secure websocket and plain websocket.
 *
@@ -44,7 +46,7 @@ public:
    using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;

private:
    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    std::mutex mtx_;
    boost::asio::ssl::context sslCtx_;
@@ -54,7 +56,7 @@ private:

public:
    /**
     * @brief Create an instance of the probing source
     * @brief Create an instance of the probing source.
     *
     * @param config The configuration to use
     * @param ioc io context to run on
@@ -65,10 +67,10 @@ public:
     * @param sslCtx The SSL context to use; defaults to tlsv12
     */
    ProbingSource(
        clio::Config const& config,
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        LoadBalancer& balancer,
        boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12});
@@ -97,13 +99,13 @@ public:
    toString() const override;

    std::pair<std::vector<std::string>, bool>
    loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly = false) override;
    loadInitialLedger(std::uint32_t sequence, std::uint32_t numMarkers, bool cacheOnly = false) override;

    std::pair<grpc::Status, GetLedgerResponseType>
    fetchLedger(uint32_t ledgerSequence, bool getObjects = true, bool getObjectNeighbors = false) override;
    fetchLedger(uint32_t sequence, bool getObjects = true, bool getObjectNeighbors = false) override;

    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield)
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
        const override;

    boost::uuids::uuid
@@ -114,7 +116,7 @@ private:
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;
        boost::asio::yield_context yield) const override;

    SourceHooks
    make_SSLHooks() noexcept;
@@ -122,3 +124,4 @@ private:
    SourceHooks
    make_PlainHooks() noexcept;
};
} // namespace etl
@@ -1,3 +1,5 @@
# ETL subsystem

A single clio node has one or more ETL sources, specified in the config
file. clio will subscribe to the `ledgers` stream of each of the ETL
sources. This stream sends a message whenever a new ledger is validated. Upon
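For orientation, here is a minimal sketch of the relevant configuration section. `etl_sources` and `num_markers` are the keys read by `LoadBalancer` in this diff (`num_markers` is clamped to 1-256 and defaults to 16 via `downloadRanges_`); the per-source field names below are assumptions based on the `ip_` and `wsPort_` members used in the `Source` implementation, not confirmed by this diff.

```json
{
  "etl_sources": [
    { "ip": "127.0.0.1", "ws_port": "6006", "grpc_port": "50051" }
  ],
  "num_markers": 16
}
```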
@@ -17,13 +17,11 @@
*/
//==============================================================================

#include <backend/DBHelpers.h>
#include <data/DBHelpers.h>
#include <etl/ETLService.h>
#include <etl/LoadBalancer.h>
#include <etl/NFTHelpers.h>
#include <etl/ProbingSource.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>

@@ -36,82 +34,19 @@

#include <thread>

using namespace clio;
namespace etl {

static boost::beast::websocket::stream_base::timeout
make_TimeoutOption()
{
    // See #289 for details.
    // TODO: investigate the issue and find if there is a solution other than
    // introducing artificial timeouts.
    if (true)
    {
        // The only difference between this and the suggested client role is
        // that idle_timeout is set to 20 instead of none()
        auto opt = boost::beast::websocket::stream_base::timeout{};
        opt.handshake_timeout = std::chrono::seconds(30);
        opt.idle_timeout = std::chrono::seconds(20);
        opt.keep_alive_pings = false;
        return opt;
    }
    else
    {
        return boost::beast::websocket::stream_base::timeout::suggested(boost::beast::role_type::client);
    }
}

template <class Derived>
void
SourceImpl<Derived>::reconnect(boost::beast::error_code ec)
{
    if (paused_)
        return;

    if (connected_)
        hooks_.onDisconnected(ec);

    connected_ = false;
    // These are somewhat normal errors. operation_aborted occurs on shutdown,
    // when the timer is cancelled. connection_refused will occur repeatedly
    std::string err = ec.message();
    // if we cannot connect to the transaction processing process
    if (ec.category() == boost::asio::error::get_ssl_category())
    {
        err = std::string(" (") + boost::lexical_cast<std::string>(ERR_GET_LIB(ec.value())) + "," +
            boost::lexical_cast<std::string>(ERR_GET_REASON(ec.value())) + ") ";
        // ERR_PACK /* crypto/err/err.h */
        char buf[128];
        ::ERR_error_string_n(ec.value(), buf, sizeof(buf));
        err += buf;

        std::cout << err << std::endl;
    }

    if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused)
    {
        log_.error() << "error code = " << ec << " - " << toString();
    }
    else
    {
        log_.warn() << "error code = " << ec << " - " << toString();
    }

    // exponentially increasing timeouts, with a max of 30 seconds
    size_t waitTime = std::min(pow(2, numFailures_), 30.0);
    numFailures_++;
    timer_.expires_after(boost::asio::chrono::seconds(waitTime));
    timer_.async_wait([this](auto ec) {
        bool startAgain = (ec != boost::asio::error::operation_aborted);
        log_.trace() << "async_wait : ec = " << ec;
        derived().close(startAgain);
    });
    return boost::beast::websocket::stream_base::timeout::suggested(boost::beast::role_type::client);
}

void
PlainSource::close(bool startAgain)
{
    timer_.cancel();
    ioc_.post([this, startAgain]() {
    boost::asio::post(strand_, [this, startAgain]() {
        if (closing_)
            return;

@@ -124,24 +59,19 @@ PlainSource::close(bool startAgain)
            derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
                if (ec)
                {
                    log_.error() << " async_close : "
                                 << "error code = " << ec << " - " << toString();
                    LOG(log_.error()) << "async_close: error code = " << ec << " - " << toString();
                }
                closing_ = false;
                if (startAgain)
                {
                    ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::tcp_stream>>(
                        boost::asio::make_strand(ioc_));

                    ws_ = std::make_unique<StreamType>(strand_);
                    run();
                }
            });
        }
        else if (startAgain)
        {
            ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::tcp_stream>>(
                boost::asio::make_strand(ioc_));

            ws_ = std::make_unique<StreamType>(strand_);
            run();
        }
    });
@@ -151,68 +81,41 @@ void
SslSource::close(bool startAgain)
{
    timer_.cancel();
    ioc_.post([this, startAgain]() {
    boost::asio::post(strand_, [this, startAgain]() {
        if (closing_)
            return;

        if (derived().ws().is_open())
        {
            // onStop() also calls close(). If the async_close is called twice,
            // an assertion fails. Using closing_ makes sure async_close is only
            // called once
            // onStop() also calls close(). If the async_close is called twice, an assertion fails. Using closing_ makes
            // sure async_close is only called once
            closing_ = true;
            derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
                if (ec)
                {
                    log_.error() << " async_close : "
                                 << "error code = " << ec << " - " << toString();
                    LOG(log_.error()) << "async_close: error code = " << ec << " - " << toString();
                }
                closing_ = false;
                if (startAgain)
                {
                    ws_ = std::make_unique<
                        boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
                        boost::asio::make_strand(ioc_), *sslCtx_);

                    ws_ = std::make_unique<StreamType>(strand_, *sslCtx_);
                    run();
                }
            });
        }
        else if (startAgain)
        {
            ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
                boost::asio::make_strand(ioc_), *sslCtx_);

            ws_ = std::make_unique<StreamType>(strand_, *sslCtx_);
            run();
        }
    });
}

template <class Derived>
void
SourceImpl<Derived>::onResolve(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // try again
        reconnect(ec);
    }
    else
    {
        boost::beast::get_lowest_layer(derived().ws()).expires_after(std::chrono::seconds(30));
        boost::beast::get_lowest_layer(derived().ws()).async_connect(results, [this](auto ec, auto ep) {
            derived().onConnect(ec, ep);
        });
    }
}

void
PlainSource::onConnect(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // start over
@@ -220,19 +123,16 @@ PlainSource::onConnect(
    }
    else
    {
        connected_ = true;
        numFailures_ = 0;
        // Turn off timeout on the tcp stream, because websocket stream has its
        // own timeout system

        // Websocket stream has its own timeout system
        boost::beast::get_lowest_layer(derived().ws()).expires_never();

        // Set a desired timeout for the websocket stream
        derived().ws().set_option(make_TimeoutOption());

        // Set a decorator to change the User-Agent of the handshake
        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(boost::beast::http::field::user_agent, "clio-client");

                req.set("X-User", "clio-client");
            }));

@@ -240,7 +140,6 @@
        // Host HTTP header during the WebSocket handshake.
        // See https://tools.ietf.org/html/rfc7230#section-5.4
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the websocket handshake
        derived().ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
    }
}
@@ -248,7 +147,6 @@
void
SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // start over
@@ -256,19 +154,16 @@ SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver
    }
    else
    {
        connected_ = true;
        numFailures_ = 0;
        // Turn off timeout on the tcp stream, because websocket stream has its
        // own timeout system

        // Websocket stream has its own timeout system
        boost::beast::get_lowest_layer(derived().ws()).expires_never();

        // Set a desired timeout for the websocket stream
        derived().ws().set_option(make_TimeoutOption());

        // Set a decorator to change the User-Agent of the handshake
        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(boost::beast::http::field::user_agent, "clio-client");

                req.set("X-User", "clio-client");
            }));

@@ -276,7 +171,6 @@ SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver
        // Host HTTP header during the WebSocket handshake.
        // See https://tools.ietf.org/html/rfc7230#section-5.4
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the websocket handshake
        ws().next_layer().async_handshake(
            boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) { onSslHandshake(ec, endpoint); });
    }
@@ -293,527 +187,8 @@ SslSource::onSslHandshake(
    }
    else
    {
        // Perform the websocket handshake
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the websocket handshake
        ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (auto action = hooks_.onConnected(ec); action == SourceHooks::Action::STOP)
        return;

    if (ec)
    {
        // start over
        reconnect(ec);
    }
    else
    {
        boost::json::object jv{
            {"command", "subscribe"}, {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}}};
        std::string s = boost::json::serialize(jv);
        log_.trace() << "Sending subscribe stream message";

        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(
                    boost::beast::http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " clio-client");

                req.set("X-User", "coro-client");
            }));

        // Send the message
        derived().ws().async_write(boost::asio::buffer(s), [this](auto ec, size_t size) { onWrite(ec, size); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onWrite(boost::beast::error_code ec, size_t bytesWritten)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // start over
        reconnect(ec);
    }
    else
    {
        derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    // if error or error reading message, start over
    if (ec)
    {
        reconnect(ec);
    }
    else
    {
        handleMessage();
        boost::beast::flat_buffer buffer;
        swap(readBuffer_, buffer);

        log_.trace() << "calling async_read - " << toString();
        derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
    }
}

template <class Derived>
bool
SourceImpl<Derived>::handleMessage()
{
    log_.trace() << toString();

    setLastMsgTime();
    connected_ = true;
    try
    {
        std::string msg{static_cast<char const*>(readBuffer_.data().data()), readBuffer_.size()};
        log_.trace() << msg;
        boost::json::value raw = boost::json::parse(msg);
        log_.trace() << "parsed";
        boost::json::object response = raw.as_object();

        uint32_t ledgerIndex = 0;
        if (response.contains("result"))
        {
            boost::json::object result = response["result"].as_object();
            if (result.contains("ledger_index"))
            {
                ledgerIndex = result["ledger_index"].as_int64();
            }
            if (result.contains("validated_ledgers"))
            {
                boost::json::string const& validatedLedgers = result["validated_ledgers"].as_string();

                setValidatedRange({validatedLedgers.c_str(), validatedLedgers.size()});
            }
            log_.info() << "Received a message on ledger "
                        << " subscription stream. Message : " << response << " - " << toString();
        }
        else if (response.contains("type") && response["type"] == "ledgerClosed")
        {
            log_.info() << "Received a message on ledger "
                        << " subscription stream. Message : " << response << " - " << toString();
            if (response.contains("ledger_index"))
            {
                ledgerIndex = response["ledger_index"].as_int64();
            }
            if (response.contains("validated_ledgers"))
            {
                boost::json::string const& validatedLedgers = response["validated_ledgers"].as_string();
                setValidatedRange({validatedLedgers.c_str(), validatedLedgers.size()});
            }
        }
        else
        {
            if (balancer_.shouldPropagateTxnStream(this))
            {
                if (response.contains("transaction"))
                {
                    forwardCache_.freshen();
                    subscriptions_->forwardProposedTransaction(response);
                }
                else if (response.contains("type") && response["type"] == "validationReceived")
                {
                    subscriptions_->forwardValidation(response);
                }
                else if (response.contains("type") && response["type"] == "manifestReceived")
                {
                    subscriptions_->forwardManifest(response);
                }
            }
        }

        if (ledgerIndex != 0)
        {
            log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - " << toString();
            networkValidatedLedgers_->push(ledgerIndex);
        }
        return true;
    }
    catch (std::exception const& e)
    {
        log_.error() << "Exception in handleMessage : " << e.what();
        return false;
    }
}

// TODO: move to detail
class AsyncCallData
{
    clio::Logger log_{"ETL"};

    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;

    org::xrpl::rpc::v1::GetLedgerDataRequest request_;
    std::unique_ptr<grpc::ClientContext> context_;

    grpc::Status status_;
    unsigned char nextPrefix_;

    std::string lastKey_;

public:
    AsyncCallData(uint32_t seq, ripple::uint256 const& marker, std::optional<ripple::uint256> const& nextMarker)
    {
        request_.mutable_ledger()->set_sequence(seq);
        if (marker.isNonZero())
        {
            request_.set_marker(marker.data(), marker.size());
        }
        request_.set_user("ETL");
        nextPrefix_ = 0x00;
        if (nextMarker)
            nextPrefix_ = nextMarker->data()[0];

        unsigned char prefix = marker.data()[0];

        log_.debug() << "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
                     << " . prefix = " << ripple::strHex(std::string(1, prefix))
                     << " . nextPrefix_ = " << ripple::strHex(std::string(1, nextPrefix_));

        assert(nextPrefix_ > prefix || nextPrefix_ == 0x00);

        cur_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        next_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        context_ = std::make_unique<grpc::ClientContext>();
    }

    enum class CallStatus { MORE, DONE, ERRORED };

    CallStatus
    process(
        std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
        grpc::CompletionQueue& cq,
        BackendInterface& backend,
        bool abort,
        bool cacheOnly = false)
    {
        log_.trace() << "Processing response. "
                     << "Marker prefix = " << getMarkerPrefix();
        if (abort)
        {
            log_.error() << "AsyncCallData aborted";
            return CallStatus::ERRORED;
        }
        if (!status_.ok())
        {
            log_.error() << "AsyncCallData status_ not ok: "
                         << " code = " << status_.error_code() << " message = " << status_.error_message();
            return CallStatus::ERRORED;
        }
        if (!next_->is_unlimited())
        {
            log_.warn() << "AsyncCallData is_unlimited is false. Make sure "
                           "secure_gateway is set correctly at the ETL source";
        }

        std::swap(cur_, next_);

        bool more = true;

        // if no marker returned, we are done
        if (cur_->marker().size() == 0)
            more = false;

        // if returned marker is greater than our end, we are done
        unsigned char prefix = cur_->marker()[0];
        if (nextPrefix_ != 0x00 && prefix >= nextPrefix_)
            more = false;

        // if we are not done, make the next async call
        if (more)
        {
            request_.set_marker(std::move(cur_->marker()));
            call(stub, cq);
        }

        auto const numObjects = cur_->ledger_objects().objects_size();
        log_.debug() << "Writing " << numObjects << " objects";

        std::vector<Backend::LedgerObject> cacheUpdates;
        cacheUpdates.reserve(numObjects);

        for (int i = 0; i < numObjects; ++i)
        {
            auto& obj = *(cur_->mutable_ledger_objects()->mutable_objects(i));
            if (!more && nextPrefix_ != 0x00)
            {
                if (((unsigned char)obj.key()[0]) >= nextPrefix_)
                    continue;
            }
            cacheUpdates.push_back(
                {*ripple::uint256::fromVoidChecked(obj.key()),
                 {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
            if (!cacheOnly)
            {
                if (lastKey_.size())
                    backend.writeSuccessor(std::move(lastKey_), request_.ledger().sequence(), std::string{obj.key()});
                lastKey_ = obj.key();
                backend.writeNFTs(getNFTDataFromObj(request_.ledger().sequence(), obj.key(), obj.data()));
                backend.writeLedgerObject(
                    std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data()));
            }
        }
        backend.cache().update(cacheUpdates, request_.ledger().sequence(), cacheOnly);
        log_.debug() << "Wrote " << numObjects << " objects. Got more: " << (more ? "YES" : "NO");

        return more ? CallStatus::MORE : CallStatus::DONE;
    }

    void
    call(std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub, grpc::CompletionQueue& cq)
    {
        context_ = std::make_unique<grpc::ClientContext>();

        std::unique_ptr<grpc::ClientAsyncResponseReader<org::xrpl::rpc::v1::GetLedgerDataResponse>> rpc(
            stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq));

        rpc->StartCall();

        rpc->Finish(next_.get(), &status_, this);
    }

    std::string
    getMarkerPrefix()
    {
        if (next_->marker().size() == 0)
            return "";
        else
            return ripple::strHex(std::string{next_->marker().data()[0]});
    }

    std::string
    getLastKey()
    {
        return lastKey_;
    }
};

template <class Derived>
std::pair<std::vector<std::string>, bool>
SourceImpl<Derived>::loadInitialLedger(uint32_t sequence, uint32_t numMarkers, bool cacheOnly)
{
    if (!stub_)
        return {{}, false};

    grpc::CompletionQueue cq;
    void* tag;
    bool ok = false;
    std::vector<AsyncCallData> calls;
    auto markers = getMarkers(numMarkers);

    for (size_t i = 0; i < markers.size(); ++i)
    {
        std::optional<ripple::uint256> nextMarker;

        if (i + 1 < markers.size())
            nextMarker = markers[i + 1];

        calls.emplace_back(sequence, markers[i], nextMarker);
    }

    log_.debug() << "Starting data download for ledger " << sequence << ". Using source = " << toString();

    for (auto& c : calls)
        c.call(stub_, cq);

    size_t numFinished = 0;
    bool abort = false;
    size_t incr = 500000;
    size_t progress = incr;
    std::vector<std::string> edgeKeys;

    while (numFinished < calls.size() && cq.Next(&tag, &ok))
    {
        assert(tag);
        auto ptr = static_cast<AsyncCallData*>(tag);

        if (!ok)
        {
            log_.error() << "loadInitialLedger - ok is false";
            return {{}, false}; // handle cancelled
        }
        else
        {
            log_.trace() << "Marker prefix = " << ptr->getMarkerPrefix();

            auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
            if (result != AsyncCallData::CallStatus::MORE)
            {
                numFinished++;
                log_.debug() << "Finished a marker. "
                             << "Current number of finished = " << numFinished;

                std::string lastKey = ptr->getLastKey();

                if (lastKey.size())
                    edgeKeys.push_back(ptr->getLastKey());
            }

            if (result == AsyncCallData::CallStatus::ERRORED)
                abort = true;

            if (backend_->cache().size() > progress)
            {
                log_.info() << "Downloaded " << backend_->cache().size() << " records from rippled";
                progress += incr;
            }
        }
    }

    log_.info() << "Finished loadInitialLedger. cache size = " << backend_->cache().size();
    return {std::move(edgeKeys), !abort};
}

template <class Derived>
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
SourceImpl<Derived>::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
    org::xrpl::rpc::v1::GetLedgerResponse response;
    if (!stub_)
        return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

    // ledger header with txns and metadata
    org::xrpl::rpc::v1::GetLedgerRequest request;
    grpc::ClientContext context;
    request.mutable_ledger()->set_sequence(ledgerSequence);
    request.set_transactions(true);
    request.set_expand(true);
    request.set_get_objects(getObjects);
    request.set_get_object_neighbors(getObjectNeighbors);
    request.set_user("ETL");
    grpc::Status status = stub_->GetLedger(&context, request, &response);
    if (status.ok() && !response.is_unlimited())
    {
        log_.warn() << "SourceImpl::fetchLedger - is_unlimited is "
                       "false. Make sure secure_gateway is set "
                       "correctly on the ETL source. source = "
                    << toString() << " status = " << status.error_message();
    }
    return {status, std::move(response)};
}

template <class Derived>
std::optional<boost::json::object>
SourceImpl<Derived>::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (auto resp = forwardCache_.get(request); resp)
    {
        log_.debug() << "request hit forwardCache";
        return resp;
    }

    return requestFromRippled(request, clientIp, yield);
}

template <class Derived>
std::optional<boost::json::object>
SourceImpl<Derived>::requestFromRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    log_.trace() << "Attempting to forward request to tx. "
                 << "request = " << boost::json::serialize(request);

    boost::json::object response;
    if (!connected_)
    {
        log_.error() << "Attempted to proxy but failed to connect to tx";
        return {};
    }
    namespace beast = boost::beast;         // from <boost/beast.hpp>
    namespace http = beast::http;           // from <boost/beast/http.hpp>
    namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
    namespace net = boost::asio;            // from <boost/asio.hpp>
    using tcp = boost::asio::ip::tcp;       // from <boost/asio/ip/tcp.hpp>
    try
    {
        boost::beast::error_code ec;
        // These objects perform our I/O
        tcp::resolver resolver{ioc_};

        log_.trace() << "Creating websocket";
        auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);

        // Look up the domain name
        auto const results = resolver.async_resolve(ip_, wsPort_, yield[ec]);
        if (ec)
            return {};

        ws->next_layer().expires_after(std::chrono::seconds(3));

        log_.trace() << "Connecting websocket";
        // Make the connection on the IP address we get from a lookup
        ws->next_layer().async_connect(results, yield[ec]);
        if (ec)
            return {};

        // Set a decorator to change the User-Agent of the handshake
        // and to tell rippled to charge the client IP for RPC
        // resources. See "secure_gateway" in
        //
        // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg
        ws->set_option(websocket::stream_base::decorator([&clientIp](websocket::request_type& req) {
            req.set(http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " websocket-client-coro");
            req.set(http::field::forwarded, "for=" + clientIp);
        }));
        log_.trace() << "client ip: " << clientIp;

        log_.trace() << "Performing websocket handshake";
        // Perform the websocket handshake
        ws->async_handshake(ip_, "/", yield[ec]);
        if (ec)
            return {};

        log_.trace() << "Sending request";
        // Send the message
        ws->async_write(net::buffer(boost::json::serialize(request)), yield[ec]);
        if (ec)
            return {};

        beast::flat_buffer buffer;
        ws->async_read(buffer, yield[ec]);
        if (ec)
            return {};

        auto begin = static_cast<char const*>(buffer.data().data());
        auto end = begin + buffer.data().size();
        auto parsed = boost::json::parse(std::string(begin, end));

        if (!parsed.is_object())
        {
            log_.error() << "Error parsing response: " << std::string{begin, end};
            return {};
        }
        log_.trace() << "Successfully forwarded request";

        response = parsed.as_object();

        response["forwarded"] = true;
        return response;
    }
    catch (std::exception const& e)
    {
        log_.error() << "Encountered exception : " << e.what();
        return {};
    }
}
} // namespace etl

src/etl/Source.h: 893 lines changed (diff suppressed because it is too large)
@@ -21,30 +21,33 @@

#include <atomic>

namespace etl {

/**
 * @brief Represents the state of the ETL subsystem.
 */
struct SystemState
{
    /**
     * @brief Whether the process is in strict read-only mode
     * @brief Whether the process is in strict read-only mode.
     *
     * In strict read-only mode, the process will never attempt to become the ETL writer, and will only publish ledgers
     * as they are written to the database.
     */
    bool isReadOnly = false;

    std::atomic_bool isWriting = false; /**< @brief Whether the process is writing to the database. */
    std::atomic_bool isStopping = false; /**< @brief Whether the software is stopping. */
    std::atomic_bool writeConflict = false; /**< @brief Whether a write conflict was detected. */

    /**
     * @brief Whether the process is writing to the database.
     * @brief Whether clio detected an amendment block.
     *
     * Used by server_info
     * Being amendment blocked means that Clio was compiled with a libxrpl that does not yet support some field that
     * arrived from rippled and therefore can't extract the ledger diff. When this happens, Clio can't proceed with ETL
     * and should log this error and only handle RPC requests.
     */
    std::atomic_bool isWriting = false;

    /**
     * @brief Whether the software is stopping
     */
    std::atomic_bool isStopping = false;

    /**
     * @brief Whether a write conflict was detected
     */
    std::atomic_bool writeConflict = false;
    std::atomic_bool isAmendmentBlocked = false;
};

} // namespace etl

src/etl/impl/AmendmentBlock.h: new file, 96 lines
@@ -0,0 +1,96 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <etl/SystemState.h>
#include <util/log/Logger.h>

#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>

#include <chrono>
#include <functional>

namespace etl::detail {

struct AmendmentBlockAction
{
    void
    operator()()
    {
        static util::Logger log{"ETL"};
        LOG(log.fatal())
            << "Can't process new ledgers: The current ETL source is not compatible with the version of the "
               "libxrpl Clio is currently using. Please upgrade Clio to a newer version.";
    }
};

template <typename ActionCallableType = AmendmentBlockAction>
class AmendmentBlockHandler
{
    std::reference_wrapper<boost::asio::io_context> ctx_;
    std::reference_wrapper<SystemState> state_;
    boost::asio::steady_timer timer_;
    std::chrono::milliseconds interval_;

    ActionCallableType action_;

public:
    template <typename DurationType = std::chrono::seconds>
    AmendmentBlockHandler(
        boost::asio::io_context& ioc,
        SystemState& state,
        DurationType interval = DurationType{1},
        ActionCallableType&& action = ActionCallableType())
        : ctx_{std::ref(ioc)}
        , state_{std::ref(state)}
        , timer_{ioc}
        , interval_{std::chrono::duration_cast<std::chrono::milliseconds>(interval)}
        , action_{std::move(action)}
    {
    }

    ~AmendmentBlockHandler()
    {
        boost::asio::post(ctx_.get(), [this]() { timer_.cancel(); });
    }

    void
    onAmendmentBlock()
    {
        state_.get().isAmendmentBlocked = true;
        startReportingTimer();
    }

private:
    void
    startReportingTimer()
    {
        action_();

        timer_.expires_after(interval_);
        timer_.async_wait([this](auto ec) {
            if (!ec)
                boost::asio::post(ctx_.get(), [this] { startReportingTimer(); });
        });
    }
};

} // namespace etl::detail
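A minimal sketch of how this handler is meant to be wired up, based only on the interface above; the three-second run duration is an arbitrary choice for illustration.

```cpp
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>

#include <boost/asio/io_context.hpp>

#include <chrono>

int main()
{
    boost::asio::io_context ioc;
    etl::SystemState state;

    // Uses the default AmendmentBlockAction and the default 1s interval.
    etl::detail::AmendmentBlockHandler<> handler{ioc, state};

    // Flags the state and starts logging the fatal message once per interval.
    handler.onAmendmentBlock();

    ioc.run_for(std::chrono::seconds(3)); // let a few reports fire
}
```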
src/etl/impl/AsyncData.h: new file, 182 lines
@@ -0,0 +1,182 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <etl/NFTHelpers.h>
#include <util/log/Logger.h>

#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <grpcpp/grpcpp.h>

namespace etl::detail {

class AsyncCallData
{
    util::Logger log_{"ETL"};

    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;

    org::xrpl::rpc::v1::GetLedgerDataRequest request_;
    std::unique_ptr<grpc::ClientContext> context_;

    grpc::Status status_;
    unsigned char nextPrefix_;

    std::string lastKey_;

public:
    AsyncCallData(uint32_t seq, ripple::uint256 const& marker, std::optional<ripple::uint256> const& nextMarker)
    {
        request_.mutable_ledger()->set_sequence(seq);
        if (marker.isNonZero())
        {
            request_.set_marker(marker.data(), marker.size());
        }
        request_.set_user("ETL");
        nextPrefix_ = 0x00;
        if (nextMarker)
            nextPrefix_ = nextMarker->data()[0];

        unsigned char prefix = marker.data()[0];

        LOG(log_.debug()) << "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
                          << " . prefix = " << ripple::strHex(std::string(1, prefix))
                          << " . nextPrefix_ = " << ripple::strHex(std::string(1, nextPrefix_));

        assert(nextPrefix_ > prefix || nextPrefix_ == 0x00);

        cur_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        next_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        context_ = std::make_unique<grpc::ClientContext>();
    }

    enum class CallStatus { MORE, DONE, ERRORED };

    CallStatus
    process(
        std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
        grpc::CompletionQueue& cq,
        BackendInterface& backend,
        bool abort,
        bool cacheOnly = false)
    {
        LOG(log_.trace()) << "Processing response. "
                          << "Marker prefix = " << getMarkerPrefix();
        if (abort)
        {
            LOG(log_.error()) << "AsyncCallData aborted";
            return CallStatus::ERRORED;
        }
        if (!status_.ok())
        {
            LOG(log_.error()) << "AsyncCallData status_ not ok: "
                              << " code = " << status_.error_code() << " message = " << status_.error_message();
            return CallStatus::ERRORED;
        }
        if (!next_->is_unlimited())
        {
            LOG(log_.warn()) << "AsyncCallData is_unlimited is false. Make sure "
                                "secure_gateway is set correctly at the ETL source";
        }

        std::swap(cur_, next_);

        bool more = true;

        // if no marker returned, we are done
        if (cur_->marker().size() == 0)
            more = false;

        // if returned marker is greater than our end, we are done
        unsigned char prefix = cur_->marker()[0];
        if (nextPrefix_ != 0x00 && prefix >= nextPrefix_)
            more = false;

        // if we are not done, make the next async call
        if (more)
        {
            request_.set_marker(std::move(cur_->marker()));
            call(stub, cq);
        }

        auto const numObjects = cur_->ledger_objects().objects_size();
        LOG(log_.debug()) << "Writing " << numObjects << " objects";

        std::vector<data::LedgerObject> cacheUpdates;
        cacheUpdates.reserve(numObjects);

        for (int i = 0; i < numObjects; ++i)
        {
            auto& obj = *(cur_->mutable_ledger_objects()->mutable_objects(i));
            if (!more && nextPrefix_ != 0x00)
            {
                if (static_cast<unsigned char>(obj.key()[0]) >= nextPrefix_)
                    continue;
            }
            cacheUpdates.push_back(
                {*ripple::uint256::fromVoidChecked(obj.key()),
                 {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
            if (!cacheOnly)
            {
                if (lastKey_.size())
                    backend.writeSuccessor(std::move(lastKey_), request_.ledger().sequence(), std::string{obj.key()});
                lastKey_ = obj.key();
                backend.writeNFTs(getNFTDataFromObj(request_.ledger().sequence(), obj.key(), obj.data()));
                backend.writeLedgerObject(
                    std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data()));
            }
        }
        backend.cache().update(cacheUpdates, request_.ledger().sequence(), cacheOnly);
        LOG(log_.debug()) << "Wrote " << numObjects << " objects. Got more: " << (more ? "YES" : "NO");

        return more ? CallStatus::MORE : CallStatus::DONE;
    }

    void
    call(std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub, grpc::CompletionQueue& cq)
    {
        context_ = std::make_unique<grpc::ClientContext>();

        std::unique_ptr<grpc::ClientAsyncResponseReader<org::xrpl::rpc::v1::GetLedgerDataResponse>> rpc(
            stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq));

        rpc->StartCall();

        rpc->Finish(next_.get(), &status_, this);
    }

    std::string
    getMarkerPrefix()
    {
        if (next_->marker().size() == 0)
            return "";
        else
            return ripple::strHex(std::string{next_->marker().data()[0]});
    }

    std::string
    getLastKey()
    {
        return lastKey_;
    }
};

} // namespace etl::detail
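The process()/call() pair above follows the standard gRPC completion-queue pattern: call() registers this object as the tag, and the owner drains the queue and feeds each finished tag back into process(). A hedged sketch of such a driving loop (the actual loop lives elsewhere in the ETL code; numPending, stub, backend, and cacheOnly are assumed to be set up by the caller):

// Illustrative only: pump the completion queue until every AsyncCallData
// shard reports DONE or ERRORED.
void* tag = nullptr;
bool ok = false;
while (numPending > 0 && cq.Next(&tag, &ok))
{
    auto* callData = static_cast<etl::detail::AsyncCallData*>(tag);
    auto const status = callData->process(stub, cq, backend, /* abort = */ !ok, cacheOnly);
    if (status != etl::detail::AsyncCallData::CallStatus::MORE)
        --numPending;  // this shard of the keyspace is finished
}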
@@ -19,22 +19,22 @@

#pragma once

#include <log/Logger.h>
#include <data/BackendInterface.h>
#include <util/log/Logger.h>

#include <ripple/ledger/ReadView.h>
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/algorithm/string.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/websocket.hpp>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

#include <chrono>
#include <mutex>
#include <thread>

namespace clio::detail {
namespace etl::detail {

/**
 * @brief Cache loading interface
@@ -44,7 +44,7 @@ class CacheLoader
{
    enum class LoadStyle { ASYNC, SYNC, NOT_AT_ALL };

    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    std::reference_wrapper<boost::asio::io_context> ioContext_;
    std::shared_ptr<BackendInterface> backend_;
@@ -73,7 +73,7 @@ class CacheLoader

public:
    CacheLoader(
        clio::Config const& config,
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> const& backend,
        CacheType& ledgerCache)
@@ -132,7 +132,7 @@ public:
        if (cacheLoadStyle_ == LoadStyle::NOT_AT_ALL)
        {
            cache_.get().setDisabled();
            log_.warn() << "Cache is disabled. Not loading";
            LOG(log_.warn()) << "Cache is disabled. Not loading";
            return;
        }

@@ -165,10 +165,10 @@ public:
        // If loading synchronously, poll cache until full
        while (cacheLoadStyle_ == LoadStyle::SYNC && not cache_.get().isFull())
        {
            log_.debug() << "Cache not full. Cache size = " << cache_.get().size() << ". Sleeping ...";
            LOG(log_.debug()) << "Cache not full. Cache size = " << cache_.get().size() << ". Sleeping ...";
            std::this_thread::sleep_for(std::chrono::seconds(10));
            if (cache_.get().isFull())
                log_.info() << "Cache is full. Cache size = " << cache_.get().size();
                LOG(log_.info()) << "Cache is full. Cache size = " << cache_.get().size();
        }
    }

@@ -184,9 +184,9 @@ private:
        uint32_t ledgerIndex,
        std::string const& ip,
        std::string const& port,
        boost::asio::yield_context& yield)
        boost::asio::yield_context yield)
    {
        log_.info() << "Loading cache from peer. ip = " << ip << " . port = " << port;
        LOG(log_.info()) << "Loading cache from peer. ip = " << ip << " . port = " << port;
        namespace beast = boost::beast;          // from <boost/beast.hpp>
        namespace http = beast::http;            // from <boost/beast/http.hpp>
        namespace websocket = beast::websocket;  // from
@@ -198,7 +198,7 @@ private:
        // These objects perform our I/O
        tcp::resolver resolver{ioContext_.get()};

        log_.trace() << "Creating websocket";
        LOG(log_.trace()) << "Creating websocket";
        auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioContext_.get());

        // Look up the domain name
@@ -206,13 +206,13 @@ private:
        if (ec)
            return {};

        log_.trace() << "Connecting websocket";
        LOG(log_.trace()) << "Connecting websocket";
        // Make the connection on the IP address we get from a lookup
        ws->next_layer().async_connect(results, yield[ec]);
        if (ec)
            return false;

        log_.trace() << "Performing websocket handshake";
        LOG(log_.trace()) << "Performing websocket handshake";
        // Perform the websocket handshake
        ws->async_handshake(ip, "/", yield[ec]);
        if (ec)
@@ -220,7 +220,7 @@ private:

        std::optional<boost::json::value> marker;

        log_.trace() << "Sending request";
        LOG(log_.trace()) << "Sending request";
        auto getRequest = [&](auto marker) {
            boost::json::object request = {
                {"command", "ledger_data"},
@@ -242,7 +242,7 @@ private:
            ws->async_write(net::buffer(boost::json::serialize(getRequest(marker))), yield[ec]);
            if (ec)
            {
                log_.error() << "error writing = " << ec.message();
                LOG(log_.error()) << "error writing = " << ec.message();
                return false;
            }

@@ -250,7 +250,7 @@ private:
            ws->async_read(buffer, yield[ec]);
            if (ec)
            {
                log_.error() << "error reading = " << ec.message();
                LOG(log_.error()) << "error reading = " << ec.message();
                return false;
            }

@@ -259,27 +259,28 @@ private:

            if (!parsed.is_object())
            {
                log_.error() << "Error parsing response: " << raw;
                LOG(log_.error()) << "Error parsing response: " << raw;
                return false;
            }
            log_.trace() << "Successfully parsed response " << parsed;
            LOG(log_.trace()) << "Successfully parsed response " << parsed;

            if (auto const& response = parsed.as_object(); response.contains("error"))
            {
                log_.error() << "Response contains error: " << response;
                LOG(log_.error()) << "Response contains error: " << response;
                auto const& err = response.at("error");
                if (err.is_string() && err.as_string() == "lgrNotFound")
                {
                    ++numAttempts;
                    if (numAttempts >= 5)
                    {
                        log_.error() << " ledger not found at peer after 5 attempts. "
                                        "peer = "
                                     << ip << " ledger = " << ledgerIndex
                                     << ". Check your config and the health of the peer";
                        LOG(log_.error()) << " ledger not found at peer after 5 attempts. "
                                             "peer = "
                                          << ip << " ledger = " << ledgerIndex
                                          << ". Check your config and the health of the peer";
                        return false;
                    }
                    log_.warn() << "Ledger not found. ledger = " << ledgerIndex << ". Sleeping and trying again";
                    LOG(log_.warn()) << "Ledger not found. ledger = " << ledgerIndex
                                     << ". Sleeping and trying again";
                    std::this_thread::sleep_for(std::chrono::seconds(1));
                    continue;
                }
@@ -290,7 +291,7 @@ private:

            if (!response.contains("cache_full") || !response.at("cache_full").as_bool())
            {
                log_.error() << "cache not full for clio node. ip = " << ip;
                LOG(log_.error()) << "cache not full for clio node. ip = " << ip;
                return false;
            }
            if (response.contains("marker"))
@@ -300,17 +301,17 @@ private:

            auto const& state = response.at("state").as_array();

            std::vector<Backend::LedgerObject> objects;
            std::vector<data::LedgerObject> objects;
            objects.reserve(state.size());
            for (auto const& ledgerObject : state)
            {
                auto const& obj = ledgerObject.as_object();

                Backend::LedgerObject stateObject = {};
                data::LedgerObject stateObject = {};

                if (!stateObject.key.parseHex(obj.at("index").as_string().c_str()))
                {
                    log_.error() << "failed to parse object id";
                    LOG(log_.error()) << "failed to parse object id";
                    return false;
                }
                boost::algorithm::unhex(obj.at("data").as_string().c_str(), std::back_inserter(stateObject.blob));
@@ -319,17 +320,17 @@ private:
            cache_.get().update(objects, ledgerIndex, true);

            if (marker)
                log_.debug() << "At marker " << *marker;
                LOG(log_.debug()) << "At marker " << *marker;
        } while (marker || !started);

        log_.info() << "Finished downloading ledger from clio node. ip = " << ip;
        LOG(log_.info()) << "Finished downloading ledger from clio node. ip = " << ip;

        cache_.get().setFull();
        return true;
    }
    catch (std::exception const& e)
    {
        log_.error() << "Encountered exception : " << e.what() << " - ip = " << ip;
        LOG(log_.error()) << "Encountered exception : " << e.what() << " - ip = " << ip;
        return false;
    }
}
@@ -337,14 +338,14 @@ private:
    void
    loadCacheFromDb(uint32_t seq)
    {
        std::vector<Backend::LedgerObject> diff;
        std::vector<data::LedgerObject> diff;
        std::vector<std::optional<ripple::uint256>> cursors;

        auto append = [](auto&& a, auto&& b) { a.insert(std::end(a), std::begin(b), std::end(b)); };

        for (size_t i = 0; i < numCacheDiffs_; ++i)
        {
            append(diff, Backend::synchronousAndRetryOnTimeout([&](auto yield) {
            append(diff, data::synchronousAndRetryOnTimeout([&](auto yield) {
                       return backend_->fetchLedgerDiff(seq - i, yield);
                   }));
        }
@@ -356,28 +357,28 @@ private:
        diff.erase(std::unique(diff.begin(), diff.end(), [](auto a, auto b) { return a.key == b.key; }), diff.end());

        cursors.push_back({});
        for (auto& obj : diff)
        for (auto const& obj : diff)
            if (obj.blob.size())
                cursors.push_back({obj.key});
        cursors.push_back({});

        std::stringstream cursorStr;
        for (auto& c : cursors)
        for (auto const& c : cursors)
            if (c)
                cursorStr << ripple::strHex(*c) << ", ";

        log_.info() << "Loading cache. num cursors = " << cursors.size() - 1;
        log_.trace() << "cursors = " << cursorStr.str();
        LOG(log_.info()) << "Loading cache. num cursors = " << cursors.size() - 1;
        LOG(log_.trace()) << "cursors = " << cursorStr.str();

        thread_ = std::thread{[this, seq, cursors]() {
        thread_ = std::thread{[this, seq, cursors = std::move(cursors)]() {
            auto startTime = std::chrono::system_clock::now();
            auto markers = std::make_shared<std::atomic_int>(0);
            auto numRemaining = std::make_shared<std::atomic_int>(cursors.size() - 1);

            for (size_t i = 0; i < cursors.size() - 1; ++i)
            {
                auto const start = cursors[i];
                auto const end = cursors[i + 1];
                auto const start = cursors.at(i);
                auto const end = cursors.at(i + 1);

                markers->wait(numCacheMarkers_);
                ++(*markers);
@@ -385,14 +386,14 @@ private:
                boost::asio::spawn(
                    ioContext_.get(),
                    [this, seq, start, end, numRemaining, startTime, markers](boost::asio::yield_context yield) {
                        std::optional<ripple::uint256> cursor = start;
                        auto cursor = start;
                        std::string cursorStr =
                            cursor.has_value() ? ripple::strHex(cursor.value()) : ripple::strHex(Backend::firstKey);
                        log_.debug() << "Starting a cursor: " << cursorStr << " markers = " << *markers;
                            cursor.has_value() ? ripple::strHex(cursor.value()) : ripple::strHex(data::firstKey);
                        LOG(log_.debug()) << "Starting a cursor: " << cursorStr << " markers = " << *markers;

                        while (not stopping_)
                        {
                            auto res = Backend::retryOnTimeout([this, seq, &cursor, &yield]() {
                            auto res = data::retryOnTimeout([this, seq, &cursor, yield]() {
                                return backend_->fetchLedgerPage(cursor, seq, cachePageFetchSize_, false, yield);
                            });

@@ -401,9 +402,9 @@ private:
                            if (!res.cursor || (end && *(res.cursor) > *end))
                                break;

                            log_.trace() << "Loading cache. cache size = " << cache_.get().size()
                                         << " - cursor = " << ripple::strHex(res.cursor.value())
                                         << " start = " << cursorStr << " markers = " << *markers;
                            LOG(log_.trace()) << "Loading cache. cache size = " << cache_.get().size()
                                              << " - cursor = " << ripple::strHex(res.cursor.value())
                                              << " start = " << cursorStr << " markers = " << *markers;

                            cursor = std::move(res.cursor);
                        }
@@ -416,14 +417,14 @@ private:
                        auto endTime = std::chrono::system_clock::now();
                        auto duration = std::chrono::duration_cast<std::chrono::seconds>(endTime - startTime);

                        log_.info() << "Finished loading cache. cache size = " << cache_.get().size() << ". Took "
                                    << duration.count() << " seconds";
                        LOG(log_.info()) << "Finished loading cache. cache size = " << cache_.get().size()
                                         << ". Took " << duration.count() << " seconds";
                        cache_.get().setFull();
                    }
                    else
                    {
                        log_.info() << "Finished a cursor. num remaining = " << *numRemaining
                                    << " start = " << cursorStr << " markers = " << *markers;
                        LOG(log_.info()) << "Finished a cursor. num remaining = " << *numRemaining
                                         << " start = " << cursorStr << " markers = " << *markers;
                    }
                });
        }
@@ -431,4 +432,4 @@ private:
    }
};

} // namespace clio::detail
} // namespace etl::detail
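loadCacheFromDb above derives its parallel cursors from recent ledger diffs: keys seen in the diffs become range boundaries, with empty optionals as keyspace sentinels. A toy illustration of the partitioning idea (standalone; the keys are made up):

// N boundaries produce N + 1 parallel ranges for the cache-loading workers.
std::vector<std::optional<std::string>> cursors;
cursors.push_back({});        // open start of the keyspace
cursors.push_back("key_B");   // boundary taken from a recent diff
cursors.push_back("key_F");   // boundary taken from a recent diff
cursors.push_back({});        // open end of the keyspace
// Workers then load [begin, key_B), [key_B, key_F), [key_F, end) concurrently.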
@@ -20,12 +20,12 @@
#pragma once

#include <etl/ETLHelpers.h>
#include <log/Logger.h>
#include <util/log/Logger.h>

#include <memory>
#include <vector>

namespace clio::detail {
namespace etl::detail {

/**
 * @brief A collection of thread safe async queues used by Extractor and Transformer to communicate
@@ -40,7 +40,7 @@ public:
    constexpr static auto TOTAL_MAX_IN_QUEUE = 1000u;

private:
    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    uint32_t stride_;
    uint32_t startSequence_;
@@ -126,9 +126,9 @@ private:
    std::shared_ptr<QueueType>
    getQueue(uint32_t sequence)
    {
        log_.debug() << "Grabbing extraction queue for " << sequence << "; start was " << startSequence_;
        LOG(log_.debug()) << "Grabbing extraction queue for " << sequence << "; start was " << startSequence_;
        return queues_[(sequence - startSequence_) % stride_];
    }
};

} // namespace clio::detail
} // namespace etl::detail
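The getQueue() arithmetic above distributes ledger sequences round-robin across stride_ queues, so each extractor/transformer pair owns every stride-th ledger. A small check of the mapping (values are made up):

// With stride = 4 and startSequence = 100: sequences 100, 104, 108, ...
// land on queue 0; 101, 105, 109, ... on queue 1; and so on.
constexpr uint32_t stride = 4;
constexpr uint32_t startSequence = 100;
static_assert((102 - startSequence) % stride == 2);  // sequence 102 -> queue 2
static_assert((106 - startSequence) % stride == 2);  // queue 2 also handles 106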
@@ -20,8 +20,8 @@
#pragma once

#include <etl/SystemState.h>
#include <log/Logger.h>
#include <util/Profiler.h>
#include <util/log/Logger.h>

#include <ripple/beast/core/CurrentThreadName.h>

@@ -29,7 +29,7 @@
#include <mutex>
#include <thread>

namespace clio::detail {
namespace etl::detail {

/**
 * @brief Extractor thread that fetches gRPC data and enqueues it on the DataPipeType
@@ -37,7 +37,7 @@ namespace clio::detail {
template <typename DataPipeType, typename NetworkValidatedLedgersType, typename LedgerFetcherType>
class Extractor
{
    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    std::reference_wrapper<DataPipeType> pipe_;
    std::shared_ptr<NetworkValidatedLedgersType> networkValidatedLedgers_;
@@ -90,7 +90,7 @@ private:

    while (!shouldFinish(currentSequence) && networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence))
    {
        auto [fetchResponse, time] = util::timed<std::chrono::duration<double>>(
        auto [fetchResponse, time] = ::util::timed<std::chrono::duration<double>>(
            [this, currentSequence]() { return ledgerFetcher_.get().fetchDataAndDiff(currentSequence); });
        totalTime += time;

@@ -103,9 +103,9 @@ private:

        // TODO: extract this part into a strategy perhaps
        auto const tps = fetchResponse->transactions_list().transactions_size() / time;
        log_.info() << "Extract phase time = " << time << "; Extract phase tps = " << tps
                    << "; Avg extract time = " << totalTime / (currentSequence - startSequence_ + 1)
                    << "; seq = " << currentSequence;
        LOG(log_.info()) << "Extract phase time = " << time << "; Extract phase tps = " << tps
                         << "; Avg extract time = " << totalTime / (currentSequence - startSequence_ + 1)
                         << "; seq = " << currentSequence;

        pipe_.get().push(currentSequence, std::move(fetchResponse));
        currentSequence += pipe_.get().getStride();
@@ -137,4 +137,4 @@ private:
    }
};

} // namespace clio::detail
} // namespace etl::detail
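The extractor loop wraps each fetch in util::timed, which runs a callable and returns its result paired with the elapsed time. A simplified sketch of that idea (illustrative only, not the project's actual implementation):

// Sketch of the timing wrapper used in the extract loop above.
template <typename DurationT, typename FnT>
auto timedSketch(FnT&& fn)
{
    auto const start = std::chrono::steady_clock::now();
    auto result = fn();
    auto const elapsed =
        std::chrono::duration_cast<DurationT>(std::chrono::steady_clock::now() - start);
    return std::make_pair(std::move(result), elapsed.count());
}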
@@ -24,12 +24,12 @@
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

namespace clio::detail {
namespace etl::detail {

void
ForwardCache::freshen()
{
    log_.trace() << "Freshening ForwardCache";
    LOG(log_.trace()) << "Freshening ForwardCache";

    auto numOutstanding = std::make_shared<std::atomic_uint>(latestForwarded_.size());

@@ -70,7 +70,7 @@ ForwardCache::get(boost::json::object const& request) const

    if (!command)
        return {};
    if (RPC::specifiesCurrentOrClosedLedger(request))
    if (rpc::specifiesCurrentOrClosedLedger(request))
        return {};

    std::shared_lock lk(mtx_);
@@ -80,4 +80,4 @@ ForwardCache::get(boost::json::object const& request) const
    return {latestForwarded_.at(*command)};
}

} // namespace clio::detail
} // namespace etl::detail

@@ -19,10 +19,10 @@

#pragma once

#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <data/BackendInterface.h>
#include <etl/ETLHelpers.h>
#include <log/Logger.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <boost/asio.hpp>
#include <boost/json.hpp>
@@ -33,7 +33,7 @@

class Source;

namespace clio::detail {
namespace etl::detail {

/**
 * @brief Cache for rippled responses
@@ -42,20 +42,20 @@ class ForwardCache
{
    using ResponseType = std::optional<boost::json::object>;

    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    mutable std::shared_mutex mtx_;
    std::unordered_map<std::string, ResponseType> latestForwarded_;
    boost::asio::io_context::strand strand_;
    Source const& source_;
    boost::asio::strand<boost::asio::io_context::executor_type> strand_;
    etl::Source const& source_;
    std::uint32_t duration_ = 10;

    void
    clear();

public:
    ForwardCache(clio::Config const& config, boost::asio::io_context& ioc, Source const& source)
        : strand_(ioc), source_(source)
    ForwardCache(util::Config const& config, boost::asio::io_context& ioc, Source const& source)
        : strand_(boost::asio::make_strand(ioc)), source_(source)
    {
        if (config.contains("cache"))
        {
@@ -79,4 +79,4 @@ public:
    get(boost::json::object const& command) const;
};

} // namespace clio::detail
} // namespace etl::detail
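A recurring change in these hunks is the migration from the deprecated boost::asio::io_context::strand to an executor-based strand. A minimal sketch of the new pattern (the handler body is illustrative):

// Executor-based strand: serializes handlers without a dedicated thread.
boost::asio::io_context ioc;
auto strand = boost::asio::make_strand(ioc);  // strand<io_context::executor_type>
boost::asio::post(strand, [] { /* runs serialized with other handlers on this strand */ });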
@@ -19,17 +19,16 @@

#pragma once

#include <backend/BackendInterface.h>
#include <data/BackendInterface.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <util/log/Logger.h>

#include <ripple/ledger/ReadView.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <grpcpp/grpcpp.h>

#include <optional>

namespace clio::detail {
namespace etl::detail {

/**
 * @brief GRPC Ledger data fetcher
@@ -41,7 +40,7 @@ public:
    using OptionalGetLedgerResponseType = typename LoadBalancerType::OptionalGetLedgerResponseType;

private:
    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<LoadBalancerType> loadBalancer_;
@@ -65,13 +64,13 @@ public:
     * @return ledger header and transaction+metadata blobs; empty optional if the server is shutting down
     */
    OptionalGetLedgerResponseType
    fetchData(uint32_t seq)
    fetchData(uint32_t sequence)
    {
        log_.debug() << "Attempting to fetch ledger with sequence = " << seq;
        LOG(log_.debug()) << "Attempting to fetch ledger with sequence = " << sequence;

        auto response = loadBalancer_->fetchLedger(seq, false, false);
        auto response = loadBalancer_->fetchLedger(sequence, false, false);
        if (response)
            log_.trace() << "GetLedger reply = " << response->DebugString();
            LOG(log_.trace()) << "GetLedger reply = " << response->DebugString();
        return response;
    }

@@ -86,17 +85,17 @@ public:
     * this ledger and the parent; Empty optional if the server is shutting down
     */
    OptionalGetLedgerResponseType
    fetchDataAndDiff(uint32_t seq)
    fetchDataAndDiff(uint32_t sequence)
    {
        log_.debug() << "Attempting to fetch ledger with sequence = " << seq;
        LOG(log_.debug()) << "Attempting to fetch ledger with sequence = " << sequence;

        auto response = loadBalancer_->fetchLedger(
            seq, true, !backend_->cache().isFull() || backend_->cache().latestLedgerSequence() >= seq);
            sequence, true, !backend_->cache().isFull() || backend_->cache().latestLedgerSequence() >= sequence);
        if (response)
            log_.trace() << "GetLedger reply = " << response->DebugString();
            LOG(log_.trace()) << "GetLedger reply = " << response->DebugString();

        return response;
    }
};

} // namespace clio::detail
} // namespace etl::detail
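A hedged sketch of how the fetcher is typically driven from an extractor-style loop (the surrounding wiring, startSequence, and the process() handler are assumed, not part of this file):

// Fetch each ledger plus its diff; an empty optional signals shutdown.
uint32_t seq = startSequence;
while (auto data = fetcher.fetchDataAndDiff(seq))
{
    process(std::move(*data));  // hypothetical downstream handler
    ++seq;
}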
@@ -19,19 +19,21 @@

#pragma once

#include <backend/BackendInterface.h>
#include <data/BackendInterface.h>
#include <etl/NFTHelpers.h>
#include <etl/SystemState.h>
#include <etl/impl/LedgerFetcher.h>
#include <log/Logger.h>
#include <util/LedgerUtils.h>
#include <util/Profiler.h>
#include <util/log/Logger.h>

#include <ripple/beast/core/CurrentThreadName.h>
#include <ripple/ledger/ReadView.h>

#include <memory>

/**
 * @brief Account transactions, NFT transactions and NFT data bundled together.
 */
struct FormattedTransactionsData
{
    std::vector<AccountTransactionsData> accountTxData;
@@ -39,7 +41,7 @@ struct FormattedTransactionsData
    std::vector<NFTsData> nfTokensData;
};

namespace clio::detail {
namespace etl::detail {

/**
 * @brief Loads ledger data into the DB
@@ -53,7 +55,7 @@ public:
    using RawLedgerObjectType = typename LoadBalancerType::RawLedgerObjectType;

private:
    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<LoadBalancerType> loadBalancer_;
@@ -85,7 +87,7 @@ public:
     * nft_token_transactions tables (mostly transaction hashes, corresponding nodestore hashes and affected accounts)
     */
    FormattedTransactionsData
    insertTransactions(ripple::LedgerInfo const& ledger, GetLedgerResponseType& data)
    insertTransactions(ripple::LedgerHeader const& ledger, GetLedgerResponseType& data)
    {
        FormattedTransactionsData result;

@@ -96,7 +98,7 @@ public:
            ripple::SerialIter it{raw->data(), raw->size()};
            ripple::STTx sttx{it};

            log_.trace() << "Inserting transaction = " << sttx.getTransactionID();
            LOG(log_.trace()) << "Inserting transaction = " << sttx.getTransactionID();

            ripple::TxMeta txMeta{sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};

@@ -105,9 +107,8 @@ public:
            if (maybeNFT)
                result.nfTokensData.push_back(*maybeNFT);

            auto journal = ripple::debugLog();
            result.accountTxData.emplace_back(txMeta, sttx.getTransactionID(), journal);
            std::string keyStr{(const char*)sttx.getTransactionID().data(), 32};
            result.accountTxData.emplace_back(txMeta, sttx.getTransactionID());
            std::string keyStr{reinterpret_cast<const char*>(sttx.getTransactionID().data()), 32};
            backend_->writeTransaction(
                std::move(keyStr),
                ledger.seq,
@@ -140,14 +141,14 @@ public:
     * @param sequence the sequence of the ledger to download
     * @return The ledger downloaded, with a full transaction and account state map
     */
    std::optional<ripple::LedgerInfo>
    std::optional<ripple::LedgerHeader>
    loadInitialLedger(uint32_t sequence)
    {
        // check that database is actually empty
        auto rng = backend_->hardFetchLedgerRangeNoThrow();
        if (rng)
        {
            log_.fatal() << "Database is not empty";
            LOG(log_.fatal()) << "Database is not empty";
            assert(false);
            return {};
        }
@@ -158,20 +159,20 @@ public:
        if (!ledgerData)
            return {};

        ripple::LedgerInfo lgrInfo = util::deserializeHeader(ripple::makeSlice(ledgerData->ledger_header()));
        ripple::LedgerHeader lgrInfo = ::util::deserializeHeader(ripple::makeSlice(ledgerData->ledger_header()));

        log_.debug() << "Deserialized ledger header. " << util::toString(lgrInfo);
        LOG(log_.debug()) << "Deserialized ledger header. " << ::util::toString(lgrInfo);

        auto timeDiff = util::timed<std::chrono::duration<double>>([this, sequence, &lgrInfo, &ledgerData]() {
        auto timeDiff = ::util::timed<std::chrono::duration<double>>([this, sequence, &lgrInfo, &ledgerData]() {
            backend_->startWrites();

            log_.debug() << "Started writes";
            LOG(log_.debug()) << "Started writes";

            backend_->writeLedger(lgrInfo, std::move(*ledgerData->mutable_ledger_header()));

            log_.debug() << "Wrote ledger";
            LOG(log_.debug()) << "Wrote ledger";
            FormattedTransactionsData insertTxResult = insertTransactions(lgrInfo, *ledgerData);
            log_.debug() << "Inserted txns";
            LOG(log_.debug()) << "Inserted txns";

            // download the full account state map. This function downloads full
            // ledger data and pushes the downloaded data into the writeQueue.
@@ -185,57 +186,59 @@ public:
            size_t numWrites = 0;
            backend_->cache().setFull();

            auto seconds = util::timed<std::chrono::seconds>([this, edgeKeys = &edgeKeys, sequence, &numWrites]() {
                for (auto& key : *edgeKeys)
                {
                    log_.debug() << "Writing edge key = " << ripple::strHex(key);
                    auto succ = backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence);
                    if (succ)
                        backend_->writeSuccessor(std::move(key), sequence, uint256ToString(succ->key));
                }

                ripple::uint256 prev = Backend::firstKey;
                while (auto cur = backend_->cache().getSuccessor(prev, sequence))
                {
                    assert(cur);
                    if (prev == Backend::firstKey)
                        backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key));

                    if (isBookDir(cur->key, cur->blob))
                    {
                        auto base = getBookBase(cur->key);
                        // make sure the base is not an actual object
                        if (!backend_->cache().get(cur->key, sequence))
                        {
                            auto succ = backend_->cache().getSuccessor(base, sequence);
                            assert(succ);
                            if (succ->key == cur->key)
                            {
                                log_.debug() << "Writing book successor = " << ripple::strHex(base) << " - "
                                             << ripple::strHex(cur->key);

                                backend_->writeSuccessor(
                                    uint256ToString(base), sequence, uint256ToString(cur->key));
                            }
                        }

                        ++numWrites;
                    }

                    prev = std::move(cur->key);
                    if (numWrites % 100000 == 0 && numWrites != 0)
                        log_.info() << "Wrote " << numWrites << " book successors";
                }

                backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(Backend::lastKey));
                ++numWrites;
            });

            log_.info() << "Looping through cache and submitting all writes took " << seconds
                        << " seconds. numWrites = " << std::to_string(numWrites);
            auto seconds =
                ::util::timed<std::chrono::seconds>([this, edgeKeys = &edgeKeys, sequence, &numWrites]() {
                    for (auto& key : *edgeKeys)
                    {
                        LOG(log_.debug()) << "Writing edge key = " << ripple::strHex(key);
                        auto succ =
                            backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence);
                        if (succ)
                            backend_->writeSuccessor(std::move(key), sequence, uint256ToString(succ->key));
                    }

                    ripple::uint256 prev = data::firstKey;
                    while (auto cur = backend_->cache().getSuccessor(prev, sequence))
                    {
                        assert(cur);
                        if (prev == data::firstKey)
                            backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key));

                        if (isBookDir(cur->key, cur->blob))
                        {
                            auto base = getBookBase(cur->key);
                            // make sure the base is not an actual object
                            if (!backend_->cache().get(cur->key, sequence))
                            {
                                auto succ = backend_->cache().getSuccessor(base, sequence);
                                assert(succ);
                                if (succ->key == cur->key)
                                {
                                    LOG(log_.debug()) << "Writing book successor = " << ripple::strHex(base)
                                                      << " - " << ripple::strHex(cur->key);

                                    backend_->writeSuccessor(
                                        uint256ToString(base), sequence, uint256ToString(cur->key));
                                }
                            }

                            ++numWrites;
                        }

                        prev = std::move(cur->key);
                        if (numWrites % 100000 == 0 && numWrites != 0)
                            LOG(log_.info()) << "Wrote " << numWrites << " book successors";
                    }

                    backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(data::lastKey));
                    ++numWrites;
                });

            LOG(log_.info()) << "Looping through cache and submitting all writes took " << seconds
                             << " seconds. numWrites = " << std::to_string(numWrites);
        }

        log_.debug() << "Loaded initial ledger";
        LOG(log_.debug()) << "Loaded initial ledger";

        if (not state_.get().isStopping)
        {
@@ -247,9 +250,9 @@ public:
            backend_->finishWrites(sequence);
        });

        log_.debug() << "Time to download and store ledger = " << timeDiff;
        LOG(log_.debug()) << "Time to download and store ledger = " << timeDiff;
        return lgrInfo;
    }
};

} // namespace clio::detail
} // namespace etl::detail
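The writeSuccessor calls above maintain a linked list over the ledger's key space, bounded by the firstKey and lastKey sentinels. A toy model of the invariant (standalone, illustrative keys; not the project's data structures):

// Every key points at the next existing key; sentinels bound the list.
std::map<std::string, std::string> successors;  // key -> next key
successors["FIRST"] = "key_A";                  // sentinel -> first real object
successors["key_A"] = "key_B";
successors["key_B"] = "LAST";                   // last real object -> sentinel
// Deleting key_A re-links its predecessor to its successor:
successors["FIRST"] = successors["key_A"];      // FIRST -> key_B
successors.erase("key_A");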
@@ -19,17 +19,17 @@

#pragma once

#include <backend/BackendInterface.h>
#include <data/BackendInterface.h>
#include <etl/SystemState.h>
#include <log/Logger.h>
#include <util/LedgerUtils.h>
#include <util/Profiler.h>
#include <util/log/Logger.h>

#include <ripple/ledger/ReadView.h>
#include <ripple/protocol/LedgerHeader.h>

#include <chrono>

namespace clio::detail {
namespace etl::detail {

/**
 * @brief Publishes ledgers in a synchronized fashion.
@@ -45,9 +45,9 @@ namespace clio::detail {
template <typename SubscriptionManagerType>
class LedgerPublisher
{
    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    boost::asio::io_context::strand publishStrand_;
    boost::asio::strand<boost::asio::io_context::executor_type> publishStrand_;

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManagerType> subscriptions_;
@@ -69,9 +69,12 @@ public:
    LedgerPublisher(
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        SystemState const& state)
        : publishStrand_{ioc}, backend_{backend}, subscriptions_{subscriptions}, state_{std::cref(state)}
        : publishStrand_{boost::asio::make_strand(ioc)}
        , backend_{backend}
        , subscriptions_{subscriptions}
        , state_{std::cref(state)}
    {
    }

@@ -86,7 +89,7 @@ public:
    bool
    publish(uint32_t ledgerSequence, std::optional<uint32_t> maxAttempts)
    {
        log_.info() << "Attempting to publish ledger = " << ledgerSequence;
        LOG(log_.info()) << "Attempting to publish ledger = " << ledgerSequence;
        size_t numAttempts = 0;
        while (not state_.get().isStopping)
        {
@@ -94,14 +97,14 @@ public:

            if (!range || range->maxSequence < ledgerSequence)
            {
                log_.debug() << "Trying to publish. Could not find "
                                "ledger with sequence = "
                             << ledgerSequence;
                LOG(log_.debug()) << "Trying to publish. Could not find "
                                     "ledger with sequence = "
                                  << ledgerSequence;

                // We try maxAttempts times to publish the ledger, waiting one second in between each attempt.
                if (maxAttempts && numAttempts >= maxAttempts)
                {
                    log_.debug() << "Failed to publish ledger after " << numAttempts << " attempts.";
                    LOG(log_.debug()) << "Failed to publish ledger after " << numAttempts << " attempts.";
                    return false;
                }
                std::this_thread::sleep_for(std::chrono::seconds(1));
@@ -110,7 +113,7 @@ public:
            }
            else
            {
                auto lgr = Backend::synchronousAndRetryOnTimeout(
                auto lgr = data::synchronousAndRetryOnTimeout(
                    [&](auto yield) { return backend_->fetchLedgerBySequence(ledgerSequence, yield); });

                assert(lgr);
@@ -130,16 +133,16 @@ public:
     * @param lgrInfo the ledger to publish
     */
    void
    publish(ripple::LedgerInfo const& lgrInfo)
    publish(ripple::LedgerHeader const& lgrInfo)
    {
        boost::asio::post(publishStrand_, [this, lgrInfo = lgrInfo]() {
            log_.info() << "Publishing ledger " << std::to_string(lgrInfo.seq);
            LOG(log_.info()) << "Publishing ledger " << std::to_string(lgrInfo.seq);

            if (!state_.get().isWriting)
            {
                log_.info() << "Updating cache";
                LOG(log_.info()) << "Updating cache";

                std::vector<Backend::LedgerObject> diff = Backend::synchronousAndRetryOnTimeout(
                std::vector<data::LedgerObject> diff = data::synchronousAndRetryOnTimeout(
                    [&](auto yield) { return backend_->fetchLedgerDiff(lgrInfo.seq, yield); });

                backend_->cache().update(diff, lgrInfo.seq);  // todo: inject cache to update, don't use backend cache
@@ -153,10 +156,10 @@ public:
            // TODO: this probably should be a strategy
            if (age < 600)
            {
                std::optional<ripple::Fees> fees = Backend::synchronousAndRetryOnTimeout(
                std::optional<ripple::Fees> fees = data::synchronousAndRetryOnTimeout(
                    [&](auto yield) { return backend_->fetchFees(lgrInfo.seq, yield); });

                std::vector<Backend::TransactionAndMetadata> transactions = Backend::synchronousAndRetryOnTimeout(
                std::vector<data::TransactionAndMetadata> transactions = data::synchronousAndRetryOnTimeout(
                    [&](auto yield) { return backend_->fetchAllTransactionsInLedger(lgrInfo.seq, yield); });

                auto ledgerRange = backend_->fetchLedgerRange();
@@ -174,10 +177,10 @@ public:
                subscriptions_->pubBookChanges(lgrInfo, transactions);

                setLastPublishTime();
                log_.info() << "Published ledger " << std::to_string(lgrInfo.seq);
                LOG(log_.info()) << "Published ledger " << std::to_string(lgrInfo.seq);
            }
            else
                log_.info() << "Skipping publishing ledger " << std::to_string(lgrInfo.seq);
                LOG(log_.info()) << "Skipping publishing ledger " << std::to_string(lgrInfo.seq);
        });

        // we track latest publish-requested seq, not necessarily already published
@@ -249,4 +252,4 @@ private:
    }
};

} // namespace clio::detail
} // namespace etl::detail
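The retry contract of publish(sequence, maxAttempts) above: wait one second between attempts until the ledger appears in the database, giving up after maxAttempts if one is supplied. A short caller-side sketch (the publisher instance and sequence are assumed):

// Wait up to ~50 seconds (50 attempts, 1s apart) for the ledger to be
// written before giving up; false means the ledger never appeared.
bool const published = publisher.publish(ledgerSequence, std::optional<uint32_t>{50});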
@@ -19,22 +19,23 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <backend/BackendInterface.h>
|
||||
#include <data/BackendInterface.h>
|
||||
#include <etl/SystemState.h>
|
||||
#include <etl/impl/AmendmentBlock.h>
|
||||
#include <etl/impl/LedgerLoader.h>
|
||||
#include <log/Logger.h>
|
||||
#include <util/LedgerUtils.h>
|
||||
#include <util/Profiler.h>
|
||||
#include <util/log/Logger.h>
|
||||
|
||||
#include <ripple/beast/core/CurrentThreadName.h>
|
||||
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
|
||||
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
|
||||
#include <grpcpp/grpcpp.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
|
||||
namespace clio::detail {
|
||||
namespace etl::detail {
|
||||
|
||||
/*
|
||||
* TODO:
|
||||
@@ -45,20 +46,26 @@ namespace clio::detail {
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Transformer thread that prepares new ledger out of raw data from GRPC
|
||||
* @brief Transformer thread that prepares new ledger out of raw data from GRPC.
|
||||
*/
|
||||
template <typename DataPipeType, typename LedgerLoaderType, typename LedgerPublisherType>
|
||||
template <
|
||||
typename DataPipeType,
|
||||
typename LedgerLoaderType,
|
||||
typename LedgerPublisherType,
|
||||
typename AmendmentBlockHandlerType>
|
||||
class Transformer
|
||||
{
|
||||
using GetLedgerResponseType = typename LedgerLoaderType::GetLedgerResponseType;
|
||||
using RawLedgerObjectType = typename LedgerLoaderType::RawLedgerObjectType;
|
||||
|
||||
clio::Logger log_{"ETL"};
|
||||
util::Logger log_{"ETL"};
|
||||
|
||||
std::reference_wrapper<DataPipeType> pipe_;
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::reference_wrapper<LedgerLoaderType> loader_;
|
||||
std::reference_wrapper<LedgerPublisherType> publisher_;
|
||||
std::reference_wrapper<AmendmentBlockHandlerType> amendmentBlockHandler_;
|
||||
|
||||
uint32_t startSequence_;
|
||||
std::reference_wrapper<SystemState> state_; // shared state for ETL
|
||||
|
||||
@@ -66,7 +73,7 @@ class Transformer
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Create an instance of the transformer
|
||||
* @brief Create an instance of the transformer.
|
||||
*
|
||||
* This spawns a new thread that reads from the data pipe and writes ledgers to the DB using LedgerLoader and
|
||||
* LedgerPublisher.
|
||||
@@ -76,12 +83,14 @@ public:
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
LedgerLoaderType& loader,
|
||||
LedgerPublisherType& publisher,
|
||||
AmendmentBlockHandlerType& amendmentBlockHandler,
|
||||
uint32_t startSequence,
|
||||
SystemState& state)
|
||||
: pipe_(std::ref(pipe))
|
||||
: pipe_{std::ref(pipe)}
|
||||
, backend_{backend}
|
||||
, loader_(std::ref(loader))
|
||||
, publisher_(std::ref(publisher))
|
||||
, loader_{std::ref(loader)}
|
||||
, publisher_{std::ref(publisher)}
|
||||
, amendmentBlockHandler_{std::ref(amendmentBlockHandler)}
|
||||
, startSequence_{startSequence}
|
||||
, state_{std::ref(state)}
|
||||
{
|
||||
@@ -89,7 +98,7 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Joins the transformer thread
|
||||
* @brief Joins the transformer thread.
|
||||
*/
|
||||
~Transformer()
|
||||
{
|
||||
@@ -98,7 +107,7 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Block calling thread until transformer thread exits
|
||||
* @brief Block calling thread until transformer thread exits.
|
||||
*/
|
||||
void
|
||||
waitTillFinished()
|
||||
@@ -137,62 +146,72 @@ private:
|
||||
auto const end = std::chrono::system_clock::now();
|
||||
auto const duration = ((end - start).count()) / 1000000000.0;
|
||||
|
||||
log_.info() << "Load phase of etl : "
|
||||
<< "Successfully wrote ledger! Ledger info: " << util::toString(lgrInfo)
|
||||
<< ". txn count = " << numTxns << ". object count = " << numObjects
|
||||
<< ". load time = " << duration << ". load txns per second = " << numTxns / duration
|
||||
<< ". load objs per second = " << numObjects / duration;
|
||||
LOG(log_.info()) << "Load phase of etl : "
|
||||
<< "Successfully wrote ledger! Ledger info: " << util::toString(lgrInfo)
|
||||
<< ". txn count = " << numTxns << ". object count = " << numObjects
|
||||
<< ". load time = " << duration << ". load txns per second = " << numTxns / duration
|
||||
<< ". load objs per second = " << numObjects / duration;
|
||||
|
||||
// success is false if the ledger was already written
|
||||
publisher_.get().publish(lgrInfo);
|
||||
}
|
||||
else
|
||||
{
|
||||
log_.error() << "Error writing ledger. " << util::toString(lgrInfo);
|
||||
LOG(log_.error()) << "Error writing ledger. " << util::toString(lgrInfo);
|
||||
}
|
||||
|
||||
setWriteConflict(not success);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO update this documentation
|
||||
/**
|
||||
* @brief Build the next ledger using the previous ledger and the extracted data.
|
||||
* @note rawData should be data that corresponds to the ledger immediately following the previous seq.
|
||||
*
|
||||
* @param rawData data extracted from an ETL source
|
||||
* @return the newly built ledger and data to write to the database
|
||||
* @param rawData Data extracted from an ETL source
|
||||
* @return The newly built ledger and data to write to the database
|
||||
*/
|
||||
std::pair<ripple::LedgerInfo, bool>
|
||||
std::pair<ripple::LedgerHeader, bool>
|
||||
buildNextLedger(GetLedgerResponseType& rawData)
|
||||
{
|
||||
log_.debug() << "Beginning ledger update";
|
||||
ripple::LedgerInfo lgrInfo = util::deserializeHeader(ripple::makeSlice(rawData.ledger_header()));
|
||||
LOG(log_.debug()) << "Beginning ledger update";
|
||||
ripple::LedgerHeader lgrInfo = ::util::deserializeHeader(ripple::makeSlice(rawData.ledger_header()));
|
||||
|
||||
log_.debug() << "Deserialized ledger header. " << util::toString(lgrInfo);
|
||||
LOG(log_.debug()) << "Deserialized ledger header. " << ::util::toString(lgrInfo);
|
||||
backend_->startWrites();
|
||||
backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header()));
|
||||
|
||||
writeSuccessors(lgrInfo, rawData);
|
||||
updateCache(lgrInfo, rawData);
|
||||
std::optional<FormattedTransactionsData> insertTxResultOp;
|
||||
try
|
||||
{
|
||||
updateCache(lgrInfo, rawData);
|
||||
|
||||
log_.debug() << "Inserted/modified/deleted all objects. Number of objects = "
|
||||
<< rawData.ledger_objects().objects_size();
|
||||
LOG(log_.debug()) << "Inserted/modified/deleted all objects. Number of objects = "
|
||||
<< rawData.ledger_objects().objects_size();
|
||||
|
||||
auto insertTxResult = loader_.get().insertTransactions(lgrInfo, rawData);
|
||||
insertTxResultOp.emplace(loader_.get().insertTransactions(lgrInfo, rawData));
|
||||
}
|
||||
catch (std::runtime_error const& e)
|
||||
{
|
||||
LOG(log_.fatal()) << "Failed to build next ledger: " << e.what();
|
||||
|
||||
log_.debug() << "Inserted all transactions. Number of transactions = "
|
||||
<< rawData.transactions_list().transactions_size();
|
||||
amendmentBlockHandler_.get().onAmendmentBlock();
|
||||
return {ripple::LedgerHeader{}, false};
|
||||
}
|
||||
|
||||
backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData));
|
||||
backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
|
||||
backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
|
||||
LOG(log_.debug()) << "Inserted all transactions. Number of transactions = "
|
||||
<< rawData.transactions_list().transactions_size();
|
||||
|
||||
backend_->writeAccountTransactions(std::move(insertTxResultOp->accountTxData));
|
||||
backend_->writeNFTs(std::move(insertTxResultOp->nfTokensData));
|
||||
backend_->writeNFTTransactions(std::move(insertTxResultOp->nfTokenTxData));
|
||||
|
||||
auto [success, duration] =
|
||||
util::timed<std::chrono::duration<double>>([&]() { return backend_->finishWrites(lgrInfo.seq); });
|
||||
::util::timed<std::chrono::duration<double>>([&]() { return backend_->finishWrites(lgrInfo.seq); });
|
||||
|
||||
log_.debug() << "Finished writes. Total time: " << std::to_string(duration);
|
||||
log_.debug() << "Finished ledger update: " << util::toString(lgrInfo);
|
||||
LOG(log_.debug()) << "Finished writes. Total time: " << std::to_string(duration);
|
||||
LOG(log_.debug()) << "Finished ledger update: " << ::util::toString(lgrInfo);
|
||||
|
||||
return {lgrInfo, success};
|
||||
}
|
||||
@@ -204,9 +223,9 @@ private:
|
||||
* @param rawData Ledger data from GRPC
|
||||
*/
|
||||
void
|
||||
updateCache(ripple::LedgerInfo const& lgrInfo, GetLedgerResponseType& rawData)
|
||||
updateCache(ripple::LedgerHeader const& lgrInfo, GetLedgerResponseType& rawData)
|
||||
{
|
||||
std::vector<Backend::LedgerObject> cacheUpdates;
|
||||
std::vector<data::LedgerObject> cacheUpdates;
|
||||
cacheUpdates.reserve(rawData.ledger_objects().objects_size());
|
||||
|
||||
// TODO change these to unordered_set
|
||||
@@ -219,14 +238,14 @@ private:
|
||||
assert(key);
|
||||
|
||||
cacheUpdates.push_back({*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
|
||||
log_.debug() << "key = " << ripple::strHex(*key) << " - mod type = " << obj.mod_type();
|
||||
LOG(log_.debug()) << "key = " << ripple::strHex(*key) << " - mod type = " << obj.mod_type();
|
||||
|
||||
if (obj.mod_type() != RawLedgerObjectType::MODIFIED && !rawData.object_neighbors_included())
|
||||
{
|
||||
log_.debug() << "object neighbors not included. using cache";
|
||||
LOG(log_.debug()) << "object neighbors not included. using cache";
|
||||
|
||||
if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1)
|
||||
throw std::runtime_error("Cache is not full, but object neighbors were not included");
|
||||
throw std::logic_error("Cache is not full, but object neighbors were not included");
|
||||
|
||||
auto const blob = obj.mutable_data();
|
||||
auto checkBookBase = false;
|
||||
@@ -245,7 +264,7 @@ private:
|
||||
|
||||
if (checkBookBase)
|
||||
{
|
||||
log_.debug() << "Is book dir. Key = " << ripple::strHex(*key);
|
||||
LOG(log_.debug()) << "Is book dir. Key = " << ripple::strHex(*key);
|
||||
|
||||
auto const bookBase = getBookBase(*key);
|
||||
auto const oldFirstDir = backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1);
|
||||
@@ -254,9 +273,10 @@ private:
|
||||
// We deleted the first directory, or we added a directory prior to the old first directory
|
||||
if ((isDeleted && key == oldFirstDir->key) || (!isDeleted && key < oldFirstDir->key))
|
||||
{
|
||||
log_.debug() << "Need to recalculate book base successor. base = " << ripple::strHex(bookBase)
|
||||
<< " - key = " << ripple::strHex(*key) << " - isDeleted = " << isDeleted
|
||||
<< " - seq = " << lgrInfo.seq;
|
||||
LOG(log_.debug())
|
||||
<< "Need to recalculate book base successor. base = " << ripple::strHex(bookBase)
|
||||
<< " - key = " << ripple::strHex(*key) << " - isDeleted = " << isDeleted
|
||||
<< " - seq = " << lgrInfo.seq;
|
||||
bookSuccessorsToCalculate.insert(bookBase);
|
||||
}
|
||||
}
|
||||
@@ -273,9 +293,9 @@ private:
|
            // rippled didn't send successor information, so use our cache
            if (!rawData.object_neighbors_included())
            {
-               log_.debug() << "object neighbors not included. using cache";
+               LOG(log_.debug()) << "object neighbors not included. using cache";
                if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq)
-                   throw std::runtime_error("Cache is not full, but object neighbors were not included");
+                   throw std::logic_error("Cache is not full, but object neighbors were not included");

                for (auto const& obj : cacheUpdates)
                {
@@ -284,16 +304,16 @@ private:

                    auto lb = backend_->cache().getPredecessor(obj.key, lgrInfo.seq);
                    if (!lb)
-                       lb = {Backend::firstKey, {}};
+                       lb = {data::firstKey, {}};

                    auto ub = backend_->cache().getSuccessor(obj.key, lgrInfo.seq);
                    if (!ub)
-                       ub = {Backend::lastKey, {}};
+                       ub = {data::lastKey, {}};

                    if (obj.blob.size() == 0)
                    {
-                       log_.debug() << "writing successor for deleted object " << ripple::strHex(obj.key) << " - "
-                                    << ripple::strHex(lb->key) << " - " << ripple::strHex(ub->key);
+                       LOG(log_.debug()) << "writing successor for deleted object " << ripple::strHex(obj.key) << " - "
+                                         << ripple::strHex(lb->key) << " - " << ripple::strHex(ub->key);

                        backend_->writeSuccessor(uint256ToString(lb->key), lgrInfo.seq, uint256ToString(ub->key));
                    }
@@ -302,8 +322,8 @@ private:
                        backend_->writeSuccessor(uint256ToString(lb->key), lgrInfo.seq, uint256ToString(obj.key));
                        backend_->writeSuccessor(uint256ToString(obj.key), lgrInfo.seq, uint256ToString(ub->key));

-                       log_.debug() << "writing successor for new object " << ripple::strHex(lb->key) << " - "
-                                    << ripple::strHex(obj.key) << " - " << ripple::strHex(ub->key);
+                       LOG(log_.debug()) << "writing successor for new object " << ripple::strHex(lb->key) << " - "
+                                         << ripple::strHex(obj.key) << " - " << ripple::strHex(ub->key);
                    }
                }

@@ -314,41 +334,41 @@ private:
                    {
                        backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(succ->key));

-                       log_.debug() << "Updating book successor " << ripple::strHex(base) << " - "
-                                    << ripple::strHex(succ->key);
+                       LOG(log_.debug()) << "Updating book successor " << ripple::strHex(base) << " - "
+                                         << ripple::strHex(succ->key);
                    }
                    else
                    {
-                       backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(Backend::lastKey));
+                       backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(data::lastKey));

-                       log_.debug() << "Updating book successor " << ripple::strHex(base) << " - "
-                                    << ripple::strHex(Backend::lastKey);
+                       LOG(log_.debug()) << "Updating book successor " << ripple::strHex(base) << " - "
+                                         << ripple::strHex(data::lastKey);
                    }
                }
            }
        }
    }

    /**
-    * @brief Write successors info into DB
+    * @brief Write successors info into DB.
     *
     * @param lgrInfo Ledger info
     * @param rawData Ledger data from GRPC
     */
    void
-   writeSuccessors(ripple::LedgerInfo const& lgrInfo, GetLedgerResponseType& rawData)
+   writeSuccessors(ripple::LedgerHeader const& lgrInfo, GetLedgerResponseType& rawData)
    {
        // Write successor info, if included from rippled
        if (rawData.object_neighbors_included())
        {
-           log_.debug() << "object neighbors included";
+           LOG(log_.debug()) << "object neighbors included";

            for (auto& obj : *(rawData.mutable_book_successors()))
            {
                auto firstBook = std::move(*obj.mutable_first_book());
                if (!firstBook.size())
-                   firstBook = uint256ToString(Backend::lastKey);
-               log_.debug() << "writing book successor " << ripple::strHex(obj.book_base()) << " - "
-                            << ripple::strHex(firstBook);
+                   firstBook = uint256ToString(data::lastKey);
+               LOG(log_.debug()) << "writing book successor " << ripple::strHex(obj.book_base()) << " - "
+                                 << ripple::strHex(firstBook);

                backend_->writeSuccessor(std::move(*obj.mutable_book_base()), lgrInfo.seq, std::move(firstBook));
            }
@@ -359,45 +379,52 @@ private:
            {
                std::string* predPtr = obj.mutable_predecessor();
                if (!predPtr->size())
-                   *predPtr = uint256ToString(Backend::firstKey);
+                   *predPtr = uint256ToString(data::firstKey);
                std::string* succPtr = obj.mutable_successor();
                if (!succPtr->size())
-                   *succPtr = uint256ToString(Backend::lastKey);
+                   *succPtr = uint256ToString(data::lastKey);

                if (obj.mod_type() == RawLedgerObjectType::DELETED)
                {
-                   log_.debug() << "Modifying successors for deleted object " << ripple::strHex(obj.key()) << " - "
-                                << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr);
+                   LOG(log_.debug()) << "Modifying successors for deleted object " << ripple::strHex(obj.key())
+                                     << " - " << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr);

                    backend_->writeSuccessor(std::move(*predPtr), lgrInfo.seq, std::move(*succPtr));
                }
                else
                {
-                   log_.debug() << "adding successor for new object " << ripple::strHex(obj.key()) << " - "
-                                << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr);
+                   LOG(log_.debug()) << "adding successor for new object " << ripple::strHex(obj.key()) << " - "
+                                     << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr);

                    backend_->writeSuccessor(std::move(*predPtr), lgrInfo.seq, std::string{obj.key()});
                    backend_->writeSuccessor(std::string{obj.key()}, lgrInfo.seq, std::move(*succPtr));
                }
            }
            else
-               log_.debug() << "object modified " << ripple::strHex(obj.key());
+               LOG(log_.debug()) << "object modified " << ripple::strHex(obj.key());
        }
    }

    /** @return true if the transformer is stopping; false otherwise */
    bool
    isStopping() const
    {
        return state_.get().isStopping;
    }

    /** @return true if there was a write conflict; false otherwise */
    bool
    hasWriteConflict() const
    {
        return state_.get().writeConflict;
    }

    /**
     * @brief Sets the write conflict flag.
     *
     * @param conflict The value to set
     */
    void
    setWriteConflict(bool conflict)
    {
@@ -405,4 +432,4 @@ private:
    }
};

-} // namespace clio::detail
+} // namespace etl::detail
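The hunks above maintain clio's successor index: for every ledger object key, the backend stores (versioned by ledger sequence) a pointer to the next live key, bounded by the data::firstKey and data::lastKey sentinels. Deleting an object splices its predecessor directly to its successor; creating one splices the new key in between. A self-contained sketch of that splice rule, with std::map standing in for the backend and invented key strings (this is not clio's API, and real writes are versioned per ledger rather than erased):

#include <iostream>
#include <map>
#include <string>

// Toy successor index: key -> next live key, with sentinel bounds playing
// the role of data::firstKey and data::lastKey.
using SuccessorIndex = std::map<std::string, std::string>;
static std::string const firstKey = "00..00";
static std::string const lastKey = "FF..FF";

// Deleted object: the predecessor now points straight at the successor.
// (clio writes a new record at the current ledger sequence instead of erasing.)
void
spliceOut(SuccessorIndex& index, std::string const& pred, std::string const& key, std::string const& succ)
{
    index[pred] = succ;
    index.erase(key);
}

// New object: the predecessor points at the key, the key at the successor.
void
spliceIn(SuccessorIndex& index, std::string const& pred, std::string const& key, std::string const& succ)
{
    index[pred] = key;
    index[key] = succ;
}

int main()
{
    SuccessorIndex index{{firstKey, lastKey}};
    spliceIn(index, firstKey, "AB01", lastKey);    // first object in the ledger
    spliceIn(index, "AB01", "CD02", lastKey);      // a second object after it
    spliceOut(index, firstKey, "AB01", "CD02");    // delete AB01: splice around it
    for (auto const& [key, next] : index)
        std::cout << key << " -> " << next << "\n";  // 00..00 -> CD02, CD02 -> FF..FF
}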
@@ -17,9 +17,11 @@
 */
//==============================================================================

+#include <feed/SubscriptionManager.h>
#include <rpc/BookChangesHelper.h>
#include <rpc/RPCHelpers.h>
-#include <subscriptions/SubscriptionManager.h>

+namespace feed {
+
void
Subscription::subscribe(SessionPtrType const& session)
@@ -41,7 +43,7 @@ Subscription::publish(std::shared_ptr<std::string> const& message)

boost::json::object
getLedgerPubMessage(
-    ripple::LedgerInfo const& lgrInfo,
+    ripple::LedgerHeader const& lgrInfo,
    ripple::Fees const& fees,
    std::string const& ledgerRange,
    std::uint32_t txnCount)
@@ -53,10 +55,9 @@ getLedgerPubMessage(
    pubMsg["ledger_hash"] = to_string(lgrInfo.hash);
    pubMsg["ledger_time"] = lgrInfo.closeTime.time_since_epoch().count();

-    pubMsg["fee_ref"] = RPC::toBoostJson(fees.units.jsonClipped());
-    pubMsg["fee_base"] = RPC::toBoostJson(fees.base.jsonClipped());
-    pubMsg["reserve_base"] = RPC::toBoostJson(fees.reserve.jsonClipped());
-    pubMsg["reserve_inc"] = RPC::toBoostJson(fees.increment.jsonClipped());
+    pubMsg["fee_base"] = rpc::toBoostJson(fees.base.jsonClipped());
+    pubMsg["reserve_base"] = rpc::toBoostJson(fees.reserve.jsonClipped());
+    pubMsg["reserve_inc"] = rpc::toBoostJson(fees.increment.jsonClipped());

    pubMsg["validated_ledgers"] = ledgerRange;
    pubMsg["txn_count"] = txnCount;
@@ -64,7 +65,7 @@ getLedgerPubMessage(
}

boost::json::object
-SubscriptionManager::subLedger(boost::asio::yield_context& yield, SessionPtrType session)
+SubscriptionManager::subLedger(boost::asio::yield_context yield, SessionPtrType session)
{
    subscribeHelper(session, ledgerSubscribers_, [this](SessionPtrType session) { unsubLedger(session); });

@@ -144,7 +145,7 @@ SubscriptionManager::unsubBookChanges(SessionPtrType session)

void
SubscriptionManager::pubLedger(
-    ripple::LedgerInfo const& lgrInfo,
+    ripple::LedgerHeader const& lgrInfo,
    ripple::Fees const& fees,
    std::string const& ledgerRange,
    std::uint32_t txnCount)
@@ -156,13 +157,13 @@ SubscriptionManager::pubLedger(
}

void
-SubscriptionManager::pubTransaction(Backend::TransactionAndMetadata const& blobs, ripple::LedgerInfo const& lgrInfo)
+SubscriptionManager::pubTransaction(data::TransactionAndMetadata const& blobs, ripple::LedgerHeader const& lgrInfo)
{
-    auto [tx, meta] = RPC::deserializeTxPlusMeta(blobs, lgrInfo.seq);
+    auto [tx, meta] = rpc::deserializeTxPlusMeta(blobs, lgrInfo.seq);
    boost::json::object pubObj;
-    pubObj["transaction"] = RPC::toJson(*tx);
-    pubObj["meta"] = RPC::toJson(*meta);
-    RPC::insertDeliveredAmount(pubObj["meta"].as_object(), tx, meta, blobs.date);
+    pubObj["transaction"] = rpc::toJson(*tx);
+    pubObj["meta"] = rpc::toJson(*meta);
+    rpc::insertDeliveredAmount(pubObj["meta"].as_object(), tx, meta, blobs.date);
    pubObj["type"] = "transaction";
    pubObj["validated"] = true;
    pubObj["status"] = "closed";
@@ -185,12 +186,12 @@ SubscriptionManager::pubTransaction(Backend::TransactionAndMetadata const& blobs
    {
        ripple::STAmount ownerFunds;
        auto fetchFundsSynchronous = [&]() {
-            Backend::synchronous([&](boost::asio::yield_context& yield) {
-                ownerFunds = RPC::accountFunds(*backend_, lgrInfo.seq, amount, account, yield);
+            data::synchronous([&](boost::asio::yield_context yield) {
+                ownerFunds = rpc::accountFunds(*backend_, lgrInfo.seq, amount, account, yield);
            });
        };

-        Backend::retryOnTimeout(fetchFundsSynchronous);
+        data::retryOnTimeout(fetchFundsSynchronous);

        pubObj["transaction"].as_object()["owner_funds"] = ownerFunds.getText();
    }
@@ -244,10 +245,10 @@ SubscriptionManager::pubTransaction(Backend::TransactionAndMetadata const& blobs

void
SubscriptionManager::pubBookChanges(
-    ripple::LedgerInfo const& lgrInfo,
-    std::vector<Backend::TransactionAndMetadata> const& transactions)
+    ripple::LedgerHeader const& lgrInfo,
+    std::vector<data::TransactionAndMetadata> const& transactions)
{
-    auto const json = RPC::computeBookChanges(lgrInfo, transactions);
+    auto const json = rpc::computeBookChanges(lgrInfo, transactions);
    auto const bookChangesMsg = std::make_shared<std::string>(boost::json::serialize(json));
    bookChangesSubscribers_.publish(bookChangesMsg);
}
@@ -259,7 +260,7 @@ SubscriptionManager::forwardProposedTransaction(boost::json::object const& respo
    txProposedSubscribers_.publish(pubMsg);

    auto transaction = response.at("transaction").as_object();
-    auto accounts = RPC::getAccountsFromTransaction(transaction);
+    auto accounts = rpc::getAccountsFromTransaction(transaction);

    for (ripple::AccountID const& account : accounts)
        accountProposedSubscribers_.publish(pubMsg, account);
@@ -365,3 +366,5 @@ SubscriptionManager::cleanup(SessionPtrType session)

    cleanupFuncs_.erase(session);
}
+
+} // namespace feed
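After this change, getLedgerPubMessage emits a ledger stream message shaped roughly as below. This is a hand-written sample: every value is invented, the type and ledger_index fields are assumed from the standard XRPL ledger stream format (they are set outside the hunk shown), and fee_ref is gone because the units field is no longer serialized:

{
  "type": "ledgerClosed",
  "ledger_index": 81000000,
  "ledger_hash": "F9E5...",
  "ledger_time": 742512345,
  "fee_base": 10,
  "reserve_base": 10000000,
  "reserve_inc": 2000000,
  "validated_ledgers": "32570-81000000",
  "txn_count": 42
}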
591
src/feed/SubscriptionManager.h
Normal file
@@ -0,0 +1,591 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <data/BackendInterface.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>
#include <web/interface/ConnectionBase.h>

#include <ripple/protocol/LedgerHeader.h>

#include <memory>

/**
 * @brief This namespace deals with subscriptions.
 */
namespace feed {

using SessionPtrType = std::shared_ptr<web::ConnectionBase>;

/**
 * @brief Sends a message to subscribers.
 *
 * @param message The message to send
 * @param subscribers The subscription stream to send the message to
 * @param counter The subscription counter to decrement if session is detected as dead
 */
template <class T>
inline void
sendToSubscribers(std::shared_ptr<std::string> const& message, T& subscribers, std::atomic_uint64_t& counter)
{
    for (auto it = subscribers.begin(); it != subscribers.end();)
    {
        auto& session = *it;
        if (session->dead())
        {
            it = subscribers.erase(it);
            --counter;
        }
        else
        {
            session->send(message);
            ++it;
        }
    }
}

/**
 * @brief Adds a session to the subscription stream.
 *
 * @param session The session to add
 * @param subscribers The stream to subscribe to
 * @param counter The counter representing the current total subscribers
 */
template <class T>
inline void
addSession(SessionPtrType session, T& subscribers, std::atomic_uint64_t& counter)
{
    if (!subscribers.contains(session))
    {
        subscribers.insert(session);
        ++counter;
    }
}

/**
 * @brief Removes a session from the subscription stream.
 *
 * @param session The session to remove
 * @param subscribers The stream to unsubscribe from
 * @param counter The counter representing the current total subscribers
 */
template <class T>
inline void
removeSession(SessionPtrType session, T& subscribers, std::atomic_uint64_t& counter)
{
    if (subscribers.contains(session))
    {
        subscribers.erase(session);
        --counter;
    }
}
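Because sendToSubscribers is a template over the subscriber container, any element type exposing dead() and send() works, which makes the lazy dead-session pruning easy to observe in isolation. A self-contained sketch (MockSession is invented for illustration; the template body is copied from above):

#include <atomic>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_set>

// Minimal stand-in for web::ConnectionBase: just dead() and send().
struct MockSession
{
    bool isDead = false;
    bool dead() const { return isDead; }
    void send(std::shared_ptr<std::string> const& msg) { std::cout << "sent: " << *msg << "\n"; }
};

template <class T>
void
sendToSubscribers(std::shared_ptr<std::string> const& message, T& subscribers, std::atomic_uint64_t& counter)
{
    for (auto it = subscribers.begin(); it != subscribers.end();)
    {
        auto& session = *it;
        if (session->dead())
        {
            it = subscribers.erase(it);  // dead sessions are pruned lazily, on publish
            --counter;
        }
        else
        {
            session->send(message);
            ++it;
        }
    }
}

int main()
{
    std::unordered_set<std::shared_ptr<MockSession>> subs;
    std::atomic_uint64_t count{0};

    auto alive = std::make_shared<MockSession>();
    auto gone = std::make_shared<MockSession>();
    gone->isDead = true;

    subs.insert(alive);
    subs.insert(gone);
    count = 2;

    sendToSubscribers(std::make_shared<std::string>("hello"), subs, count);
    std::cout << "remaining subscribers: " << count << "\n";  // prints 1
}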

/**
 * @brief Represents a subscription stream.
 */
class Subscription
{
    boost::asio::strand<boost::asio::io_context::executor_type> strand_;
    std::unordered_set<SessionPtrType> subscribers_ = {};
    std::atomic_uint64_t subCount_ = 0;

public:
    Subscription() = delete;
    Subscription(Subscription&) = delete;
    Subscription(Subscription&&) = delete;

    /**
     * @brief Create a new subscription stream.
     *
     * @param ioc The io_context to run on
     */
    explicit Subscription(boost::asio::io_context& ioc) : strand_(boost::asio::make_strand(ioc))
    {
    }

    ~Subscription() = default;

    /**
     * @brief Adds the given session to the subscribers set.
     *
     * @param session The session to add
     */
    void
    subscribe(SessionPtrType const& session);

    /**
     * @brief Removes the given session from the subscribers set.
     *
     * @param session The session to remove
     */
    void
    unsubscribe(SessionPtrType const& session);

    /**
     * @brief Sends the given message to all subscribers.
     *
     * @param message The message to send
     */
    void
    publish(std::shared_ptr<std::string> const& message);

    /**
     * @return Total subscriber count on this stream.
     */
    std::uint64_t
    count() const
    {
        return subCount_.load();
    }

    /**
     * @return true if the stream currently has no subscribers; false otherwise
     */
    bool
    empty() const
    {
        return count() == 0;
    }
};

/**
 * @brief Represents a collection of subscriptions where each stream is mapped to a key.
 */
template <class Key>
class SubscriptionMap
{
    using SubscribersType = std::set<SessionPtrType>;

    boost::asio::strand<boost::asio::io_context::executor_type> strand_;
    std::unordered_map<Key, SubscribersType> subscribers_ = {};
    std::atomic_uint64_t subCount_ = 0;

public:
    SubscriptionMap() = delete;
    SubscriptionMap(SubscriptionMap&) = delete;
    SubscriptionMap(SubscriptionMap&&) = delete;

    /**
     * @brief Create a new subscription map.
     *
     * @param ioc The io_context to run on
     */
    explicit SubscriptionMap(boost::asio::io_context& ioc) : strand_(boost::asio::make_strand(ioc))
    {
    }

    ~SubscriptionMap() = default;

    /**
     * @brief Subscribe to a specific stream by its key.
     *
     * @param session The session to add
     * @param key The key for the subscription to subscribe to
     */
    void
    subscribe(SessionPtrType const& session, Key const& key)
    {
        boost::asio::post(strand_, [this, session, key]() { addSession(session, subscribers_[key], subCount_); });
    }

    /**
     * @brief Unsubscribe from a specific stream by its key.
     *
     * @param session The session to remove
     * @param key The key for the subscription to unsubscribe from
     */
    void
    unsubscribe(SessionPtrType const& session, Key const& key)
    {
        boost::asio::post(strand_, [this, key, session]() {
            if (!subscribers_.contains(key))
                return;

            if (!subscribers_[key].contains(session))
                return;

            --subCount_;
            subscribers_[key].erase(session);

            if (subscribers_[key].size() == 0)
            {
                subscribers_.erase(key);
            }
        });
    }

    /**
     * @brief Sends the given message to all subscribers.
     *
     * @param message The message to send
     * @param key The key for the subscription to send the message to
     */
    void
    publish(std::shared_ptr<std::string> const& message, Key const& key)
    {
        boost::asio::post(strand_, [this, key, message]() {
            if (!subscribers_.contains(key))
                return;

            sendToSubscribers(message, subscribers_[key], subCount_);
        });
    }

    /**
     * @return Total subscriber count on all streams in the collection.
     */
    std::uint64_t
    count() const
    {
        return subCount_.load();
    }
};
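Both classes above funnel every mutation of their subscriber containers through boost::asio::post onto a strand, so no mutex is needed even though SubscriptionManager runs several worker threads over the same io_context. A minimal, self-contained demonstration of the strand guarantee they rely on (unrelated to clio's own types):

#include <boost/asio.hpp>
#include <iostream>
#include <thread>

int main()
{
    boost::asio::io_context ioc;
    auto strand = boost::asio::make_strand(ioc);

    int counter = 0;  // only ever touched from handlers posted to the strand
    for (int i = 0; i < 100; ++i)
        boost::asio::post(strand, [&counter] { ++counter; });

    // Two worker threads, like SubscriptionManager's pool; handlers on one
    // strand still never run concurrently, so ++counter needs no lock.
    std::thread t1{[&ioc] { ioc.run(); }};
    std::thread t2{[&ioc] { ioc.run(); }};
    t1.join();
    t2.join();

    std::cout << counter << "\n";  // always 100
}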

/**
 * @brief Manages subscriptions.
 */
class SubscriptionManager
{
    util::Logger log_{"Subscriptions"};

    std::vector<std::thread> workers_;
    boost::asio::io_context ioc_;
    std::optional<boost::asio::io_context::work> work_;

    Subscription ledgerSubscribers_;
    Subscription txSubscribers_;
    Subscription txProposedSubscribers_;
    Subscription manifestSubscribers_;
    Subscription validationsSubscribers_;
    Subscription bookChangesSubscribers_;

    SubscriptionMap<ripple::AccountID> accountSubscribers_;
    SubscriptionMap<ripple::AccountID> accountProposedSubscribers_;
    SubscriptionMap<ripple::Book> bookSubscribers_;

    std::shared_ptr<data::BackendInterface const> backend_;

public:
    /**
     * @brief A factory function that creates a new subscription manager configured from the config provided.
     *
     * @param config The configuration to use
     * @param backend The backend to use
     */
    static std::shared_ptr<SubscriptionManager>
    make_SubscriptionManager(util::Config const& config, std::shared_ptr<data::BackendInterface const> const& backend)
    {
        auto numThreads = config.valueOr<uint64_t>("subscription_workers", 1);
        return std::make_shared<SubscriptionManager>(numThreads, backend);
    }

    /**
     * @brief Creates a new instance of the subscription manager.
     *
     * @param numThreads The number of worker threads to manage subscriptions
     * @param backend The backend to use
     */
    SubscriptionManager(std::uint64_t numThreads, std::shared_ptr<data::BackendInterface const> const& backend)
        : ledgerSubscribers_(ioc_)
        , txSubscribers_(ioc_)
        , txProposedSubscribers_(ioc_)
        , manifestSubscribers_(ioc_)
        , validationsSubscribers_(ioc_)
        , bookChangesSubscribers_(ioc_)
        , accountSubscribers_(ioc_)
        , accountProposedSubscribers_(ioc_)
        , bookSubscribers_(ioc_)
        , backend_(backend)
    {
        work_.emplace(ioc_);

        // We will eventually want to clamp this to be the number of strands,
        // since adding more threads than we have strands won't see any
        // performance benefits
        LOG(log_.info()) << "Starting subscription manager with " << numThreads << " workers";

        workers_.reserve(numThreads);
        for (auto i = numThreads; i > 0; --i)
            workers_.emplace_back([this] { ioc_.run(); });
    }

    /** @brief Stops the worker threads of the subscription manager. */
    ~SubscriptionManager()
    {
        work_.reset();

        ioc_.stop();
        for (auto& worker : workers_)
            worker.join();
    }

    /**
     * @brief Subscribe to the ledger stream.
     *
     * @param yield The coroutine context
     * @param session The session to subscribe to the stream
     * @return JSON object representing the first message to be sent to the new subscriber
     */
    boost::json::object
    subLedger(boost::asio::yield_context yield, SessionPtrType session);

    /**
     * @brief Publish to the ledger stream.
     *
     * @param lgrInfo The ledger header to serialize
     * @param fees The fees to serialize
     * @param ledgerRange The ledger range this message applies to
     * @param txnCount The total number of transactions to serialize
     */
    void
    pubLedger(
        ripple::LedgerHeader const& lgrInfo,
        ripple::Fees const& fees,
        std::string const& ledgerRange,
        std::uint32_t txnCount);

    /**
     * @brief Publish to the book changes stream.
     *
     * @param lgrInfo The ledger header to serialize
     * @param transactions The transactions to serialize
     */
    void
    pubBookChanges(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions);

    /**
     * @brief Unsubscribe from the ledger stream.
     *
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubLedger(SessionPtrType session);

    /**
     * @brief Subscribe to the transactions stream.
     *
     * @param session The session to subscribe to the stream
     */
    void
    subTransactions(SessionPtrType session);

    /**
     * @brief Unsubscribe from the transactions stream.
     *
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubTransactions(SessionPtrType session);

    /**
     * @brief Publish to the transactions stream.
     *
     * @param blobs The transactions to serialize
     * @param lgrInfo The ledger header to serialize
     */
    void
    pubTransaction(data::TransactionAndMetadata const& blobs, ripple::LedgerHeader const& lgrInfo);

    /**
     * @brief Subscribe to the account changes stream.
     *
     * @param account The account to monitor changes for
     * @param session The session to subscribe to the stream
     */
    void
    subAccount(ripple::AccountID const& account, SessionPtrType const& session);

    /**
     * @brief Unsubscribe from the account changes stream.
     *
     * @param account The account the stream is for
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubAccount(ripple::AccountID const& account, SessionPtrType const& session);

    /**
     * @brief Subscribe to a specific book changes stream.
     *
     * @param book The book to monitor changes for
     * @param session The session to subscribe to the stream
     */
    void
    subBook(ripple::Book const& book, SessionPtrType session);

    /**
     * @brief Unsubscribe from the specific book changes stream.
     *
     * @param book The book to stop monitoring changes for
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubBook(ripple::Book const& book, SessionPtrType session);

    /**
     * @brief Subscribe to the book changes stream.
     *
     * @param session The session to subscribe to the stream
     */
    void
    subBookChanges(SessionPtrType session);

    /**
     * @brief Unsubscribe from the book changes stream.
     *
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubBookChanges(SessionPtrType session);

    /**
     * @brief Subscribe to the manifest stream.
     *
     * @param session The session to subscribe to the stream
     */
    void
    subManifest(SessionPtrType session);

    /**
     * @brief Unsubscribe from the manifest stream.
     *
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubManifest(SessionPtrType session);

    /**
     * @brief Subscribe to the validation stream.
     *
     * @param session The session to subscribe to the stream
     */
    void
    subValidation(SessionPtrType session);

    /**
     * @brief Unsubscribe from the validation stream.
     *
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubValidation(SessionPtrType session);

    /**
     * @brief Publish proposed transactions and proposed accounts from a JSON response.
     *
     * @param response The JSON response to use
     */
    void
    forwardProposedTransaction(boost::json::object const& response);

    /**
     * @brief Publish manifest updates from a JSON response.
     *
     * @param response The JSON response to use
     */
    void
    forwardManifest(boost::json::object const& response);

    /**
     * @brief Publish validation updates from a JSON response.
     *
     * @param response The JSON response to use
     */
    void
    forwardValidation(boost::json::object const& response);

    /**
     * @brief Subscribe to the proposed account stream.
     *
     * @param account The account to monitor
     * @param session The session to subscribe to the stream
     */
    void
    subProposedAccount(ripple::AccountID const& account, SessionPtrType session);

    /**
     * @brief Unsubscribe from the proposed account stream.
     *
     * @param account The account the stream is for
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubProposedAccount(ripple::AccountID const& account, SessionPtrType session);

    /**
     * @brief Subscribe to the proposed transactions stream.
     *
     * @param session The session to subscribe to the stream
     */
    void
    subProposedTransactions(SessionPtrType session);

    /**
     * @brief Unsubscribe from the proposed transactions stream.
     *
     * @param session The session to unsubscribe from the stream
     */
    void
    unsubProposedTransactions(SessionPtrType session);

    /** @brief Cleanup the session on removal. */
    void
    cleanup(SessionPtrType session);

    /**
     * @brief Generate a JSON report on the current state of the subscriptions.
     *
     * @return The report as a JSON object
     */
    boost::json::object
    report() const
    {
        return {
            {"ledger", ledgerSubscribers_.count()},
            {"transactions", txSubscribers_.count()},
            {"transactions_proposed", txProposedSubscribers_.count()},
            {"manifests", manifestSubscribers_.count()},
            {"validations", validationsSubscribers_.count()},
            {"account", accountSubscribers_.count()},
            {"accounts_proposed", accountProposedSubscribers_.count()},
            {"books", bookSubscribers_.count()},
            {"book_changes", bookChangesSubscribers_.count()},
        };
    }

private:
    using CleanupFunction = std::function<void(SessionPtrType const)>;

    void
    subscribeHelper(SessionPtrType const& session, Subscription& subs, CleanupFunction&& func);

    template <typename Key>
    void
    subscribeHelper(SessionPtrType const& session, Key const& k, SubscriptionMap<Key>& subs, CleanupFunction&& func);

    // This is how we chose to cleanup subscriptions that have been closed.
    // Each time we add a subscriber, we add the opposite lambda that unsubscribes that subscriber when cleanup is
    // called with the session that closed.
    std::mutex cleanupMtx_;
    std::unordered_map<SessionPtrType, std::vector<CleanupFunction>> cleanupFuncs_ = {};
};

} // namespace feed
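A report() call therefore yields an object like the following (all counts invented for illustration):

{
  "ledger": 12,
  "transactions": 40,
  "transactions_proposed": 3,
  "manifests": 1,
  "validations": 2,
  "account": 250,
  "accounts_proposed": 8,
  "books": 31,
  "book_changes": 5
}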
@@ -25,20 +25,21 @@
#undef GRPC_ASAN_ENABLED
#endif

-#include <backend/BackendFactory.h>
-#include <config/Config.h>
+#include <data/BackendFactory.h>
#include <etl/ETLService.h>
#include <rpc/Counters.h>
#include <rpc/RPCEngine.h>
#include <rpc/common/impl/HandlerProvider.h>
-#include <webserver/RPCExecutor.h>
-#include <webserver/Server.h>
+#include <util/config/Config.h>
+#include <web/RPCServerHandler.h>
+#include <web/Server.h>

#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/json.hpp>
#include <boost/program_options.hpp>

#include <fstream>
#include <main/Build.h>
#include <memory>
#include <sstream>
@@ -46,8 +47,9 @@
#include <thread>
#include <vector>

-using namespace clio;
+using namespace util;
using namespace boost::asio;

namespace po = boost::program_options;

/**
@@ -162,55 +164,56 @@ try
}

    LogService::init(config);
-   LogService::info() << "Clio version: " << Build::getClioFullVersionString();
+   LOG(LogService::info()) << "Clio version: " << Build::getClioFullVersionString();

    auto const threads = config.valueOr("io_threads", 2);
    if (threads <= 0)
    {
-       LogService::fatal() << "io_threads is less than 0";
+       LOG(LogService::fatal()) << "io_threads is less than 1";
        return EXIT_FAILURE;
    }
-   LogService::info() << "Number of io threads = " << threads;
+   LOG(LogService::info()) << "Number of io threads = " << threads;

    // IO context to handle all incoming requests, as well as other things.
    // This is not the only io context in the application.
    io_context ioc{threads};

    // Rate limiter, to prevent abuse
-   auto sweepHandler = IntervalSweepHandler{config, ioc};
-   auto dosGuard = DOSGuard{config, sweepHandler};
+   auto sweepHandler = web::IntervalSweepHandler{config, ioc};
+   auto whitelistHandler = web::WhitelistHandler{config};
+   auto dosGuard = web::DOSGuard{config, whitelistHandler, sweepHandler};

    // Interface to the database
-   auto backend = Backend::make_Backend(ioc, config);
+   auto backend = data::make_Backend(config);

    // Manages clients subscribed to streams
-   auto subscriptions = SubscriptionManager::make_SubscriptionManager(config, backend);
+   auto subscriptions = feed::SubscriptionManager::make_SubscriptionManager(config, backend);

    // Tracks which ledgers have been validated by the network
-   auto ledgers = NetworkValidatedLedgers::make_ValidatedLedgers();
+   auto ledgers = etl::NetworkValidatedLedgers::make_ValidatedLedgers();

    // Handles the connection to one or more rippled nodes.
    // ETL uses the balancer to extract data.
    // The server uses the balancer to forward RPCs to a rippled node.
    // The balancer itself publishes to streams (transactions_proposed and accounts_proposed)
-   auto balancer = LoadBalancer::make_LoadBalancer(config, ioc, backend, subscriptions, ledgers);
+   auto balancer = etl::LoadBalancer::make_LoadBalancer(config, ioc, backend, subscriptions, ledgers);

    // ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
-   auto etl = ETLService::make_ETLService(config, ioc, backend, subscriptions, balancer, ledgers);
+   auto etl = etl::ETLService::make_ETLService(config, ioc, backend, subscriptions, balancer, ledgers);

-   auto workQueue = WorkQueue::make_WorkQueue(config);
-   auto counters = RPC::Counters::make_Counters(workQueue);
-   auto const handlerProvider =
-       std::make_shared<RPC::detail::ProductionHandlerProvider const>(backend, subscriptions, balancer, etl, counters);
-   auto const rpcEngine = RPC::RPCEngine::make_RPCEngine(
-       config, backend, subscriptions, balancer, etl, dosGuard, workQueue, counters, handlerProvider);
+   auto workQueue = rpc::WorkQueue::make_WorkQueue(config);
+   auto counters = rpc::Counters::make_Counters(workQueue);
+   auto const handlerProvider = std::make_shared<rpc::detail::ProductionHandlerProvider const>(
+       config, backend, subscriptions, balancer, etl, counters);
+   auto const rpcEngine = rpc::RPCEngine::make_RPCEngine(
+       backend, subscriptions, balancer, dosGuard, workQueue, counters, handlerProvider);

-   // init the web server
-   auto executor =
-       std::make_shared<RPCExecutor<RPC::RPCEngine, ETLService>>(config, backend, rpcEngine, etl, subscriptions);
+   // Init the web server
+   auto handler = std::make_shared<web::RPCServerHandler<rpc::RPCEngine, etl::ETLService>>(
+       config, backend, rpcEngine, etl, subscriptions);
    auto ctx = parseCerts(config);
    auto const ctxRef = ctx ? std::optional<std::reference_wrapper<ssl::context>>{ctx.value()} : std::nullopt;
-   auto const httpServer = Server::make_HttpServer(config, ioc, ctxRef, dosGuard, executor);
+   auto const httpServer = web::make_HttpServer(config, ioc, ctxRef, dosGuard, handler);

    // Blocks until stopped.
    // When stopped, shared_ptrs fall out of scope
@@ -221,5 +224,5 @@ try
}
catch (std::exception const& e)
{
-   LogService::fatal() << "Exit on exception: " << e.what();
+   LOG(LogService::fatal()) << "Exit on exception: " << e.what();
}
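Two of the knobs read above live in clio's JSON config file; a minimal fragment exercising just the values visible in this diff (all other keys elided, values illustrative):

{
  "io_threads": 4,
  "subscription_workers": 2
}

io_threads must be at least 1 or startup aborts with EXIT_FAILURE, and subscription_workers (default 1) sizes the worker pool created by SubscriptionManager's constructor shown earlier.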
43
src/main/Mainpage.h
Normal file
@@ -0,0 +1,43 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

/**
 * @mainpage Clio API server
 *
 * @section intro Introduction
 *
 * Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC.
 *
 * Validated historical ledger and transaction data are stored in a more space-efficient format, using up to 4 times
 * less space than rippled.
 *
 * Clio can be configured to store data in Apache Cassandra or ScyllaDB, allowing for scalable read throughput.
 * Multiple Clio nodes can share access to the same dataset, allowing for a highly available cluster of Clio nodes,
 * without the need for redundant data storage or computation.
 *
 * You can read more general information about Clio and its subsystems from the `Related Pages` section.
 *
 * @section Develop
 *
 * As you prepare to develop code for Clio, please be sure you are aware of our current
 * <A HREF="https://github.com/XRPLF/clio/blob/develop/CONTRIBUTING.md">Contribution guidelines</A>.
 *
 * Read `rpc/README.md` carefully to learn more about writing your own handlers for
 * Clio.
 */
48
src/rpc/Amendments.h
Normal file
@@ -0,0 +1,48 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/basics/base_uint.h>

#include <ripple/protocol/digest.h>

namespace rpc {

#define REGISTER_AMENDMENT(name) inline static const ripple::uint256 name = GetAmendmentId(#name);

/**
 * @brief Represents a list of amendments in the XRPL.
 */
struct Amendments
{
    /**
     * @param name The name of the amendment
     * @return The corresponding amendment Id
     */
    static ripple::uint256 const
    GetAmendmentId(std::string_view const name)
    {
        return ripple::sha512Half(ripple::Slice(name.data(), name.size()));
    }

    REGISTER_AMENDMENT(DisallowIncoming)
    REGISTER_AMENDMENT(Clawback)
};
} // namespace rpc
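Each REGISTER_AMENDMENT line stamps out an inline static member whose value is the SHA-512Half of the amendment's name, matching how rippled derives amendment (feature) IDs. Written out by hand, the expansion and a typical comparison look like this (a sketch; featureId is a hypothetical variable standing in for an ID read from the ledger):

// REGISTER_AMENDMENT(DisallowIncoming) expands to:
inline static const ripple::uint256 DisallowIncoming = GetAmendmentId("DisallowIncoming");

// which lets call sites compare fetched amendment IDs by name:
ripple::uint256 const featureId = /* an ID from the ledger's Amendments object */;
if (featureId == rpc::Amendments::DisallowIncoming)
{
    // this ledger entry refers to the DisallowIncoming amendment
}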
@@ -17,13 +17,14 @@
 */
//==============================================================================

+/** @file */
#pragma once

#include <rpc/RPCHelpers.h>

#include <set>

-namespace RPC {
+namespace rpc {

/**
 * @brief Represents an entry in the book_changes' changes array.
@@ -53,7 +54,7 @@ public:
     * @return std::vector<BookChange> Book changes
     */
    [[nodiscard]] static std::vector<BookChange>
-   compute(std::vector<Backend::TransactionAndMetadata> const& transactions)
+   compute(std::vector<data::TransactionAndMetadata> const& transactions)
    {
        return HandlerImpl{}(transactions);
    }
@@ -66,7 +67,7 @@ private:

public:
    [[nodiscard]] std::vector<BookChange>
-   operator()(std::vector<Backend::TransactionAndMetadata> const& transactions)
+   operator()(std::vector<data::TransactionAndMetadata> const& transactions)
    {
        for (auto const& tx : transactions)
            handleBookChange(tx);
@@ -164,22 +165,21 @@ private:
        }
        else
        {
-           // TODO: use paranthesized initialization when clang catches up
            tally_[key] = {
-               first, // sideAVolume
-               second, // sideBVolume
-               rate, // highRate
-               rate, // lowRate
-               rate, // openRate
-               rate, // closeRate
+               .sideAVolume = first,
+               .sideBVolume = second,
+               .highRate = rate,
+               .lowRate = rate,
+               .openRate = rate,
+               .closeRate = rate,
            };
        }
    }

    void
-   handleBookChange(Backend::TransactionAndMetadata const& blob)
+   handleBookChange(data::TransactionAndMetadata const& blob)
    {
-       auto const [tx, meta] = RPC::deserializeTxPlusMeta(blob);
+       auto const [tx, meta] = rpc::deserializeTxPlusMeta(blob);
        if (!tx || !meta || !tx->isFieldPresent(ripple::sfTransactionType))
            return;

@@ -199,6 +199,7 @@ private:
            case ripple::ttOFFER_CREATE:
                if (tx->isFieldPresent(ripple::sfOfferSequence))
                    return tx->getFieldU32(ripple::sfOfferSequence);
+               [[fallthrough]];
            default:
                return std::nullopt;
        }
@@ -206,6 +207,12 @@ private:
    };
};

+/**
+ * @brief Implementation of value_from for BookChange type.
+ *
+ * @param jv The JSON value to populate
+ * @param change The BookChange to serialize
+ */
inline void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, BookChange const& change)
{
@@ -229,7 +236,13 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, BookChange const
    };
}

+/**
+ * @brief Computes all book changes for the given ledger header and transactions.
+ *
+ * @param lgrInfo The ledger header
+ * @param transactions The vector of transactions with their metadata
+ */
[[nodiscard]] boost::json::object const
-computeBookChanges(ripple::LedgerInfo const& lgrInfo, std::vector<Backend::TransactionAndMetadata> const& transactions);
+computeBookChanges(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions);

-} // namespace RPC
+} // namespace rpc
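The tally_ hunk above replaces comment-annotated positional initialization with C++20 designated initializers, so the member names are checked by the compiler instead of living in comments. A self-contained illustration of the difference (BookChange reduced to plain doubles for brevity; the real type uses ripple amounts):

#include <iostream>

struct BookChange  // reduced stand-in for the real type
{
    double sideAVolume, sideBVolume;
    double highRate, lowRate, openRate, closeRate;
};

int main()
{
    // Before: order-dependent; the trailing comments are the only documentation,
    // and reordering members would silently shift values into the wrong fields.
    BookChange a{1.0, 2.0, 3.0, 3.0, 3.0, 3.0};

    // After: names are part of the code; a designated-initializer list whose
    // order no longer matches the declaration fails to compile.
    BookChange b{
        .sideAVolume = 1.0,
        .sideBVolume = 2.0,
        .highRate = 3.0,
        .lowRate = 3.0,
        .openRate = 3.0,
        .closeRate = 3.0,
    };

    std::cout << a.highRate << ' ' << b.highRate << '\n';
}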
Some files were not shown because too many files have changed in this diff.