Compare commits

...

34 Commits
1.0.1 ... 1.0.3

Author SHA1 Message Date
Michael Legleux
d2c870db92 Set version to 1.0.3 2022-11-17 11:06:35 -08:00
Michael Legleux
8e17039586 Build Clio with CentOS 7 2022-11-17 11:02:54 -08:00
manojsdoshi
1310e5dde9 Set version to 1.0.3-rc1 2022-11-16 11:54:09 -05:00
ledhed2222
777ae24f62 Fix issue with assigning values to NFT offers API responses (#301) 2022-09-13 15:54:55 -04:00
Alex Kremer
1ada879072 Probing ETL Source (#292)
* Implement a probing ETL source and do not require SSL certs for SslETLSource (#251)

Fixes #251
2022-09-12 23:32:13 +01:00
Alex Kremer
e2792f5a0c Fix compiler warnings (#306) 2022-09-12 21:35:30 +01:00
Alex Kremer
97c431680a Add 20 second timeout for ETLSource websocket (#297)
Fixes #289
2022-09-12 16:09:46 +01:00
Alex Kremer
0b454a2316 Implement book_changes RPC (#300)
* Port book_changes RPC call from rippled
* Refactor for readability and modern cpp
2022-09-09 18:08:11 +01:00
CJ Cobb
b7cae53fcd cleanup README and example config (#247)
* Indicate defaults for logging parameters
* Remove log_to_file from example config
* Remove online_delete from example config
2022-09-07 18:28:32 -04:00
CJ Cobb
ac45cce5bd insert delivered_amount based on close time (#252) 2022-09-07 18:28:07 -04:00
Michael Legleux
ef39c04e1e timeout for tests (#257) 2022-09-07 18:27:45 -04:00
CJ Cobb
83a099a547 Fix bug where some ledgers are not being published (#281)
* The ledger close time can occasionally be a few seconds in the future,
  which causes ETL to not publish the ledger, because the age
  calculation wraps around and the age is computed as a very large
  unsigned integer. This fix rounds to zero when the age would be
  negative
2022-09-07 16:17:42 -04:00
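A minimal sketch of the clamping described above, with hypothetical names (the actual fix lives in clio's ETL code):

```cpp
#include <cstdint>

// If the close time is a few seconds ahead of the current time, unsigned
// subtraction would wrap around to a huge age, so clamp the age to zero.
std::uint32_t
ledgerAge(std::uint32_t closeTime, std::uint32_t now)
{
    return now < closeTime ? 0 : now - closeTime;
}
```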
Alex Kremer
73337d0819 Add CONTRIBUTING documentation (#296)
Fixes #293
2022-09-06 22:30:12 +01:00
CJ Cobb
816625c44e set grpc max message size to unlimited (#249) 2022-08-23 09:30:18 -04:00
ethanlabelle
48e87d7c07 added cache hit rate to server info (#220) 2022-08-15 10:20:45 -05:00
CJ Cobb
dfe18ed682 Update version to 1.0.2 (#245) 2022-08-11 14:35:49 -04:00
Mwni
92a072d7a8 Add README section for database administration
Add remark about Scylla's default memory reservation behavior.
2022-08-11 13:10:23 -04:00
CJ Cobb
24fca61b56 update rippled to 1.9.2 (#228)
* patch rippled to build with c++20
2022-08-10 17:09:56 -04:00
Michael Legleux
ae8303fdc8 Guard for GCC < 11 and update readme (#243) 2022-08-10 15:02:44 -04:00
CJ Cobb
709a8463b8 server_info improvements (#240)
* only return counters and etl info if client is localhost
* move cache and etl info inside info
2022-08-10 15:02:31 -04:00
CJ Cobb
84d31986d1 config file improvements (#241)
* remove log_to_file param
* change the place of workers
2022-08-10 11:30:43 -04:00
Brandon Kong
d50f229631 Fixed warning message to be XRPL standard compliant (#229)
All warnings now contain Warning Objects, which have ID, Message, and Details as fields
2022-08-04 13:21:55 -04:00
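For illustration only, a sketch of what such a warning entry might look like when built with Boost.JSON; the `id`/`message`/`details` field names follow the commit description above, not the actual handler code:

```cpp
#include <boost/json.hpp>
#include <iostream>

int main()
{
    // Hypothetical warning object shaped per the commit description
    boost::json::object warning;
    warning["id"] = 2001;
    warning["message"] = "This is a clio server.";
    warning["details"] = boost::json::object{};

    boost::json::object response;
    response["warnings"] = boost::json::array{warning};
    std::cout << boost::json::serialize(response) << "\n";
}
```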
Michael Legleux
379c89fb02 Change branches jobs run on
Run gha on "release" branch also
Restrict signing to release branches
2022-07-29 13:36:20 -07:00
CJ Cobb
81f7171368 wrap atomics in shared_ptr for cache download (#230) 2022-07-29 10:56:08 -04:00
Michael Legleux
629b35d1dd Sign clio packages 2022-07-28 23:02:11 -07:00
Brandon Kong
6fc4cee195 Updated backend README.md with the latest Cassandra schemas (#170)
* Updated backend README.md with the latest Cassandra schemas
2022-07-27 12:31:51 -04:00
CJ Cobb
b01813ac3d change id to object_id in diff response to ledger command (#218) 2022-07-26 14:08:54 -05:00
ledhed2222
6bf8c5bc4e Add NFT-specific data stores and add nft_info API (#98) 2022-07-26 15:01:14 -04:00
CJ Cobb
2ffd98f895 Fine tune cache download (#215)
* Fine tune cache download

* Allow operators to specify the max number of concurrent markers. The
  software generates possible markers from ledger diffs, as before, but
  only processes a specified number at one time, which caps database
  reads and distributes the load more evenly over the entire download.
* Allow operators to specify the page fetch size during the cache
  download, which is the number of ledger objects to fetch per marker at
  one time.

* Refactor full ledger dump in test.py
2022-07-26 15:00:27 -04:00
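A rough sketch of the marker-capping idea (illustrative only, not the actual implementation): markers are all generated up front, but at most `maxConcurrent` page fetches run at once.

```cpp
#include <cstdint>
#include <semaphore>
#include <thread>
#include <vector>

// Hypothetical sketch: cap concurrent page fetches so database reads are
// spread evenly over the entire cache download.
void
downloadCache(std::vector<std::uint32_t> const& markers, std::ptrdiff_t maxConcurrent)
{
    std::counting_semaphore<> slots{maxConcurrent};
    std::vector<std::jthread> workers;
    for (auto marker : markers)
    {
        slots.acquire();  // wait for one of the concurrent slots to free up
        workers.emplace_back([marker, &slots] {
            (void)marker;  // a real worker would call fetchPage(marker, pageFetchSize)
            slots.release();
        });
    }
}  // std::jthread joins on destruction
```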
CJ Cobb
3edead32ba remove assert in fetchLedgerPage (#227) 2022-07-26 14:35:59 -04:00
Nathan Nichols
28980734ae ensure lgrInfo is in context.range (#226) 2022-07-26 14:35:48 -04:00
ethanlabelle
ce60c8f64d moved warnings array out of result JSON (#208) 2022-07-26 13:39:27 -04:00
Brandon Kong
39ef2ae33c Fixed 503 response code (#214)
The rate limiting warning response of Clio now follows the XRPL standard.
2022-07-26 13:39:09 -04:00
Nathan Nichols
d83975e750 report ledger when no marker exists in ledger_data (#203) 2022-07-15 13:25:46 -05:00
68 changed files with 3013 additions and 623 deletions

.dockerignore Normal file

@@ -0,0 +1 @@
build/

.github/actions/lint/action.yml vendored Normal file

@@ -0,0 +1,13 @@
runs:
using: composite
steps:
# Github's ubuntu-20.04 image already has clang-format-11 installed
- run: |
find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i
shell: bash
- name: Check for differences
id: assert
shell: bash
run: |
git diff --color --exit-code | tee "clang-format.patch"

.github/actions/sign/action.yml vendored Normal file

@@ -0,0 +1,21 @@
name: 'Sign packages'
runs:
using: "composite"
steps:
- name: Sign
shell: bash
run: |
set -ex -o pipefail
echo "$GPG_KEY_B64"| base64 -d | gpg --batch --no-tty --allow-secret-key-import --import -
unset GPG_KEY_B64
export GPG_PASSPHRASE=$(echo $GPG_KEY_PASS_B64 | base64 -di)
unset GPG_KEY_PASS_B64
export GPG_KEYID=$(gpg --with-colon --list-secret-keys | head -n1 | cut -d : -f 5)
for PKG in $(ls *.deb); do
dpkg-sig \
-g "--no-tty --digest-algo 'sha512' --passphrase '${GPG_PASSPHRASE}' --pinentry-mode=loopback" \
-k "${GPG_KEYID}" \
--sign builder \
$PKG
done

.github/actions/test/Dockerfile vendored Normal file

@@ -0,0 +1,6 @@
FROM cassandra:4.0.4
RUN apt-get update && apt-get install -y postgresql
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

.github/actions/test/entrypoint.sh vendored Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
pg_ctlcluster 12 main start
su postgres -c"psql -c\"alter user postgres with password 'postgres'\""
su cassandra -c "/opt/cassandra/bin/cassandra -R"
sleep 90
chmod +x ./clio_tests
./clio_tests


@@ -1,9 +1,9 @@
name: Build Clio
on:
push:
branches: [master, develop, develop-next]
branches: [master, release, develop, develop-next]
pull_request:
branches: [master, develop, develop-next]
branches: [master, release, develop, develop-next]
workflow_dispatch:
jobs:
@@ -11,59 +11,132 @@ jobs:
name: Lint
runs-on: ubuntu-20.04
steps:
- name: Get source
uses: actions/checkout@v3
- uses: actions/checkout@v3
- name: Run clang-format
uses: XRPLF/clio-gha/lint@main
uses: ./.github/actions/lint
build_clio:
name: Build
name: Build Clio
runs-on: [self-hosted, Linux]
needs: lint
strategy:
fail-fast: false
matrix:
type:
- suffix: deb
image: rippleci/clio-dpkg-builder:2022-09-17
script: dpkg
- suffix: rpm
image: rippleci/clio-rpm-builder:2022-09-17
script: rpm
container:
image: ${{ matrix.type.image }}
steps:
- uses: actions/checkout@v3
with:
path: clio
- name: Clone Clio repo
- name: Clone Clio packaging repo
uses: actions/checkout@v3
with:
path: clio_src
- name: Clone Clio CI repo
uses: actions/checkout@v3
with:
path: clio_ci
repository: 'XRPLF/clio-ci'
- name: Clone GitHub actions repo
uses: actions/checkout@v3
with:
repository: XRPLF/clio-gha
path: gha # must be the same as defined in XRPLF/clio-gha
path: clio-packages
repository: XRPLF/clio-packages
- name: Build
uses: XRPLF/clio-gha/build@main
shell: bash
run: |
export CLIO_ROOT=$(realpath clio)
if [ ${{ matrix.type.suffix }} == "rpm" ]; then
source /opt/rh/devtoolset-11/enable
fi
cmake -S clio-packages -B clio-packages/build -DCLIO_ROOT=$CLIO_ROOT
cmake --build clio-packages/build --parallel $(nproc)
cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests .
mv ./clio-packages/build/*.${{ matrix.type.suffix }} .
- name: Artifact packages
uses: actions/upload-artifact@v3
with:
name: clio_${{ matrix.type.suffix }}_packages
path: ${{ github.workspace }}/*.${{ matrix.type.suffix }}
- name: Artifact clio_tests
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: clio_tests
path: clio_tests
name: clio_tests-${{ matrix.type.suffix }}
path: ${{ github.workspace }}/clio_tests
- name: Artifact Debian package
sign:
name: Sign packages
needs: build_clio
runs-on: ubuntu-20.04
if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/release' || github.ref == 'refs/heads/develop'
env:
GPG_KEY_B64: ${{ secrets.GPG_KEY_B64 }}
GPG_KEY_PASS_B64: ${{ secrets.GPG_KEY_PASS_B64 }}
strategy:
fail-fast: false
matrix:
type:
- suffix: deb
image: ubuntu:20.04
script: dpkg
# - suffix: rpm
# image: centos:7
# script: rpm
container:
image: ${{ matrix.type.image }}
steps:
- uses: actions/checkout@v3
- name: Install dpkg-sig
run: |
apt-get update && apt-get install -y dpkg-sig gnupg
- name: Get package artifact
uses: actions/download-artifact@v3
with:
name: clio_${{ matrix.type.suffix }}_packages
- name: find packages
run: find . -name "*.${{ matrix.type.suffix }}"
- name: Sign packages
uses: ./.github/actions/sign
- name: Verify the signature
run: |
set -e
for PKG in $(ls *.deb); do
gpg --verify "${PKG}"
done
- name: Get short SHA
id: shortsha
run: echo "::set-output name=sha8::$(echo ${GITHUB_SHA} | cut -c1-8)"
- name: Artifact signed packages
uses: actions/upload-artifact@v2
with:
name: deb_package-${{ github.sha }}
path: clio_ci/build/*.deb
name: signed-clio-deb-packages-${{ steps.shortsha.outputs.sha8 }}
path: ${{ github.workspace }}/*.deb
test_clio:
name: Test Clio
runs-on: [self-hosted, Linux]
needs: build_clio
strategy:
fail-fast: false
matrix:
suffix: [rpm, deb]
steps:
- uses: actions/checkout@v3
- name: Get clio_tests artifact
uses: actions/download-artifact@v3
with:
name: clio_tests
name: clio_tests-${{ matrix.suffix }}
- name: Run tests
uses: XRPLF/clio-gha/test@main
timeout-minutes: 10
uses: ./.github/actions/test

.gitignore vendored

@@ -1,3 +1,4 @@
*clio*.log
build/
.vscode
.python-version


@@ -0,0 +1,24 @@
From 5cd9d09d960fa489a0c4379880cd7615b1c16e55 Mon Sep 17 00:00:00 2001
From: CJ Cobb <ccobb@ripple.com>
Date: Wed, 10 Aug 2022 12:30:01 -0400
Subject: [PATCH] Remove bitset operator !=
---
src/ripple/protocol/Feature.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h
index b3ecb099b..6424be411 100644
--- a/src/ripple/protocol/Feature.h
+++ b/src/ripple/protocol/Feature.h
@@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset<detail::numFeatures>
public:
using base::bitset;
using base::operator==;
- using base::operator!=;
using base::all;
using base::any;
--
2.32.0


@@ -1,11 +1,13 @@
set(RIPPLED_REPO "https://github.com/ripple/rippled.git")
set(RIPPLED_BRANCH "1.9.0")
set(RIPPLED_BRANCH "1.9.2")
set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
set(patch_command ! grep operator!= src/ripple/protocol/Feature.h || git apply < ${CMAKE_CURRENT_SOURCE_DIR}/CMake/deps/Remove-bitset-operator.patch)
message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}")
FetchContent_Declare(rippled
GIT_REPOSITORY "${RIPPLED_REPO}"
GIT_TAG "${RIPPLED_BRANCH}"
GIT_SHALLOW ON
PATCH_COMMAND "${patch_command}"
)
FetchContent_GetProperties(rippled)


@@ -2,6 +2,10 @@ cmake_minimum_required(VERSION 3.16.3)
project(clio)
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
message(FATAL_ERROR "GCC 11+ required for building clio")
endif()
option(BUILD_TESTS "Build tests" TRUE)
option(VERBOSE "Verbose build" TRUE)
@@ -12,7 +16,7 @@ endif()
if(NOT GIT_COMMIT_HASH)
if(VERBOSE)
message(WARNING "GIT_COMMIT_HASH not provided...looking for git")
message("GIT_COMMIT_HASH not provided...looking for git")
endif()
find_package(Git)
if(Git_FOUND)
@@ -50,6 +54,8 @@ target_sources(clio PRIVATE
src/backend/SimpleCache.cpp
## ETL
src/etl/ETLSource.cpp
src/etl/ProbingETLSource.cpp
src/etl/NFTHelpers.cpp
src/etl/ReportingETL.cpp
## Subscriptions
src/subscriptions/SubscriptionManager.cpp
@@ -68,6 +74,8 @@ target_sources(clio PRIVATE
src/rpc/handlers/AccountObjects.cpp
src/rpc/handlers/GatewayBalances.cpp
src/rpc/handlers/NoRippleCheck.cpp
# NFT
src/rpc/handlers/NFTInfo.cpp
# Ledger
src/rpc/handlers/Ledger.cpp
src/rpc/handlers/LedgerData.cpp
@@ -78,6 +86,7 @@ target_sources(clio PRIVATE
src/rpc/handlers/TransactionEntry.cpp
src/rpc/handlers/AccountTx.cpp
# Dex
src/rpc/handlers/BookChanges.cpp
src/rpc/handlers/BookOffers.cpp
# NFT
src/rpc/handlers/NFTOffers.cpp

CONTRIBUTING.md Normal file

@@ -0,0 +1,123 @@
# Contributing
Thank you for your interest in contributing to the `clio` project 🙏
To contribute, please:
1. Fork the repository under your own user.
2. Create a new branch on which to write your changes.
3. Write and test your code.
4. Ensure that your code compiles with the provided build engine, and update the build engine as part of your PR where needed and appropriate.
5. Where applicable, write test cases for your code and include those in `unittests`.
6. Ensure your code passes automated checks (e.g. clang-format).
7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change). See below for more details.
8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template.
> **Note:** Please make sure you read the [Style guide](#style-guide).
## Git commands
This section offers a detailed look at the git commands you will need to use to get your PR submitted.
Please note that there is more than one way to do this, and these commands are provided only for your convenience.
At this point it's assumed that you have already finished working on your feature/bug.
> **Important:** Before you issue any of the commands below, please hit the `Sync fork` button and make sure your fork's `develop` branch is up to date with the main `clio` repository.
``` bash
# Create a backup of your branch
git branch <your feature branch>_bk
# Rebase and squash commits into one
git checkout develop
git pull origin develop
git checkout <your feature branch>
git rebase -i develop
```
For each commit in the list other than the first one, please select `s` to squash.
After this is done you will have the opportunity to write a message for the squashed commit.
> **Hint:** Please use the **imperative mood** for your commit message, capitalizing the first word of the subject.
``` bash
# You should now have a single commit on top of a commit in `develop`
git log
```
> **Todo:** In case there are merge conflicts, please resolve them now
``` bash
# Use the same commit message as you did above
git commit -m 'Your message'
git rebase --continue
```
> **Important:** If you have no GPG keys set up, please follow [this tutorial](https://docs.github.com/en/authentication/managing-commit-signature-verification/adding-a-gpg-key-to-your-github-account)
``` bash
# Sign the commit with your GPG key and finally push your changes to the repo
git commit --amend -S
git push --force
```
## Fixing issues found during code review
While your code is in review it's possible that some changes will be requested by the reviewer.
This section describes the process of adding your fixes.
We assume that you already made the required changes on your feature branch.
``` bash
# Add the changed code
git add <paths to add>
# Add a folded commit message (so you can squash them later)
# while also signing it with your GPG key
git commit -S -m "[FOLD] Your commit message"
# And finally push your changes
git push
```
## After code review
Last but not least, when your PR is approved you still have to `Squash and merge` your code.
Luckily, there is a button for that towards the bottom of the PR's page on GitHub.
> **Important:** Please leave the automatically generated link to the PR in the subject line, **and** in the description field add `"Fixes #ISSUE_ID"` (replacing `ISSUE_ID` with yours).
> **Note:** See [issues](https://github.com/XRPLF/clio/issues) to find the `ISSUE_ID` for the feature/bug you were working on.
# Style guide
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments.
## Formatting
All code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
To change your code to conform use `clang-format -i <your changed files>`.
## Avoid
* Proliferation of nearly identical code.
* Proliferation of new files and classes unless it improves readability and/or compilation time.
* Unmanaged memory allocation and raw pointers.
* Macros (unless they add significant value).
* Lambda patterns (unless they add significant value).
* CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments.
* Importing new libraries unless there is a very good reason to do so.
## Seek to
* Extend functionality of existing code rather than creating new code.
* Prefer readability over terseness where important logic is concerned.
* Inline functions that are not used or are not likely to be used elsewhere in the codebase.
* Use clear and self-explanatory names for functions, variables, structs and classes.
* Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders.
* Provide as many comments as you feel that a competent programmer would need to understand what your code does.
# Maintainers
Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc.
## Code Review
PRs must be reviewed by at least one of the maintainers.
## Adding and Removing
New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected.
Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote.
## Existing Maintainers
* [cjcobb23](https://github.com/cjcobb23) (Ripple)
* [natenichols](https://github.com/natenichols) (Ripple)
* [legleux](https://github.com/legleux) (Ripple)
* [undertome](https://github.com/undertome) (Ripple)
* [godexsoft](https://github.com/godexsoft) (Ripple)


@@ -22,7 +22,7 @@ from which data can be extracted. The rippled node does not need to be running o
## Building
Clio is built with CMake. Clio requires c++20, and boost 1.75.0 or later.
Clio is built with CMake. Clio requires at least GCC-11 (C++20) and Boost 1.75.0 or later.
Use these instructions to build a Clio executable from the source. These instructions were tested on Ubuntu 20.04 LTS.
@@ -140,25 +140,38 @@ which can cause high latencies. A possible alternative to this is to just deploy
a database in each region, and the Clio nodes in each region use their region's database.
This is effectively two systems.
## Developing against `rippled` in standalone mode
If you wish to develop against a `rippled` instance running in standalone
mode, there are a few quirks of both clio and rippled to keep in mind.
You must:
1. Advance the `rippled` ledger to at least ledger 256
2. Wait 10 minutes before first starting clio against this standalone node.
## Logging
Clio provides several logging options, all are configurable via the config file and are detailed below.
`log_level`: The minimum level of severity at which the log message will be outputted.
Severity options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`.
`log_level`: The minimum level of severity at which the log message will be outputted.
Severity options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`. Defaults to `info`.
`log_to_console`: Enable/disable log output to console. Options are `true`/`false`.
`log_to_console`: Enable/disable log output to console. Options are `true`/`false`. Defaults to true.
`log_to_file`: Enable/disable log saving to files in persistent local storage. Options are `true`/`false`.
`log_directory`: Path to the directory where log files are stored. If such directory doesn't exist, Clio will create it. If not specified, logs are not written to a file.
`log_directory`: Path to the directory where log files are stored. If such directory doesn't exist, Clio will create it.
`log_rotation_size`: The max size of the log file in **megabytes** before it will rotate into a smaller file. Defaults to 2GB.
`log_rotation_size`: The max size of the log file in **megabytes** before it will rotate into a smaller file.
`log_directory_max_size`: The max size of the log directory in **megabytes** before old log files will be
deleted to free up space.
`log_directory_max_size`: The max size of the log directory in **megabytes** before old log files will be
deleted to free up space. Defaults to 50GB.
`log_rotation_hour_interval`: The time interval in **hours** after the last log rotation to automatically
rotate the current log file.
rotate the current log file. Defaults to 12 hours.
Note: time-based log rotation depends on size-based log rotation; if a
size-based log rotation occurs, the timer for the time-based rotation resets.
## Cassandra / Scylla Administration
Since Clio relies on either Cassandra or Scylla for its database backend, here are some important considerations:
- Scylla, by default, will reserve all free RAM on a machine for itself. If you are running `rippled` or other services on the same machine, restrict its memory usage using the `--memory` argument: https://docs.scylladb.com/getting-started/scylla-in-a-shared-environment/

docker/centos/Dockerfile Normal file

@@ -0,0 +1,49 @@
# FROM centos:7 as deps
FROM centos:7 as build
ENV CLIO_DIR=/opt/clio/
# ENV OPENSSL_DIR=/opt/openssl
RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
RUN yum install -y devtoolset-11
ENV version=3.16
ENV build=3
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
COPY docker/shared/install_cmake.sh /install_cmake.sh
RUN /install_cmake.sh 3.16.3 /usr/local
RUN source /opt/rh/devtoolset-11/enable
WORKDIR /tmp
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
COPY docker/centos/build_git_centos7.sh build_git_centos7.sh
RUN ./build_git_centos7.sh
RUN git clone https://github.com/openssl/openssl
WORKDIR /tmp/openssl
RUN git checkout OpenSSL_1_1_1q
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
make -j $(nproc) && \
make install_sw
WORKDIR /tmp
# FROM centos:7 as build
RUN git clone https://github.com/xrplf/clio.git
COPY docker/shared/build_boost.sh build_boost.sh
ENV OPENSSL_ROOT=/opt/local/openssl
ENV BOOST_ROOT=/boost
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
RUN yum install -y bison flex
RUN yum install -y rpmdevtools rpmlint
RUN source /opt/rh/devtoolset-11/enable && cd /tmp/clio && \
cmake -B build -DBUILD_TESTS=1 && \
cmake --build build --parallel $(nproc)
RUN mkdir output
RUN strip clio/build/clio_server && strip clio/build/clio_tests
RUN cp clio/build/clio_tests output/ && cp clio/build/clio_server output/
RUN cp clio/example-config.json output/example-config.json
FROM centos:7
COPY --from=build /tmp/output /clio
RUN mkdir -p /opt/clio/etc && mv /clio/example-config.json /opt/clio/etc/config.json
CMD ["/clio/clio_server", "/opt/clio/etc/config.json"]


@@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -ex
GIT_VERSION="2.37.1"
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
tar zxvf git-${GIT_VERSION}.tar.gz
cd git-${GIT_VERSION}
yum install -y centos-release-scl epel-release
yum update -y
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel
source /opt/rh/devtoolset-11/enable
make configure
./configure
make git -j$(nproc)
make install git
git --version | cut -d ' ' -f3

docker/centos/install_cmake.sh Executable file

@@ -0,0 +1,11 @@
#!/bin/bash
set -eo pipefail
CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake


@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -exu
#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
# it's either those or link=static that halves the failures. probably link=static
BOOST_VERSION=$1
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
echo "BOOST_VERSION: ${BOOST_VERSION}"
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
mkdir -p /boost && mv boost /boost && mv stage /boost


@@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -ex
GIT_VERSION="2.37.1"
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
tar zxvf git-${GIT_VERSION}.tar.gz
cd git-${GIT_VERSION}
yum install -y centos-release-scl epel-release
yum update -y
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel
source /opt/rh/devtoolset-11/enable
make configure
./configure
make git -j$(nproc)
make install git
git --version | cut -d ' ' -f3


@@ -0,0 +1,34 @@
FROM centos:7
ENV CLIO_DIR=/opt/clio/
# ENV OPENSSL_DIR=/opt/openssl
RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
RUN yum install -y devtoolset-11
ENV version=3.16
ENV build=3
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
COPY install_cmake.sh /install_cmake.sh
RUN /install_cmake.sh 3.16.3 /usr/local
RUN source /opt/rh/devtoolset-11/enable
WORKDIR /tmp
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
COPY build_git_centos7.sh build_git_centos7.sh
RUN ./build_git_centos7.sh
RUN git clone https://github.com/openssl/openssl
WORKDIR /tmp/openssl
RUN git checkout OpenSSL_1_1_1q
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
make -j $(nproc) && \
make install_sw
WORKDIR /tmp
RUN git clone https://github.com/xrplf/clio.git
COPY build_boost.sh build_boost.sh
ENV OPENSSL_ROOT=/opt/local/openssl
ENV BOOST_ROOT=/boost
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
RUN yum install -y bison flex
RUN source /opt/rh/devtoolset-11/enable && \
cd /tmp/clio && cmake -B build -Dtests=0 -Dlocal_libarchive=1 -Dunity=0 -DBUILD_TESTS=0 && cmake --build build --parallel $(nproc)


@@ -0,0 +1,11 @@
#!/bin/bash
set -eo pipefail
CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake

docker/shared/build_boost.sh Executable file

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -exu
#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
# it's either those or link=static that halves the failures. probably link=static
BOOST_VERSION=$1
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
echo "BOOST_VERSION: ${BOOST_VERSION}"
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
mkdir -p /boost && mv boost /boost && mv stage /boost

docker/shared/install_cmake.sh Executable file

@@ -0,0 +1,11 @@
#!/bin/bash
set -eo pipefail
CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake


@@ -0,0 +1,3 @@
#!/bin/bash
set -e

docker/ubuntu/Dockerfile Normal file

@@ -0,0 +1,24 @@
FROM ubuntu:20.04 AS boost
RUN apt-get update && apt-get install -y build-essential
ARG BOOST_VERSION_=1_75_0
ARG BOOST_VERSION=1.75.0
COPY docker/shared/build_boost.sh .
RUN apt install -y curl
RUN ./build_boost.sh ${BOOST_VERSION}
ENV BOOST_ROOT=/boost
FROM ubuntu:20.04 AS build
ENV BOOST_ROOT=/boost
COPY --from=boost /boost /boost
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install --no-install-recommends -y build-essential software-properties-common pkg-config libssl-dev wget curl gpg git zlib1g-dev bison flex autoconf lsb-release
RUN apt install -y gpg-agent
RUN wget https://apt.llvm.org/llvm.sh
RUN chmod +x llvm.sh && ./llvm.sh 14 && ./llvm.sh 15
# COPY . /clio
## Install cmake
ARG CMAKE_VERSION=3.16.3
COPY docker/shared/install_cmake.sh .
RUN ./install_cmake.sh ${CMAKE_VERSION}
ENV PATH="/opt/local/cmake/bin:$PATH"


@@ -31,12 +31,10 @@
},
"log_level":"debug",
"log_to_console": true,
"log_to_file": true,
"log_directory":"./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"online_delete":0,
"extractor_threads":8,
"read_only":false
}


@@ -19,7 +19,6 @@ BackendInterface::writeLedgerObject(
std::string&& blob)
{
assert(key.size() == sizeof(ripple::uint256));
ripple::uint256 key256 = ripple::uint256::fromVoid(key.data());
doWriteLedgerObject(std::move(key), seq, std::move(blob));
}
@@ -276,7 +275,8 @@ BackendInterface::fetchLedgerPage(
else if (!outOfOrder)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " incorrect successor table. key = "
<< __func__
<< " deleted or non-existent object in successor table. key = "
<< ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j)
@@ -284,7 +284,6 @@ BackendInterface::fetchLedgerPage(
msg << " - " << ripple::strHex(keys[j]);
}
BOOST_LOG_TRIVIAL(error) << __func__ << msg.str();
assert(false);
}
}
if (keys.size() && !reachedEnd)


@@ -162,12 +162,12 @@ public:
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context& yield) const = 0;
virtual AccountTransactions
virtual TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursor,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const = 0;
virtual std::vector<TransactionAndMetadata>
@@ -180,6 +180,21 @@ public:
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
// *** NFT methods
virtual std::optional<NFT>
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const = 0;
virtual TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const = 0;
// *** state data methods
std::optional<Blob>
fetchLedgerObject(
@@ -285,9 +300,15 @@ public:
std::string&& transaction,
std::string&& metadata) = 0;
virtual void
writeNFTs(std::vector<NFTsData>&& data) = 0;
virtual void
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
virtual void
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;
virtual void
writeSuccessor(
std::string&& key,


@@ -1,7 +1,9 @@
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <backend/CassandraBackend.h>
#include <backend/DBHelpers.h>
#include <functional>
#include <unordered_map>
namespace Backend {
// Type alias for async completion handlers
@@ -178,7 +180,7 @@ CassandraBackend::doWriteLedgerObject(
if (range)
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(seq, key)),
std::make_tuple(seq, key),
[this](auto& params) {
auto& [sequence, key] = params.data;
@@ -190,7 +192,7 @@ CassandraBackend::doWriteLedgerObject(
"ledger_diff");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(std::move(key), seq, std::move(blob))),
std::make_tuple(std::move(key), seq, std::move(blob)),
[this](auto& params) {
auto& [key, sequence, blob] = params.data;
@@ -215,7 +217,7 @@ CassandraBackend::writeSuccessor(
assert(successor.size() != 0);
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(std::move(key), seq, std::move(successor))),
std::make_tuple(std::move(key), seq, std::move(successor)),
[this](auto& params) {
auto& [key, sequence, successor] = params.data;
@@ -234,7 +236,7 @@ CassandraBackend::writeLedger(
{
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(ledgerInfo.seq, std::move(header))),
std::make_tuple(ledgerInfo.seq, std::move(header)),
[this](auto& params) {
auto& [sequence, header] = params.data;
CassandraStatement statement{insertLedgerHeader_};
@@ -245,7 +247,7 @@ CassandraBackend::writeLedger(
"ledger");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(ledgerInfo.hash, ledgerInfo.seq)),
std::make_tuple(ledgerInfo.hash, ledgerInfo.seq),
[this](auto& params) {
auto& [hash, sequence] = params.data;
CassandraStatement statement{insertLedgerHash_};
@@ -256,6 +258,7 @@ CassandraBackend::writeLedger(
"ledger_hash");
ledgerSequence_ = ledgerInfo.seq;
}
void
CassandraBackend::writeAccountTransactions(
std::vector<AccountTransactionsData>&& data)
@@ -266,11 +269,11 @@ CassandraBackend::writeAccountTransactions(
{
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(
std::make_tuple(
std::move(account),
record.ledgerSequence,
record.transactionIndex,
record.txHash)),
record.txHash),
[this](auto& params) {
CassandraStatement statement(insertAccountTx_);
auto& [account, lgrSeq, txnIdx, hash] = params.data;
@@ -283,6 +286,31 @@ CassandraBackend::writeAccountTransactions(
}
}
}
void
CassandraBackend::writeNFTTransactions(std::vector<NFTTransactionsData>&& data)
{
for (NFTTransactionsData const& record : data)
{
makeAndExecuteAsyncWrite(
this,
std::make_tuple(
record.tokenID,
record.ledgerSequence,
record.transactionIndex,
record.txHash),
[this](auto const& params) {
CassandraStatement statement(insertNFTTx_);
auto const& [tokenID, lgrSeq, txnIdx, txHash] = params.data;
statement.bindNextBytes(tokenID);
statement.bindNextIntTuple(lgrSeq, txnIdx);
statement.bindNextBytes(txHash);
return statement;
},
"nf_token_transactions");
}
}
void
CassandraBackend::writeTransaction(
std::string&& hash,
@@ -296,7 +324,7 @@ CassandraBackend::writeTransaction(
makeAndExecuteAsyncWrite(
this,
std::move(std::make_pair(seq, hash)),
std::make_pair(seq, hash),
[this](auto& params) {
CassandraStatement statement{insertLedgerTransaction_};
statement.bindNextInt(params.data.first);
@@ -306,12 +334,12 @@ CassandraBackend::writeTransaction(
"ledger_transaction");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(
std::make_tuple(
std::move(hash),
seq,
date,
std::move(transaction),
std::move(metadata))),
std::move(metadata)),
[this](auto& params) {
CassandraStatement statement{insertTransaction_};
auto& [hash, sequence, date, transaction, metadata] = params.data;
@@ -325,6 +353,43 @@ CassandraBackend::writeTransaction(
"transaction");
}
void
CassandraBackend::writeNFTs(std::vector<NFTsData>&& data)
{
for (NFTsData const& record : data)
{
makeAndExecuteAsyncWrite(
this,
std::make_tuple(
record.tokenID,
record.ledgerSequence,
record.owner,
record.isBurned),
[this](auto const& params) {
CassandraStatement statement{insertNFT_};
auto const& [tokenID, lgrSeq, owner, isBurned] = params.data;
statement.bindNextBytes(tokenID);
statement.bindNextInt(lgrSeq);
statement.bindNextBytes(owner);
statement.bindNextBoolean(isBurned);
return statement;
},
"nf_tokens");
makeAndExecuteAsyncWrite(
this,
std::make_tuple(record.tokenID),
[this](auto const& params) {
CassandraStatement statement{insertIssuerNFT_};
auto const& [tokenID] = params.data;
statement.bindNextBytes(ripple::nft::getIssuer(tokenID));
statement.bindNextBytes(tokenID);
return statement;
},
"issuer_nf_tokens");
}
}
std::optional<LedgerRange>
CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
{
@@ -502,21 +567,119 @@ CassandraBackend::fetchAllTransactionHashesInLedger(
return hashes;
}
AccountTransactions
std::optional<NFT>
CassandraBackend::fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
CassandraStatement statement{selectNFT_};
statement.bindNextBytes(tokenID);
statement.bindNextInt(ledgerSequence);
CassandraResult response = executeAsyncRead(statement, yield);
if (!response)
return {};
NFT result;
result.tokenID = tokenID;
result.ledgerSequence = response.getUInt32();
result.owner = response.getBytes();
result.isBurned = response.getBool();
return result;
}
TransactionsAndCursor
CassandraBackend::fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const
{
auto cursor = cursorIn;
auto rng = fetchLedgerRange();
if (!rng)
return {{}, {}};
CassandraStatement statement = forward
? CassandraStatement(selectNFTTxForward_)
: CassandraStatement(selectNFTTx_);
statement.bindNextBytes(tokenID);
if (cursor)
{
statement.bindNextIntTuple(
cursor->ledgerSequence, cursor->transactionIndex);
BOOST_LOG_TRIVIAL(debug) << " token_id = " << ripple::strHex(tokenID)
<< " tuple = " << cursor->ledgerSequence
<< " : " << cursor->transactionIndex;
}
else
{
int const seq = forward ? rng->minSequence : rng->maxSequence;
int const placeHolder =
forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindNextIntTuple(placeHolder, placeHolder);
BOOST_LOG_TRIVIAL(debug)
<< " token_id = " << ripple::strHex(tokenID) << " idx = " << seq
<< " tuple = " << placeHolder;
}
statement.bindNextUInt(limit);
CassandraResult result = executeAsyncRead(statement, yield);
if (!result.hasResult())
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows returned";
return {};
}
std::vector<ripple::uint256> hashes = {};
auto numRows = result.numRows();
BOOST_LOG_TRIVIAL(info) << "num_rows = " << numRows;
do
{
hashes.push_back(result.getUInt256());
if (--numRows == 0)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " setting cursor";
auto const [lgrSeq, txnIdx] = result.getInt64Tuple();
cursor = {
static_cast<std::uint32_t>(lgrSeq),
static_cast<std::uint32_t>(txnIdx)};
if (forward)
++cursor->transactionIndex;
}
} while (result.nextRow());
auto txns = fetchTransactions(hashes, yield);
BOOST_LOG_TRIVIAL(debug) << __func__ << " txns = " << txns.size();
if (txns.size() == limit)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " returning cursor";
return {txns, cursor};
}
return {txns, {}};
}
TransactionsAndCursor
CassandraBackend::fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool const forward,
std::optional<AccountTransactionsCursor> const& cursorIn,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const
{
auto rng = fetchLedgerRange();
if (!rng)
return {{}, {}};
auto keylet = ripple::keylet::account(account);
auto cursor = cursorIn;
CassandraStatement statement = [this, forward]() {
if (forward)
return CassandraStatement{selectAccountTxForward_};
@@ -524,6 +687,7 @@ CassandraBackend::fetchAccountTransactions(
return CassandraStatement{selectAccountTx_};
}();
auto cursor = cursorIn;
statement.bindNextBytes(account);
if (cursor)
{
@@ -535,8 +699,8 @@ CassandraBackend::fetchAccountTransactions(
}
else
{
int seq = forward ? rng->minSequence : rng->maxSequence;
int placeHolder =
int const seq = forward ? rng->minSequence : rng->maxSequence;
int const placeHolder =
forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindNextIntTuple(placeHolder, placeHolder);
@@ -584,6 +748,7 @@ CassandraBackend::fetchAccountTransactions(
return {txns, {}};
}
std::optional<ripple::uint256>
CassandraBackend::doFetchSuccessorKey(
ripple::uint256 key,
@@ -895,8 +1060,8 @@ CassandraBackend::open(bool readOnly)
cass_cluster_set_credentials(
cluster, username.c_str(), getString("password").c_str());
}
int threads = getInt("threads") ? *getInt("threads")
: std::thread::hardware_concurrency();
int threads =
getInt("threads").value_or(std::thread::hardware_concurrency());
rc = cass_cluster_set_num_threads_io(cluster, threads);
if (rc != CASS_OK)
@@ -1179,6 +1344,64 @@ CassandraBackend::open(bool readOnly)
<< " LIMIT 1";
if (!executeSimpleStatement(query.str()))
continue;
query.str("");
query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "nf_tokens"
<< " ("
<< " token_id blob,"
<< " sequence bigint,"
<< " owner blob,"
<< " is_burned boolean,"
<< " PRIMARY KEY (token_id, sequence)"
<< " )"
<< " WITH CLUSTERING ORDER BY (sequence DESC)"
<< " AND default_time_to_live = " << ttl;
if (!executeSimpleStatement(query.str()))
continue;
query.str("");
query << "SELECT * FROM " << tablePrefix << "nf_tokens"
<< " LIMIT 1";
if (!executeSimpleStatement(query.str()))
continue;
query.str("");
query << "CREATE TABLE IF NOT EXISTS " << tablePrefix
<< "issuer_nf_tokens"
<< " ("
<< " issuer blob,"
<< " token_id blob,"
<< " PRIMARY KEY (issuer, token_id)"
<< " )";
if (!executeSimpleStatement(query.str()))
continue;
query.str("");
query << "SELECT * FROM " << tablePrefix << "issuer_nf_tokens"
<< " LIMIT 1";
if (!executeSimpleStatement(query.str()))
continue;
query.str("");
query << "CREATE TABLE IF NOT EXISTS " << tablePrefix
<< "nf_token_transactions"
<< " ("
<< " token_id blob,"
<< " seq_idx tuple<bigint, bigint>,"
<< " hash blob,"
<< " PRIMARY KEY (token_id, seq_idx)"
<< " )"
<< " WITH CLUSTERING ORDER BY (seq_idx DESC)"
<< " AND default_time_to_live = " << ttl;
if (!executeSimpleStatement(query.str()))
continue;
query.str("");
query << "SELECT * FROM " << tablePrefix << "nf_token_transactions"
<< " LIMIT 1";
if (!executeSimpleStatement(query.str()))
continue;
setupSessionAndTable = true;
}
@@ -1296,6 +1519,57 @@ CassandraBackend::open(bool readOnly)
if (!selectAccountTxForward_.prepareStatement(query, session_.get()))
continue;
query.str("");
query << "INSERT INTO " << tablePrefix << "nf_tokens"
<< " (token_id,sequence,owner,is_burned)"
<< " VALUES (?,?,?,?)";
if (!insertNFT_.prepareStatement(query, session_.get()))
continue;
query.str("");
query << "SELECT sequence,owner,is_burned"
<< " FROM " << tablePrefix << "nf_tokens WHERE"
<< " token_id = ? AND"
<< " sequence <= ?"
<< " ORDER BY sequence DESC"
<< " LIMIT 1";
if (!selectNFT_.prepareStatement(query, session_.get()))
continue;
query.str("");
query << "INSERT INTO " << tablePrefix << "issuer_nf_tokens"
<< " (issuer,token_id)"
<< " VALUES (?,?)";
if (!insertIssuerNFT_.prepareStatement(query, session_.get()))
continue;
query.str("");
query << "INSERT INTO " << tablePrefix << "nf_token_transactions"
<< " (token_id,seq_idx,hash)"
<< " VALUES (?,?,?)";
if (!insertNFTTx_.prepareStatement(query, session_.get()))
continue;
query.str("");
query << "SELECT hash,seq_idx"
<< " FROM " << tablePrefix << "nf_token_transactions WHERE"
<< " token_id = ? AND"
<< " seq_idx < ?"
<< " ORDER BY seq_idx DESC"
<< " LIMIT ?";
if (!selectNFTTx_.prepareStatement(query, session_.get()))
continue;
query.str("");
query << "SELECT hash,seq_idx"
<< " FROM " << tablePrefix << "nf_token_transactions WHERE"
<< " token_id = ? AND"
<< " seq_idx >= ?"
<< " ORDER BY seq_idx ASC"
<< " LIMIT ?";
if (!selectNFTTxForward_.prepareStatement(query, session_.get()))
continue;
query.str("");
query << " INSERT INTO " << tablePrefix << "ledgers "
<< " (sequence, header) VALUES(?,?)";


@@ -115,7 +115,7 @@ public:
throw std::runtime_error(
"CassandraStatement::bindNextBoolean - statement_ is null");
CassError rc = cass_statement_bind_bool(
statement_, 1, static_cast<cass_bool_t>(val));
statement_, curBindingIndex_, static_cast<cass_bool_t>(val));
if (rc != CASS_OK)
{
std::stringstream ss;
@@ -481,6 +481,33 @@ public:
return {first, second};
}
// TODO: should be replaced with a templated implementation as is very
// similar to other getters
bool
getBool()
{
if (!row_)
{
std::stringstream msg;
msg << __func__ << " - no result";
BOOST_LOG_TRIVIAL(error) << msg.str();
throw std::runtime_error(msg.str());
}
cass_bool_t val;
CassError rc =
cass_value_get_bool(cass_row_get_column(row_, curGetIndex_), &val);
if (rc != CASS_OK)
{
std::stringstream msg;
msg << __func__ << " - error getting value: " << rc << ", "
<< cass_error_desc(rc);
BOOST_LOG_TRIVIAL(error) << msg.str();
throw std::runtime_error(msg.str());
}
++curGetIndex_;
return val;
}
~CassandraResult()
{
if (result_ != nullptr)
@@ -599,6 +626,12 @@ private:
CassandraPreparedStatement insertAccountTx_;
CassandraPreparedStatement selectAccountTx_;
CassandraPreparedStatement selectAccountTxForward_;
CassandraPreparedStatement insertNFT_;
CassandraPreparedStatement selectNFT_;
CassandraPreparedStatement insertIssuerNFT_;
CassandraPreparedStatement insertNFTTx_;
CassandraPreparedStatement selectNFTTx_;
CassandraPreparedStatement selectNFTTxForward_;
CassandraPreparedStatement insertLedgerHeader_;
CassandraPreparedStatement insertLedgerHash_;
CassandraPreparedStatement updateLedgerRange_;
@@ -615,9 +648,6 @@ private:
// maximum number of concurrent in flight requests. New requests will wait
// for earlier requests to finish if this limit is exceeded
std::uint32_t maxRequestsOutstanding = 10000;
// we keep this small because the indexer runs in the background, and we
// don't want the database to be swamped when the indexer is running
std::uint32_t indexerMaxRequestsOutstanding = 10;
mutable std::atomic_uint32_t numRequestsOutstanding_ = 0;
// mutex and condition_variable to limit the number of concurrent in flight
@@ -683,12 +713,12 @@ public:
open_ = false;
}
AccountTransactions
TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursor,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const override;
bool
@@ -852,6 +882,20 @@ public:
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
std::optional<NFT>
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const override;
// Synchronously fetch the object with key key, as of ledger with sequence
// sequence
std::optional<Blob>
@@ -941,6 +985,9 @@ public:
writeAccountTransactions(
std::vector<AccountTransactionsData>&& data) override;
void
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) override;
void
writeTransaction(
std::string&& hash,
@@ -949,6 +996,9 @@ public:
std::string&& transaction,
std::string&& metadata) override;
void
writeNFTs(std::vector<NFTsData>&& data) override;
void
startWrites() const override
{
@@ -1014,6 +1064,7 @@ public:
{
return numRequestsOutstanding_ < maxRequestsOutstanding;
}
inline bool
finishedAllRequests() const
{


@@ -9,8 +9,8 @@
#include <backend/Pg.h>
#include <backend/Types.h>
/// Struct used to keep track of what to write to transactions and
/// account_transactions tables in Postgres
/// Struct used to keep track of what to write to
/// account_transactions/account_tx tables
struct AccountTransactionsData
{
boost::container::flat_set<ripple::AccountID> accounts;
@@ -32,6 +32,57 @@ struct AccountTransactionsData
AccountTransactionsData() = default;
};
/// Represents a link from a tx to an NFT that was targeted/modified/created
/// by it. Gets written to nf_token_transactions table and the like.
struct NFTTransactionsData
{
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
ripple::uint256 txHash;
NFTTransactionsData(
ripple::uint256 const& tokenID,
ripple::TxMeta const& meta,
ripple::uint256 const& txHash)
: tokenID(tokenID)
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, txHash(txHash)
{
}
};
/// Represents an NFT state at a particular ledger. Gets written to nf_tokens
/// table and the like.
struct NFTsData
{
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
// The transaction index is only stored because we want to store only the
// final state of an NFT per ledger. Since we pull this from transactions
// we keep track of which tx index created this so we can de-duplicate, as
// it is possible for one ledger to have multiple txs that change the
// state of the same NFT.
std::uint32_t transactionIndex;
ripple::AccountID owner;
bool isBurned;
NFTsData(
ripple::uint256 const& tokenID,
ripple::AccountID const& owner,
ripple::TxMeta const& meta,
bool isBurned)
: tokenID(tokenID)
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, owner(owner)
, isBurned(isBurned)
{
}
};
template <class T>
inline bool
isOffer(T const& object)


@@ -833,7 +833,7 @@ PgPool::checkout()
else if (connections_ < config_.max_connections)
{
++connections_;
ret = std::make_unique<Pg>(config_, ioc_, stop_, mutex_);
ret = std::make_unique<Pg>(config_, ioc_);
}
// Otherwise, wait until a connection becomes available or we stop.
else
@@ -1680,7 +1680,6 @@ getLedger(
whichLedger,
std::shared_ptr<PgPool>& pgPool)
{
ripple::LedgerInfo lgrInfo;
std::stringstream sql;
sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, "
"total_coins, closing_time, prev_closing_time, close_time_res, "


@@ -262,8 +262,6 @@ class Pg
PgConfig const& config_;
boost::asio::io_context::strand strand_;
bool& stop_;
std::mutex& mutex_;
asio_socket_type socket_{nullptr, [](boost::asio::ip::tcp::socket*) {}};
@@ -364,14 +362,9 @@ public:
*
* @param config Config parameters.
* @param j Logger object.
* @param stop Reference to connection pool's stop flag.
* @param mutex Reference to connection pool's mutex.
*/
Pg(PgConfig const& config,
boost::asio::io_context& ctx,
bool& stop,
std::mutex& mutex)
: config_(config), strand_(ctx), stop_(stop), mutex_(mutex)
Pg(PgConfig const& config, boost::asio::io_context& ctx)
: config_(config), strand_(ctx)
{
}
};


@@ -2,6 +2,7 @@
#include <boost/format.hpp>
#include <backend/PostgresBackend.h>
#include <thread>
namespace Backend {
// Type alias for async completion handlers
@@ -77,6 +78,12 @@ PostgresBackend::writeAccountTransactions(
}
}
void
PostgresBackend::writeNFTTransactions(std::vector<NFTTransactionsData>&& data)
{
throw std::runtime_error("Not implemented");
}
void
PostgresBackend::doWriteLedgerObject(
std::string&& key,
@@ -152,6 +159,12 @@ PostgresBackend::writeTransaction(
<< '\t' << "\\\\x" << ripple::strHex(metadata) << '\n';
}
void
PostgresBackend::writeNFTs(std::vector<NFTsData>&& data)
{
throw std::runtime_error("Not implemented");
}
std::uint32_t
checkResult(PgResult const& res, std::uint32_t const numFieldsExpected)
{
@@ -419,6 +432,15 @@ PostgresBackend::fetchAllTransactionHashesInLedger(
return {};
}
std::optional<NFT>
PostgresBackend::fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
throw std::runtime_error("Not implemented");
}
std::optional<ripple::uint256>
PostgresBackend::doFetchSuccessorKey(
ripple::uint256 key,
@@ -637,12 +659,25 @@ PostgresBackend::fetchLedgerDiff(
return {};
}
AccountTransactions
// TODO this implementation and fetchAccountTransactions should be
// generalized
TransactionsAndCursor
PostgresBackend::fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool forward,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const
{
throw std::runtime_error("Not implemented");
}
TransactionsAndCursor
PostgresBackend::fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursor,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const
{
PgQuery pgQuery(pgPool_);


@@ -62,6 +62,20 @@ public:
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
std::optional<NFT>
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const override;
TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const override;
std::vector<LedgerObject>
fetchLedgerDiff(
std::uint32_t const ledgerSequence,
@@ -87,12 +101,12 @@ public:
std::uint32_t const sequence,
boost::asio::yield_context& yield) const override;
AccountTransactions
TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<AccountTransactionsCursor> const& cursor,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const override;
void
@@ -120,10 +134,16 @@ public:
std::string&& transaction,
std::string&& metadata) override;
void
writeNFTs(std::vector<NFTsData>&& data) override;
void
writeAccountTransactions(
std::vector<AccountTransactionsData>&& data) override;
void
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) override;
void
open(bool readOnly) override;


@@ -1,174 +1,132 @@
The data model used by clio is different than that used by rippled.
rippled uses what is known as a SHAMap, which is a tree structure, with
actual ledger and transaction data at the leaves of the tree. Looking up a record
is a tree traversal, where the key is used to determine the path to the proper
leaf node. The path from root to leaf is used as a proof-tree on the p2p network,
where nodes can prove that a piece of data is present in a ledger by sending
the path from root to leaf. Other nodes can verify this path and be certain
that the data does actually exist in the ledger in question.
# Clio Backend
## Background
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. As of right now, Cassandra is the only supported database that is production-ready. However, support for more databases like PostgreSQL and DynamoDB may be added in future versions. Support for new database types can be added by creating implementations that provide the virtual methods of `BackendInterface.h`, then using the Factory Object Design Pattern to add logic statements to `BackendFactory.h` that return the new database interface for a specific `type` in Clio's configuration file.
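As a loose sketch of that factory dispatch (names illustrative; the real logic lives in `BackendFactory.h`):

```cpp
#include <memory>
#include <stdexcept>
#include <string>

struct BackendInterface
{
    virtual ~BackendInterface() = default;
};
struct CassandraBackend : BackendInterface
{
};

// Illustrative only: each supported database `type` from the config maps
// to a concrete implementation of the backend interface.
std::unique_ptr<BackendInterface>
makeBackend(std::string const& type)
{
    if (type == "cassandra")
        return std::make_unique<CassandraBackend>();
    // a new database implementation would add its own branch here
    throw std::runtime_error("Unknown database type: " + type);
}
```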
clio instead flattens the data model, so lookups are O(1). This results in time
and space savings. This is possible because clio does not participate in the peer
to peer protocol, and thus does not need to verify any data. clio fully trusts the
rippled nodes that are being used as a data source.
## Data Model
The data model used by Clio to read and write ledger data is different from what Rippled uses. Rippled uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored at the leaf vertices of the tree. Thus, looking up a record located at a leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record. Rippled nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other Rippled nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized Rippled nodes.
clio uses certain features of database query languages to make this happen. Many
databases provide the necessary features to implement the clio data model. At the
time of writing, the data model is implemented in PostgreSQL and CQL (the query
language used by Apache Cassandra and ScyllaDB).
Since Clio only extracts past validated ledger data from a group of trusted Rippled nodes, it can be safely assumed that these ledger data are correct without the need to validate with other nodes in the XRP peer-to-peer network. Because of this, Clio is able to use a flattened data model to store the past validated ledger data, which allows for direct record lookup with much faster constant time operations.
The below examples are a sort of pseudo query language
There are three main types of data in each XRP ledger version, they are [Ledger Header](https://xrpl.org/ledger-header.html), [Transaction Set](https://xrpl.org/transaction-formats.html) and [State Data](https://xrpl.org/ledger-object-types.html). Due to the structural differences of the different types of databases, Clio may choose to represent these data using a different schema for each unique database type.
**Keywords**
*Sequence*: A unique, incrementing identification number used to label different ledger versions.
*Hash*: The SHA-512Half (calculate SHA-512 and take the first 256 bits) hash of various ledger data, such as the entire ledger or specific ledger objects.
*Ledger Object*: A [binary-encoded](https://xrpl.org/serialization.html) STObject containing specific data (i.e. metadata, transaction data).
*Metadata*: The data containing [detailed information](https://xrpl.org/transaction-metadata.html#transaction-metadata) on the outcome of a specific transaction, regardless of whether the transaction was successful.
*Transaction data*: The data containing the [full details](https://xrpl.org/transaction-common-fields.html) of a specific transaction.
*Object Index*: The pseudo-random unique identifier of a ledger object, created by hashing the data of the object.
## Cassandra Implementation
Cassandra is a distributed wide-column NoSQL database designed to handle large data throughput with high availability and no single point of failure. By leveraging Cassandra, Clio will be able to quickly and reliably scale up when needed simply by adding more Cassandra nodes to the Cassandra cluster configuration.
In Cassandra, Clio creates nine tables to store the ledger data: `ledger_transactions`, `transactions`, `ledger_hashes`, `ledger_range`, `objects`, `ledgers`, `diff`, `account_tx`, and `successor`. Their schemas and how they work are detailed below.
*Note: if you would like to visually explore the data structure of the Cassandra database, you can first run the Clio server with the database `type` configured as `cassandra` to fill ledger data from Rippled nodes into Cassandra, then use a GUI database management tool like [DataStax's OpsCenter](https://docs.datastax.com/en/install/6.0/install/opscInstallOpsc.html) to interactively view it.*
### `ledger_transactions`
```
CREATE TABLE clio.ledger_transactions (
ledger_sequence bigint, # The sequence number of the ledger version
hash blob, # Hash of a transaction in this ledger version
PRIMARY KEY (ledger_sequence, hash)
) WITH CLUSTERING ORDER BY (hash ASC) ...
```
This table stores the hashes of all transactions in a given ledger sequence, ordered by hash value in ascending order.
### `transactions`
```
CREATE TABLE clio.transactions (
hash blob PRIMARY KEY, # The transaction hash
date bigint, # Date of the transaction
ledger_sequence bigint, # The sequence in which the transaction was validated
metadata blob, # Metadata of the transaction
transaction blob # Data of the transaction
) ...
```
This table stores the full transaction data and metadata of every validated transaction, with the transaction hash as the primary key.
A common query pattern is fetching all transactions in a ledger. In PostgreSQL,
nothing special is needed for this. We just query:
To look up all the transactions that were validated in a ledger version with sequence `n`, one can first get all the transaction hashes in that ledger version by querying `SELECT * FROM ledger_transactions WHERE ledger_sequence = n;`, then iterate through the list of hashes and query `SELECT * FROM transactions WHERE hash = one_of_the_hashes_from_the_list;` to get the detailed transaction data.
### `ledger_hashes`
```
CREATE TABLE clio.ledger_hashes (
hash blob PRIMARY KEY, # Hash of entire ledger version's data
sequence bigint # The sequence of the ledger version
) ...
```
This table maps the hash of each ledger version to its sequence.
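For example, mapping a known ledger hash back to its sequence might look like this sketch (`0x...` stands for a real 32-byte hash):
```
SELECT sequence FROM clio.ledger_hashes WHERE hash = 0x...;
```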
### `ledger_range`
```
CREATE TABLE clio.ledger_range (
is_latest boolean PRIMARY KEY, # Whether this record marks the end of the stored range
sequence bigint # The sequence number of the starting/stopping range
) ...
```
This table marks the range of ledger versions stored in the database. There are only ever two records in this table, with `false` and `true` values for `is_latest`, marking the starting and ending sequences of the ledger range. The `is_latest = true` record is only updated once all data for a given ledger sequence has been written, so it also acts as a synchronization mechanism that prevents the application from reading a ledger whose data has not yet been fully written.
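For instance, fetching the most recently written ledger sequence would look something like:
```
SELECT sequence FROM clio.ledger_range WHERE is_latest = true;
```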
### `objects`
```
CREATE TABLE clio.objects (
key blob, # Object index of the object
sequence bigint, # The sequence at which this version of the object was written
object blob, # Data of the object
PRIMARY KEY (key, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table stores the data of all objects that ever existed on the XRP network, even if they have been deleted (a deletion is recorded by writing an empty blob for that key). The records are ordered by descending sequence, so the newest version of each ledger object comes first.
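A point-in-time lookup of an object therefore needs no tree traversal; a sketch of the query (with `?` placeholders for the object index and target sequence):
```
SELECT object FROM clio.objects WHERE key = ? AND sequence <= ?
ORDER BY sequence DESC LIMIT 1;
```
This returns the most recent version of the object written at or before the given sequence; an empty blob means the object had been deleted by then.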
### `ledgers`
```
CREATE TABLE clio.ledgers (
sequence bigint PRIMARY KEY, # Sequence of the ledger version
header blob # Data of the header
) ...
```
This table stores the ledger header data of each ledger version, keyed by sequence.
### `diff`
```
CREATE TABLE clio.diff (
seq bigint, # Sequence of the ledger version
key blob, # Object index of an object changed in this ledger version
PRIMARY KEY (seq, key)
) WITH CLUSTERING ORDER BY (key ASC) ...
```
This table stores the object indexes of all objects changed in each ledger version.
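For example, the object indexes touched by ledger `n` can be fetched with (a sketch):
```
SELECT key FROM clio.diff WHERE seq = n;
```
Clio uses recent diffs like this to pick cursors for loading the cache in parallel.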
### `account_tx`
```
CREATE TABLE clio.account_tx (
account blob,
seq_idx frozen<tuple<bigint, bigint>>, # Tuple of (ledger_index, transaction_index)
hash blob, # Hash of the transaction
PRIMARY KEY (account, seq_idx)
) WITH CLUSTERING ORDER BY (seq_idx DESC) ...
```
This table stores the list of transactions affecting a given account. This includes transactions made by the account, as well as transactions received.
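Paging backwards in time through an account's transactions might look like the following sketch, where the `(?, ?)` tuple is the cursor returned by the previous page (exact driver syntax may differ):
```
SELECT hash, seq_idx FROM clio.account_tx
WHERE account = ? AND seq_idx <= (?, ?)
ORDER BY seq_idx DESC LIMIT 20;
```
The returned hashes are then used to fetch the full transactions from the `transactions` table.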
### `successor`
```
CREATE TABLE clio.successor (
key blob, # Object index
seq bigint, # The sequence at which this object's successor link was updated
next blob, # Index of the next object that existed at this sequence
PRIMARY KEY (key, seq)
) WITH CLUSTERING ORDER BY (seq ASC) ...
```
This table is the backbone of how ledger object history is stored in Cassandra. The successor table stores the object index of every ledger object that was validated on the XRP network, along with the ledger sequence at which the object's successor link was updated. Because each key's records are ordered by sequence, tracing through the table with a specific sequence number lets Clio recreate a linked list representing all the ledger objects that existed at that ledger sequence. The special values `0x00...00` and `0xFF...FF` label the head and tail of the linked list. The diagram below shows how tracing through the same table with different sequence parameters yields different linked lists, each representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` effectively traces through the successor table and follows the linked list for a specific sequence `n`.
![Successor Table Trace Diagram](https://raw.githubusercontent.com/Shoukozumi/clio/9b2ea3efb6b164b02e9a5f0ef6717065a70f078c/src/backend/README.png)
*P.S.: The `diff` is `(DELETE 0x00...02, CREATE 0x00...03)` for `seq=1001` and `(CREATE 0x00...04)` for `seq=1002`, both of which are accurately reflected in the linked list traces.*
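Putting the trace into practice, iterating all objects that existed at sequence `n` is a repeated application of that query, starting from the head sentinel (a sketch):
```
-- start from the head sentinel (0x00...00)
SELECT next FROM clio.successor WHERE key = 0x00...00 AND seq <= n
ORDER BY seq DESC LIMIT 1;
-- repeat with key = the previous result's `next`
-- stop when next = 0xFF...FF (the tail sentinel)
```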
### Next
In each new ledger version with sequence `n`, a ledger object `v` can either be **created**, **modified**, or **deleted**. For all three of these operations, the procedure to update the successor table can be broken down into two steps (a concrete sketch of the resulting writes follows the list):
1. Trace through the linked list of the previous sequence to find the ledger object `e` with the greatest object index smaller than or equal to `v`'s index. Save `e`'s `next` value (the index of the next ledger object) as `w`.
2. If `v` is...
   1. Being **created**, add two new records at `seq=n`: one with `e` pointing to `v`, and one with `v` pointing to `w` (linked list insertion).
   2. Being **modified**, do nothing.
   3. Being **deleted**, add a record at `seq=n` with `e` pointing to `v`'s `next` value (linked list deletion).
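For example, the **created** case boils down to two inserts, and the **deleted** case to one (a sketch with `e`, `v`, `w`, `n` as placeholders):
```
-- v created at sequence n: splice v between e and w
INSERT INTO clio.successor (key, seq, next) VALUES (e, n, v);
INSERT INTO clio.successor (key, seq, next) VALUES (v, n, w);
-- v deleted at sequence n: make e skip over v
INSERT INTO clio.successor (key, seq, next) VALUES (e, n, w);
```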

View File

@@ -53,11 +53,13 @@ SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
if (!full_)
return {};
std::shared_lock lck{mtx_};
successorReqCounter_++;
if (seq != latestSeq_)
return {};
auto e = map_.upper_bound(key);
if (e == map_.end())
return {};
successorHitCounter_++;
return {{e->first, e->second.blob}};
}
@@ -81,11 +83,13 @@ SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
if (seq > latestSeq_)
return {};
std::shared_lock lck{mtx_};
objectReqCounter_++;
auto e = map_.find(key);
if (e == map_.end())
return {};
if (seq < e->second.seq)
return {};
objectHitCounter_++;
return {e->second.blob};
}
@@ -117,4 +121,18 @@ SimpleCache::size() const
std::shared_lock lck{mtx_};
return map_.size();
}
float
SimpleCache::getObjectHitRate() const
{
if (!objectReqCounter_)
return 1;
return ((float)objectHitCounter_) / objectReqCounter_;
}
float
SimpleCache::getSuccessorHitRate() const
{
if (!successorReqCounter_)
return 1;
return ((float)successorHitCounter_) / successorReqCounter_;
}
} // namespace Backend

View File

@@ -18,6 +18,13 @@ class SimpleCache
Blob blob;
};
// counters for fetchLedgerObject(s) hit rate
mutable std::atomic_uint32_t objectReqCounter_;
mutable std::atomic_uint32_t objectHitCounter_;
// counters for fetchSuccessorKey hit rate
mutable std::atomic_uint32_t successorReqCounter_;
mutable std::atomic_uint32_t successorHitCounter_;
std::map<ripple::uint256, CacheEntry> map_;
mutable std::shared_mutex mtx_;
uint32_t latestSeq_ = 0;
@@ -62,6 +69,12 @@ public:
size_t
size() const;
float
getObjectHitRate() const;
float
getSuccessorHitRate() const;
};
} // namespace Backend

View File

@@ -1,6 +1,7 @@
#ifndef CLIO_TYPES_H_INCLUDED
#define CLIO_TYPES_H_INCLUDED
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <optional>
#include <string>
#include <vector>
@@ -46,16 +47,34 @@ struct TransactionAndMetadata
}
};
struct AccountTransactionsCursor
struct TransactionsCursor
{
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
};
struct AccountTransactions
struct TransactionsAndCursor
{
std::vector<TransactionAndMetadata> txns;
std::optional<AccountTransactionsCursor> cursor;
std::optional<TransactionsCursor> cursor;
};
struct NFT
{
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
ripple::AccountID owner;
bool isBurned;
// clearly two tokens are the same if they have the same ID, but this
// struct stores the state of a given token at a given ledger sequence, so
// we also need to compare with ledgerSequence
bool
operator==(NFT const& other) const
{
return tokenID == other.tokenID &&
ledgerSequence == other.ledgerSequence;
}
};
struct LedgerRange

View File

@@ -23,8 +23,6 @@ class NetworkValidatedLedgers
std::condition_variable cv_;
bool stopping_ = false;
public:
static std::shared_ptr<NetworkValidatedLedgers>
make_ValidatedLedgers()
@@ -174,4 +172,4 @@ getMarkers(size_t numMarkers)
return markers;
}
#endif // RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED

View File

@@ -8,6 +8,7 @@
#include <boost/log/trivial.hpp>
#include <backend/DBHelpers.h>
#include <etl/ETLSource.h>
#include <etl/ProbingETLSource.h>
#include <etl/ReportingETL.h>
#include <rpc/RPCHelpers.h>
#include <thread>
@@ -72,57 +73,26 @@ ForwardCache::get(boost::json::object const& request) const
return {latestForwarded_.at(*command)};
}
static boost::beast::websocket::stream_base::timeout
make_TimeoutOption()
{
// See #289 for details.
// TODO: investigate the issue and find if there is a solution other than
// introducing artificial timeouts.
if (true)
{
// The only difference between this and the suggested client role is
// that idle_timeout is set to 20 instead of none()
auto opt = boost::beast::websocket::stream_base::timeout{};
opt.handshake_timeout = std::chrono::seconds(30);
opt.idle_timeout = std::chrono::seconds(20);
opt.keep_alive_pings = false;
return opt;
}
else
{
return boost::beast::websocket::stream_base::timeout::suggested(
boost::beast::role_type::client);
}
}
@@ -130,6 +100,12 @@ template <class Derived>
void
ETLSourceImpl<Derived>::reconnect(boost::beast::error_code ec)
{
if (paused_)
return;
if (connected_)
hooks_.onDisconnected(ec);
connected_ = false;
// These are somewhat normal errors. operation_aborted occurs on shutdown,
// when the timer is cancelled. connection_refused will occur repeatedly
@@ -198,11 +174,21 @@ PlainETLSource::close(bool startAgain)
}
closing_ = false;
if (startAgain)
{
ws_ = std::make_unique<boost::beast::websocket::stream<
boost::beast::tcp_stream>>(
boost::asio::make_strand(ioc_));
run();
}
});
}
else if (startAgain)
{
ws_ = std::make_unique<
boost::beast::websocket::stream<boost::beast::tcp_stream>>(
boost::asio::make_strand(ioc_));
run();
}
});
@@ -297,10 +283,8 @@ PlainETLSource::onConnect(
// own timeout system
boost::beast::get_lowest_layer(derived().ws()).expires_never();
// Set suggested timeout settings for the websocket
derived().ws().set_option(
boost::beast::websocket::stream_base::timeout::suggested(
boost::beast::role_type::client));
// Set a desired timeout for the websocket stream
derived().ws().set_option(make_TimeoutOption());
// Set a decorator to change the User-Agent of the handshake
derived().ws().set_option(
@@ -341,10 +325,8 @@ SslETLSource::onConnect(
// own timeout system
boost::beast::get_lowest_layer(derived().ws()).expires_never();
// Set suggested timeout settings for the websocket
derived().ws().set_option(
boost::beast::websocket::stream_base::timeout::suggested(
boost::beast::role_type::client));
// Set a desired timeout for the websocket stream
derived().ws().set_option(make_TimeoutOption());
// Set a decorator to change the User-Agent of the handshake
derived().ws().set_option(
@@ -391,6 +373,10 @@ ETLSourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
{
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : ec = " << ec << " - " << toString();
if (auto action = hooks_.onConnected(ec);
action == ETLSourceHooks::Action::STOP)
return;
if (ec)
{
// start over
@@ -922,8 +908,6 @@ ETLSourceImpl<Derived>::fetchLedger(
"correctly on the ETL source. source = "
<< toString() << " status = " << status.error_message();
}
// BOOST_LOG_TRIVIAL(debug)
// << __func__ << " Message size = " << response.ByteSizeLong();
return {status, std::move(response)};
}
@@ -931,34 +915,18 @@ static std::unique_ptr<ETLSource>
make_ETLSource(
boost::json::object const& config,
boost::asio::io_context& ioContext,
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
ETLLoadBalancer& balancer)
{
std::unique_ptr<ETLSource> src = nullptr;
if (sslCtx)
{
src = std::make_unique<SslETLSource>(
config,
ioContext,
sslCtx,
backend,
subscriptions,
networkValidatedLedgers,
balancer);
}
else
{
src = std::make_unique<PlainETLSource>(
config,
ioContext,
backend,
subscriptions,
networkValidatedLedgers,
balancer);
}
auto src = std::make_unique<ProbingETLSource>(
config,
ioContext,
backend,
subscriptions,
networkValidatedLedgers,
balancer);
src->run();
@@ -968,7 +936,6 @@ make_ETLSource(
ETLLoadBalancer::ETLLoadBalancer(
boost::json::object const& config,
boost::asio::io_context& ioContext,
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl)
@@ -987,13 +954,7 @@ ETLLoadBalancer::ETLLoadBalancer(
for (auto& entry : config.at("etl_sources").as_array())
{
std::unique_ptr<ETLSource> source = make_ETLSource(
entry.as_object(),
ioContext,
sslCtx,
backend,
subscriptions,
nwvl,
*this);
entry.as_object(), ioContext, backend, subscriptions, nwvl, *this);
sources_.push_back(std::move(source));
BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - "

View File

@@ -16,6 +16,7 @@
class ETLLoadBalancer;
class ETLSource;
class ProbingETLSource;
class SubscriptionManager;
/// This class manages a connection to a single ETL source. This is almost
@@ -96,6 +97,12 @@ public:
virtual void
run() = 0;
virtual void
pause() = 0;
virtual void
resume() = 0;
virtual std::string
toString() const = 0;
@@ -126,6 +133,7 @@ public:
private:
friend ForwardCache;
friend ProbingETLSource;
virtual std::optional<boost::json::object>
requestFromRippled(
@@ -134,6 +142,14 @@ private:
boost::asio::yield_context& yield) const = 0;
};
struct ETLSourceHooks
{
enum class Action { STOP, PROCEED };
std::function<Action(boost::beast::error_code)> onConnected;
std::function<Action(boost::beast::error_code)> onDisconnected;
};
template <class Derived>
class ETLSourceImpl : public ETLSource
{
@@ -199,6 +215,10 @@ protected:
std::atomic_bool closing_{false};
std::atomic_bool paused_{false};
ETLSourceHooks hooks_;
void
run() override
{
@@ -215,7 +235,7 @@ protected:
public:
~ETLSourceImpl()
{
close(false);
derived().close(false);
}
bool
@@ -247,7 +267,54 @@ public:
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
ETLLoadBalancer& balancer);
ETLLoadBalancer& balancer,
ETLSourceHooks hooks)
: resolver_(boost::asio::make_strand(ioContext))
, networkValidatedLedgers_(networkValidatedLedgers)
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
, forwardCache_(config, ioContext, *this)
, ioc_(ioContext)
, timer_(ioContext)
, hooks_(hooks)
{
if (config.contains("ip"))
{
auto ipJs = config.at("ip").as_string();
ip_ = {ipJs.c_str(), ipJs.size()};
}
if (config.contains("ws_port"))
{
auto portjs = config.at("ws_port").as_string();
wsPort_ = {portjs.c_str(), portjs.size()};
}
if (config.contains("grpc_port"))
{
auto portjs = config.at("grpc_port").as_string();
grpcPort_ = {portjs.c_str(), portjs.size()};
try
{
boost::asio::ip::tcp::endpoint endpoint{
boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
std::stringstream ss;
ss << endpoint;
grpc::ChannelArguments chArgs;
chArgs.SetMaxReceiveMessageSize(-1);
stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
grpc::CreateCustomChannel(
ss.str(), grpc::InsecureChannelCredentials(), chArgs));
BOOST_LOG_TRIVIAL(debug)
<< "Made stub for remote = " << toString();
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(debug)
<< "Exception while creating stub = " << e.what()
<< " . Remote = " << toString();
}
}
}
/// @param sequence ledger sequence to check for
/// @return true if this source has the desired ledger
@@ -371,6 +438,22 @@ public:
void
reconnect(boost::beast::error_code ec);
/// Pause the source, effectively stopping it from trying to reconnect
void
pause() override
{
paused_ = true;
derived().close(false);
}
/// Resume the source, allowing it to reconnect again
void
resume() override
{
paused_ = false;
derived().close(true);
}
/// Callback
void
onResolve(
@@ -420,8 +503,16 @@ public:
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
ETLLoadBalancer& balancer)
: ETLSourceImpl(config, ioc, backend, subscriptions, nwvl, balancer)
ETLLoadBalancer& balancer,
ETLSourceHooks hooks)
: ETLSourceImpl(
config,
ioc,
backend,
subscriptions,
nwvl,
balancer,
std::move(hooks))
, ws_(std::make_unique<
boost::beast::websocket::stream<boost::beast::tcp_stream>>(
boost::asio::make_strand(ioc)))
@@ -462,8 +553,16 @@ public:
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
ETLLoadBalancer& balancer)
: ETLSourceImpl(config, ioc, backend, subscriptions, nwvl, balancer)
ETLLoadBalancer& balancer,
ETLSourceHooks hooks)
: ETLSourceImpl(
config,
ioc,
backend,
subscriptions,
nwvl,
balancer,
std::move(hooks))
, sslCtx_(sslCtx)
, ws_(std::make_unique<boost::beast::websocket::stream<
boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
@@ -513,7 +612,6 @@ public:
ETLLoadBalancer(
boost::json::object const& config,
boost::asio::io_context& ioContext,
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl);
@@ -522,13 +620,12 @@ public:
make_ETLLoadBalancer(
boost::json::object const& config,
boost::asio::io_context& ioc,
std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
{
return std::make_shared<ETLLoadBalancer>(
config, ioc, sslCtx, backend, subscriptions, validatedLedgers);
config, ioc, backend, subscriptions, validatedLedgers);
}
~ETLLoadBalancer()

src/etl/NFTHelpers.cpp Normal file
View File

@@ -0,0 +1,370 @@
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <ripple/protocol/STBase.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>
#include <vector>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <backend/Types.h>
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
// To find the minted token ID, we put all tokenIDs referenced in the
// metadata from prior to the tx application into one vector, then all
// tokenIDs referenced in the metadata from after the tx application into
// another, then find the one tokenID that was added by this tx
// application.
std::vector<ripple::uint256> prevIDs;
std::vector<ripple::uint256> finalIDs;
// The owner is not necessarily the issuer when using the authorized
// minter flow. Determine the owner from the ledger object ID of the
// NFTokenPages that were changed.
std::optional<ripple::AccountID> owner;
for (ripple::STObject const& node : txMeta.getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
ripple::ltNFTOKEN_PAGE)
continue;
if (!owner)
owner = ripple::AccountID::fromVoid(
node.getFieldH256(ripple::sfLedgerIndex).data());
if (node.getFName() == ripple::sfCreatedNode)
{
ripple::STArray const& toAddNFTs =
node.peekAtField(ripple::sfNewFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
std::transform(
toAddNFTs.begin(),
toAddNFTs.end(),
std::back_inserter(finalIDs),
[](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
}
// Else it's modified, as there should never be a deleted NFToken page
// as a result of a mint.
else
{
// When a mint results in splitting an existing page,
// it results in a created page and a modified node. Sometimes,
// the created node needs to be linked to a third page, resulting
// in modifying that third page's PreviousPageMin or NextPageMin
// field changing, but no NFTs within that page changing. In this
// case, there will be no previous NFTs and we need to skip.
// However, there will always be NFTs listed in the final fields,
// as rippled outputs all fields in final fields even if they were
// not changed.
ripple::STObject const& previousFields =
node.peekAtField(ripple::sfPreviousFields)
.downcast<ripple::STObject>();
if (!previousFields.isFieldPresent(ripple::sfNFTokens))
continue;
ripple::STArray const& toAddNFTs =
previousFields.getFieldArray(ripple::sfNFTokens);
std::transform(
toAddNFTs.begin(),
toAddNFTs.end(),
std::back_inserter(prevIDs),
[](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
ripple::STArray const& toAddFinalNFTs =
node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
std::transform(
toAddFinalNFTs.begin(),
toAddFinalNFTs.end(),
std::back_inserter(finalIDs),
[](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
}
}
std::sort(finalIDs.begin(), finalIDs.end());
std::sort(prevIDs.begin(), prevIDs.end());
std::vector<ripple::uint256> tokenIDResult;
std::set_difference(
finalIDs.begin(),
finalIDs.end(),
prevIDs.begin(),
prevIDs.end(),
std::inserter(tokenIDResult, tokenIDResult.begin()));
if (tokenIDResult.size() == 1 && owner)
return {
{NFTTransactionsData(
tokenIDResult.front(), txMeta, sttx.getTransactionID())},
NFTsData(tokenIDResult.front(), *owner, txMeta, false)};
std::stringstream msg;
msg << __func__ << " - unexpected NFTokenMint data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
ripple::uint256 const tokenID = sttx.getFieldH256(ripple::sfNFTokenID);
std::vector<NFTTransactionsData> const txs = {
NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())};
// Determine who owned the token when it was burned by finding an
// NFTokenPage that was deleted or modified that contains this
// tokenID.
for (ripple::STObject const& node : txMeta.getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
ripple::ltNFTOKEN_PAGE ||
node.getFName() == ripple::sfCreatedNode)
continue;
// NFT burn can result in an NFTokenPage being modified to no longer
// include the target, or an NFTokenPage being deleted. If this is
// modified, we want to look for the target in the fields prior to
// modification. If deleted, it's possible that the page was modified
// to remove the target NFT prior to the entire page being deleted. In
// this case, we need to look in the PreviousFields. Otherwise, the
// page was not modified prior to deleting and we need to look in the
// FinalFields.
std::optional<ripple::STArray> prevNFTs;
if (node.isFieldPresent(ripple::sfPreviousFields))
{
ripple::STObject const& previousFields =
node.peekAtField(ripple::sfPreviousFields)
.downcast<ripple::STObject>();
if (previousFields.isFieldPresent(ripple::sfNFTokens))
prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
}
else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
prevNFTs = node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
if (!prevNFTs)
continue;
auto const nft = std::find_if(
prevNFTs->begin(),
prevNFTs->end(),
[&tokenID](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != prevNFTs->end())
return std::make_pair(
txs,
NFTsData(
tokenID,
ripple::AccountID::fromVoid(
node.getFieldH256(ripple::sfLedgerIndex).data()),
txMeta,
true));
}
std::stringstream msg;
msg << __func__ << " - could not determine owner at burntime for tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenAcceptOfferData(
ripple::TxMeta const& txMeta,
ripple::STTx const& sttx)
{
// If we have the buy offer from this tx, we can determine the owner
// more easily by just looking at the owner of the accepted NFTokenOffer
// object.
if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer))
{
auto const affectedBuyOffer = std::find_if(
txMeta.getNodes().begin(),
txMeta.getNodes().end(),
[&sttx](ripple::STObject const& node) {
return node.getFieldH256(ripple::sfLedgerIndex) ==
sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
});
if (affectedBuyOffer == txMeta.getNodes().end())
{
std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
ripple::uint256 const tokenID =
affectedBuyOffer->peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldH256(ripple::sfNFTokenID);
ripple::AccountID const owner =
affectedBuyOffer->peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getAccountID(ripple::sfOwner);
return {
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
NFTsData(tokenID, owner, txMeta, false)};
}
// Otherwise we have to infer the new owner from the affected nodes.
auto const affectedSellOffer = std::find_if(
txMeta.getNodes().begin(),
txMeta.getNodes().end(),
[&sttx](ripple::STObject const& node) {
return node.getFieldH256(ripple::sfLedgerIndex) ==
sttx.getFieldH256(ripple::sfNFTokenSellOffer);
});
if (affectedSellOffer == txMeta.getNodes().end())
{
std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
ripple::uint256 const tokenID =
affectedSellOffer->peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldH256(ripple::sfNFTokenID);
ripple::AccountID const seller =
affectedSellOffer->peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getAccountID(ripple::sfOwner);
for (ripple::STObject const& node : txMeta.getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
ripple::ltNFTOKEN_PAGE ||
node.getFName() == ripple::sfDeletedNode)
continue;
ripple::AccountID const nodeOwner = ripple::AccountID::fromVoid(
node.getFieldH256(ripple::sfLedgerIndex).data());
if (nodeOwner == seller)
continue;
ripple::STArray const& nfts = [&node] {
if (node.getFName() == ripple::sfCreatedNode)
return node.peekAtField(ripple::sfNewFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
return node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
}();
auto const nft = std::find_if(
nfts.begin(),
nfts.end(),
[&tokenID](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != nfts.end())
return {
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
NFTsData(tokenID, nodeOwner, txMeta, false)};
}
std::stringstream msg;
msg << __func__ << " - unexpected NFTokenAcceptOffer data in tx "
<< sttx.getTransactionID();
throw std::runtime_error(msg.str());
}
// This is the only transaction where there can be more than 1 element in
// the returned vector, because you can cancel multiple offers in one
// transaction using this feature. This transaction also never returns an
// NFTsData because it does not change the state of an NFT itself.
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenCancelOfferData(
ripple::TxMeta const& txMeta,
ripple::STTx const& sttx)
{
std::vector<NFTTransactionsData> txs;
for (ripple::STObject const& node : txMeta.getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) !=
ripple::ltNFTOKEN_OFFER)
continue;
ripple::uint256 const tokenID = node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldH256(ripple::sfNFTokenID);
txs.emplace_back(tokenID, txMeta, sttx.getTransactionID());
}
// Deduplicate any transactions based on tokenID/txIdx combo. Can't just
// use txIdx because in this case one tx can cancel offers for several
// NFTs.
std::sort(
txs.begin(),
txs.end(),
[](NFTTransactionsData const& a, NFTTransactionsData const& b) {
return a.tokenID < b.tokenID ||
(a.tokenID == b.tokenID &&
a.transactionIndex < b.transactionIndex);
});
auto last = std::unique(
txs.begin(),
txs.end(),
[](NFTTransactionsData const& a, NFTTransactionsData const& b) {
return a.tokenID == b.tokenID &&
a.transactionIndex == b.transactionIndex;
});
txs.erase(last, txs.end());
return {txs, {}};
}
// This transaction never returns an NFTsData because it does not
// change the state of an NFT itself.
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenCreateOfferData(
ripple::TxMeta const& txMeta,
ripple::STTx const& sttx)
{
return {
{NFTTransactionsData(
sttx.getFieldH256(ripple::sfNFTokenID),
txMeta,
sttx.getTransactionID())},
{}};
}
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
if (txMeta.getResultTER() != ripple::tesSUCCESS)
return {{}, {}};
switch (sttx.getTxnType())
{
case ripple::TxType::ttNFTOKEN_MINT:
return getNFTokenMintData(txMeta, sttx);
case ripple::TxType::ttNFTOKEN_BURN:
return getNFTokenBurnData(txMeta, sttx);
case ripple::TxType::ttNFTOKEN_ACCEPT_OFFER:
return getNFTokenAcceptOfferData(txMeta, sttx);
case ripple::TxType::ttNFTOKEN_CANCEL_OFFER:
return getNFTokenCancelOfferData(txMeta, sttx);
case ripple::TxType::ttNFTOKEN_CREATE_OFFER:
return getNFTokenCreateOfferData(txMeta, sttx);
default:
return {{}, {}};
}
}

View File

@@ -0,0 +1,190 @@
#include <etl/ProbingETLSource.h>
ProbingETLSource::ProbingETLSource(
boost::json::object const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
ETLLoadBalancer& balancer,
boost::asio::ssl::context sslCtx)
: ioc_{ioc}
, sslCtx_{std::move(sslCtx)}
, sslSrc_{std::make_shared<SslETLSource>(
config,
ioc,
std::ref(sslCtx_),
backend,
subscriptions,
nwvl,
balancer,
make_SSLHooks())}
, plainSrc_{std::make_shared<PlainETLSource>(
config,
ioc,
backend,
subscriptions,
nwvl,
balancer,
make_PlainHooks())}
{
}
void
ProbingETLSource::run()
{
sslSrc_->run();
plainSrc_->run();
}
void
ProbingETLSource::pause()
{
sslSrc_->pause();
plainSrc_->pause();
}
void
ProbingETLSource::resume()
{
sslSrc_->resume();
plainSrc_->resume();
}
bool
ProbingETLSource::isConnected() const
{
return currentSrc_ && currentSrc_->isConnected();
}
bool
ProbingETLSource::hasLedger(uint32_t sequence) const
{
if (!currentSrc_)
return false;
return currentSrc_->hasLedger(sequence);
}
boost::json::object
ProbingETLSource::toJson() const
{
if (!currentSrc_)
return {};
return currentSrc_->toJson();
}
std::string
ProbingETLSource::toString() const
{
if (!currentSrc_)
return "{ probing }";
return currentSrc_->toString();
}
bool
ProbingETLSource::loadInitialLedger(
std::uint32_t ledgerSequence,
std::uint32_t numMarkers,
bool cacheOnly)
{
if (!currentSrc_)
return false;
return currentSrc_->loadInitialLedger(
ledgerSequence, numMarkers, cacheOnly);
}
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
ProbingETLSource::fetchLedger(
uint32_t ledgerSequence,
bool getObjects,
bool getObjectNeighbors)
{
if (!currentSrc_)
return {};
return currentSrc_->fetchLedger(
ledgerSequence, getObjects, getObjectNeighbors);
}
std::optional<boost::json::object>
ProbingETLSource::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
if (!currentSrc_)
return {};
return currentSrc_->forwardToRippled(request, clientIp, yield);
}
std::optional<boost::json::object>
ProbingETLSource::requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
if (!currentSrc_)
return {};
return currentSrc_->requestFromRippled(request, clientIp, yield);
}
ETLSourceHooks
ProbingETLSource::make_SSLHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
return ETLSourceHooks::Action::STOP;
if (!ec)
{
plainSrc_->pause();
currentSrc_ = sslSrc_;
BOOST_LOG_TRIVIAL(info)
<< "Selected WSS as the main source: "
<< currentSrc_->toString();
}
return ETLSourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
currentSrc_ = nullptr;
plainSrc_->resume();
}
return ETLSourceHooks::Action::STOP;
}};
}
ETLSourceHooks
ProbingETLSource::make_PlainHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
return ETLSourceHooks::Action::STOP;
if (!ec)
{
sslSrc_->pause();
currentSrc_ = plainSrc_;
BOOST_LOG_TRIVIAL(info)
<< "Selected Plain WS as the main source: "
<< currentSrc_->toString();
}
return ETLSourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
currentSrc_ = nullptr;
sslSrc_->resume();
}
return ETLSourceHooks::Action::STOP;
}};
}

View File

@@ -0,0 +1,91 @@
#ifndef RIPPLE_APP_REPORTING_PROBINGETLSOURCE_H_INCLUDED
#define RIPPLE_APP_REPORTING_PROBINGETLSOURCE_H_INCLUDED
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <etl/ETLSource.h>
#include <mutex>
/// This ETLSource implementation attempts to connect over both secure
/// websocket and plain websocket. The first source to connect pauses the
/// other, and the probing is considered done at this point. If, however,
/// the connected source loses its connection, the probing is kickstarted
/// again.
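/// Rough lifecycle sketch (illustrative only, not part of the interface):
///   run()             -> both sslSrc_ and plainSrc_ start connecting
///   one connects      -> its onConnected hook pauses the other source and
///                        sets currentSrc_ to the winner
///   currentSrc_ drops -> its onDisconnected hook clears currentSrc_ and
///                        resumes the other source, restarting the probe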
class ProbingETLSource : public ETLSource
{
std::mutex mtx_;
boost::asio::io_context& ioc_;
boost::asio::ssl::context sslCtx_;
std::shared_ptr<ETLSource> sslSrc_;
std::shared_ptr<ETLSource> plainSrc_;
std::shared_ptr<ETLSource> currentSrc_;
public:
ProbingETLSource(
boost::json::object const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
ETLLoadBalancer& balancer,
boost::asio::ssl::context sslCtx = boost::asio::ssl::context{
boost::asio::ssl::context::tlsv12});
~ProbingETLSource() = default;
void
run() override;
void
pause() override;
void
resume() override;
bool
isConnected() const override;
bool
hasLedger(uint32_t sequence) const override;
boost::json::object
toJson() const override;
std::string
toString() const override;
bool
loadInitialLedger(
std::uint32_t ledgerSequence,
std::uint32_t numMarkers,
bool cacheOnly = false) override;
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedger(
uint32_t ledgerSequence,
bool getObjects = true,
bool getObjectNeighbors = false) override;
std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const override;
private:
std::optional<boost::json::object>
requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const override;
ETLSourceHooks
make_SSLHooks() noexcept;
ETLSourceHooks
make_PlainHooks() noexcept;
};
#endif

View File

@@ -28,12 +28,13 @@ toString(ripple::LedgerInfo const& info)
}
} // namespace detail
std::vector<AccountTransactionsData>
FormattedTransactionsData
ReportingETL::insertTransactions(
ripple::LedgerInfo const& ledger,
org::xrpl::rpc::v1::GetLedgerResponse& data)
{
std::vector<AccountTransactionsData> accountTxData;
FormattedTransactionsData result;
for (auto& txn :
*(data.mutable_transactions_list()->mutable_transactions()))
{
@@ -42,21 +43,22 @@ ReportingETL::insertTransactions(
ripple::SerialIter it{raw->data(), raw->size()};
ripple::STTx sttx{it};
auto txSerializer =
std::make_shared<ripple::Serializer>(sttx.getSerializer());
ripple::TxMeta txMeta{
sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};
auto metaSerializer = std::make_shared<ripple::Serializer>(
txMeta.getAsObject().getSerializer());
BOOST_LOG_TRIVIAL(trace)
<< __func__ << " : "
<< "Inserting transaction = " << sttx.getTransactionID();
ripple::TxMeta txMeta{
sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};
auto const [nftTxs, maybeNFT] = getNFTData(txMeta, sttx);
result.nfTokenTxData.insert(
result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end());
if (maybeNFT)
result.nfTokensData.push_back(*maybeNFT);
auto journal = ripple::debugLog();
accountTxData.emplace_back(txMeta, sttx.getTransactionID(), journal);
result.accountTxData.emplace_back(
txMeta, sttx.getTransactionID(), journal);
std::string keyStr{(const char*)sttx.getTransactionID().data(), 32};
backend_->writeTransaction(
std::move(keyStr),
@@ -65,7 +67,27 @@ ReportingETL::insertTransactions(
std::move(*raw),
std::move(*txn.mutable_metadata_blob()));
}
return accountTxData;
// Remove all but the last NFTsData for each id. unique removes all
// but the first of a group, so we want to reverse sort by transaction
// index
std::sort(
result.nfTokensData.begin(),
result.nfTokensData.end(),
[](NFTsData const& a, NFTsData const& b) {
return a.tokenID > b.tokenID ||
(a.tokenID == b.tokenID &&
a.transactionIndex > b.transactionIndex);
});
// Now we can unique the NFTs by tokenID.
auto last = std::unique(
result.nfTokensData.begin(),
result.nfTokensData.end(),
[](NFTsData const& a, NFTsData const& b) {
return a.tokenID == b.tokenID;
});
result.nfTokensData.erase(last, result.nfTokensData.end());
return result;
}
std::optional<ripple::LedgerInfo>
@@ -106,7 +128,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
lgrInfo, std::move(*ledgerData->mutable_ledger_header()));
BOOST_LOG_TRIVIAL(debug) << __func__ << " wrote ledger";
std::vector<AccountTransactionsData> accountTxData =
FormattedTransactionsData insertTxResult =
insertTransactions(lgrInfo, *ledgerData);
BOOST_LOG_TRIVIAL(debug) << __func__ << " inserted txns";
@@ -119,8 +141,12 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
BOOST_LOG_TRIVIAL(debug) << __func__ << " loaded initial ledger";
if (!stopping_)
backend_->writeAccountTransactions(std::move(accountTxData));
{
backend_->writeAccountTransactions(
std::move(insertTxResult.accountTxData));
backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
}
backend_->finishWrites(startingSequence);
auto end = std::chrono::system_clock::now();
@@ -511,15 +537,15 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
<< __func__ << " : "
<< "Inserted/modified/deleted all objects. Number of objects = "
<< rawData.ledger_objects().objects_size();
std::vector<AccountTransactionsData> accountTxData{
insertTransactions(lgrInfo, rawData)};
FormattedTransactionsData insertTxResult =
insertTransactions(lgrInfo, rawData);
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Inserted all transactions. Number of transactions = "
<< rawData.transactions_list().transactions_size();
backend_->writeAccountTransactions(std::move(accountTxData));
backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData));
backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "wrote account_tx";
auto start = std::chrono::system_clock::now();
@@ -668,8 +694,6 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
beast::setCurrentThreadName("rippled: ReportingETL transform");
uint32_t currentSequence = startSequence;
auto begin = std::chrono::system_clock::now();
while (!writeConflict)
{
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> fetchResponse{
@@ -914,7 +938,7 @@ ReportingETL::loadCache(uint32_t seq)
a.insert(std::end(a), std::begin(b), std::end(b));
};
for (size_t i = 0; i < numDiffs_; ++i)
for (size_t i = 0; i < numCacheDiffs_; ++i)
{
append(diff, Backend::synchronousAndRetryOnTimeout([&](auto yield) {
return backend_->fetchLedgerDiff(seq - i, yield);
@@ -949,55 +973,74 @@ ReportingETL::loadCache(uint32_t seq)
<< "Loading cache. num cursors = " << cursors.size() - 1;
BOOST_LOG_TRIVIAL(debug) << __func__ << " cursors = " << cursorStr.str();
cacheDownloader_ = std::thread{[this, seq, cursors]() {
auto startTime = std::chrono::system_clock::now();
auto markers = std::make_shared<std::atomic_int>(0);
auto numRemaining =
std::make_shared<std::atomic_int>(cursors.size() - 1);
for (size_t i = 0; i < cursors.size() - 1; ++i)
{
std::optional<ripple::uint256> start = cursors[i];
std::optional<ripple::uint256> end = cursors[i + 1];
markers->wait(numCacheMarkers_);
++(*markers);
boost::asio::spawn(
ioContext_,
[this, seq, start, end, numRemaining, startTime, markers](
boost::asio::yield_context yield) {
std::optional<ripple::uint256> cursor = start;
std::string cursorStr = cursor.has_value()
? ripple::strHex(cursor.value())
: ripple::strHex(Backend::firstKey);
BOOST_LOG_TRIVIAL(debug)
<< "Starting a cursor: " << cursorStr
<< " markers = " << *markers;
while (!stopping_)
{
auto res = Backend::retryOnTimeout([this,
seq,
&cursor,
&yield]() {
return backend_->fetchLedgerPage(
cursor, seq, cachePageFetchSize_, false, yield);
});
backend_->cache().update(res.objects, seq, true);
if (!res.cursor || (end && *(res.cursor) > *end))
break;
BOOST_LOG_TRIVIAL(debug)
<< "Loading cache. cache size = "
<< backend_->cache().size() << " - cursor = "
<< ripple::strHex(res.cursor.value())
<< " start = " << cursorStr
<< " markers = " << *markers;
cursor = std::move(res.cursor);
}
--(*markers);
markers->notify_one();
if (--(*numRemaining) == 0)
{
auto endTime = std::chrono::system_clock::now();
auto duration =
std::chrono::duration_cast<std::chrono::seconds>(
endTime - startTime);
BOOST_LOG_TRIVIAL(info)
<< "Finished loading cache. cache size = "
<< backend_->cache().size() << ". Took "
<< duration.count() << " seconds";
backend_->cache().setFull();
}
else
{
BOOST_LOG_TRIVIAL(info)
<< "Finished a cursor. num remaining = "
<< *numRemaining << " start = " << cursorStr
<< " markers = " << *markers;
}
});
}
}};
// If loading synchronously, poll cache until full
while (cacheLoadStyle_ == CacheLoadStyle::SYNC &&
!backend_->cache().isFull())
@@ -1107,9 +1150,12 @@ ReportingETL::ReportingETL(
if (entry == "none" || entry == "no")
cacheLoadStyle_ = CacheLoadStyle::NOT_AT_ALL;
}
if (cache.contains("num_diffs") && cache.at("num_diffs").as_int64())
{
numDiffs_ = cache.at("num_diffs").as_int64();
}
if (cache.contains("num_diffs") && cache.at("num_diffs").is_int64())
numCacheDiffs_ = cache.at("num_diffs").as_int64();
if (cache.contains("num_markers") && cache.at("num_markers").is_int64())
numCacheMarkers_ = cache.at("num_markers").as_int64();
if (cache.contains("page_fetch_size") &&
cache.at("page_fetch_size").is_int64())
cachePageFetchSize_ = cache.at("page_fetch_size").as_int64();
}
}
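// For reference, the cache-related keys parsed above might appear in the
// config roughly like so (a sketch; values are illustrative):
//   "cache": { "num_diffs": 32, "num_markers": 48, "page_fetch_size": 512 }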

View File

@@ -19,7 +19,22 @@
#include <chrono>
/**
* Helper function for the ReportingETL, implemented in NFTHelpers.cpp, to
* pull to-write data out of a transaction that relates to NFTs.
*/
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
struct AccountTransactionsData;
struct NFTTransactionsData;
struct NFTsData;
struct FormattedTransactionsData
{
std::vector<AccountTransactionsData> accountTxData;
std::vector<NFTTransactionsData> nfTokenTxData;
std::vector<NFTsData> nfTokensData;
};
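Aside: FormattedTransactionsData bundles the three result sets that insertTransactions produces per ledger. A minimal, self-contained sketch of that aggregation shape follows; the row types and extractNFTData are hypothetical stand-ins mirroring getNFTData's return shape, not the real rippled types.

    #include <iostream>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    // Stand-ins for the real row types; fields are illustrative.
    struct AccountTransactionsData { std::string txHash; };
    struct NFTTransactionsData { std::string tokenID; };
    struct NFTsData { std::string tokenID; };

    struct FormattedTransactionsData
    {
        std::vector<AccountTransactionsData> accountTxData;
        std::vector<NFTTransactionsData> nfTokenTxData;
        std::vector<NFTsData> nfTokensData;
    };

    // Hypothetical stand-in for getNFTData: per-tx NFT rows plus an
    // optional NFT row.
    std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
    extractNFTData(std::string const& hash)
    {
        if (hash.starts_with("nft"))
            return {{NFTTransactionsData{hash}}, NFTsData{hash}};
        return {{}, std::nullopt};
    }

    int main()
    {
        FormattedTransactionsData result;
        for (std::string hash : {"tx1", "nft2", "tx3"})
        {
            auto [nftTxs, maybeNFT] = extractNFTData(hash);
            result.accountTxData.push_back({hash});
            result.nfTokenTxData.insert(
                result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end());
            if (maybeNFT)
                result.nfTokensData.push_back(std::move(*maybeNFT));
        }
        std::cout << result.accountTxData.size() << " account rows, "
                  << result.nfTokensData.size() << " NFT rows\n";
    }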
class SubscriptionManager;
/**
@@ -52,7 +67,15 @@ private:
// number of diffs to use to generate cursors to traverse the ledger in
// parallel during initial cache download
size_t numDiffs_ = 1;
size_t numCacheDiffs_ = 32;
// number of markers to use at one time to traverse the ledger in parallel
// during initial cache download
size_t numCacheMarkers_ = 48;
// number of ledger objects to fetch concurrently per marker during cache
// download
size_t cachePageFetchSize_ = 512;
// thread responsible for syncing the cache on startup
std::thread cacheDownloader_;
std::thread worker_;
boost::asio::io_context& ioContext_;
@@ -86,18 +109,6 @@ private:
// deletion
std::atomic_bool deleting_ = false;
/// Used to determine when to write to the database during the initial
/// ledger download. By default, the software downloads an entire ledger and
/// then writes to the database. If flushInterval_ is non-zero, the software
/// will write to the database as new ledger data (SHAMap leaf nodes)
/// arrives. It is not necessarily more efficient to write the data as it
/// arrives, as different SHAMap leaf nodes share the same SHAMap inner
/// nodes; flushing prematurely can result in the same SHAMap inner node
/// being written to the database more than once. It is recommended to use
/// the default value of 0 for this variable; however, different values can
/// be experimented with if better performance is desired.
size_t flushInterval_ = 0;
/// This variable controls the number of GetLedgerData calls that will be
/// executed in parallel during the initial ledger download. GetLedgerData
/// allows clients to page through a ledger over many RPC calls.
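Aside: the paging contract described here is the usual cursor loop: issue a call, carry the returned marker into the next call, and stop when no marker comes back. A minimal sketch under that assumption (fetchPage is a hypothetical stand-in for one GetLedgerData call):

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct Page
    {
        std::vector<std::string> objects;
        std::optional<std::string> marker;  // absent on the last page
    };

    // Hypothetical stand-in for one GetLedgerData call.
    Page fetchPage(std::optional<std::string> const& marker)
    {
        if (!marker)
            return {{"obj1", "obj2"}, "m1"};
        return {{"obj3"}, std::nullopt};
    }

    int main()
    {
        std::optional<std::string> marker;
        std::size_t total = 0;
        do
        {
            Page const page = fetchPage(marker);
            total += page.objects.size();
            marker = page.marker;
        } while (marker);
        std::cout << total << " objects\n";  // prints: 3 objects
    }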
@@ -123,7 +134,6 @@ private:
std::optional<uint32_t> startSequence_;
std::optional<uint32_t> finishSequence_;
size_t accumTxns_ = 0;
size_t txnThreshold_ = 0;
/// The time that the most recently published ledger was published. Used by
@@ -213,14 +223,16 @@ private:
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedgerDataAndDiff(uint32_t sequence);
/// Insert all of the extracted transactions into the ledger
/// Insert all of the extracted transactions into the ledger, returning
/// transactions related to accounts, transactions related to NFTs, and
/// NFTs themselves for later processing.
/// @param ledger ledger to insert transactions into
/// @param data data extracted from an ETL source
/// @return struct that contains the necessary info to write to the
/// transactions and account_transactions tables in Postgres (mostly
/// transaction hashes, corresponding nodestore hashes and affected
/// account_transactions/account_tx and nft_token_transactions tables
/// (mostly transaction hashes, corresponding nodestore hashes and affected
/// accounts)
std::vector<AccountTransactionsData>
FormattedTransactionsData
insertTransactions(
ripple::LedgerInfo const& ledger,
org::xrpl::rpc::v1::GetLedgerResponse& data);
@@ -313,6 +325,8 @@ public:
if (worker_.joinable())
worker_.join();
if (cacheDownloader_.joinable())
cacheDownloader_.join();
BOOST_LOG_TRIVIAL(debug) << "Joined ReportingETL worker thread";
}
@@ -355,6 +369,8 @@ public:
std::chrono::system_clock::now().time_since_epoch())
.count();
auto closeTime = lastCloseTime_.time_since_epoch().count();
if (now < (rippleEpochStart + closeTime))
return 0;
return now - (rippleEpochStart + closeTime);
}
};
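Aside: the guard above exists because now and closeTime are unsigned; if a ledger's close time sits a few seconds in the future, the subtraction would wrap around to a huge "age" instead of a small negative one. A minimal, self-contained illustration with made-up values:

    #include <cstdint>
    #include <iostream>

    std::uint64_t
    ageSeconds(std::uint64_t now, std::uint64_t closeTime)
    {
        if (now < closeTime)
            return 0;  // round negative ages to zero, as above
        return now - closeTime;
    }

    int main()
    {
        std::uint64_t const now = 1000000;
        std::uint64_t const futureClose = now + 3;  // closed "in the future"
        std::cout << now - futureClose << "\n";     // wraps to ~1.8e19
        std::cout << ageSeconds(now, futureClose) << "\n";  // prints 0
    }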

View File

@@ -12,7 +12,7 @@ namespace Build {
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "1.0.1"
char const* const versionString = "1.0.3"
// clang-format on
#if defined(DEBUG) || defined(SANITIZER)

View File

@@ -115,8 +115,7 @@ initLogging(boost::json::object const& config)
{
boost::log::add_console_log(std::cout, keywords::format = format);
}
if (config.contains("log_to_file") && config.at("log_to_file").as_bool() &&
config.contains("log_directory"))
if (config.contains("log_directory"))
{
if (!config.at("log_directory").is_string())
throw std::runtime_error("log directory must be a string");
@@ -273,7 +272,7 @@ main(int argc, char* argv[])
// The balancer itself publishes to streams (transactions_proposed and
// accounts_proposed)
auto balancer = ETLLoadBalancer::make_ETLLoadBalancer(
*config, ioc, ctxRef, backend, subscriptions, ledgers);
*config, ioc, backend, subscriptions, ledgers);
// ETL is responsible for writing and publishing to streams. In read-only
// mode, ETL only publishes

View File

@@ -44,7 +44,10 @@ doChannelAuthorize(Context const& context);
Result
doChannelVerify(Context const& context);
// offers methods
// book methods
Result
doBookChanges(Context const& context);
Result
doBookOffers(Context const& context);
@@ -55,6 +58,9 @@ doNFTBuyOffers(Context const& context);
Result
doNFTSellOffers(Context const& context);
Result
doNFTInfo(Context const& context);
// ledger methods
Result
doLedger(Context const& context);

View File

@@ -91,6 +91,38 @@ make_HttpContext(
clientIp};
}
constexpr static WarningInfo warningInfos[]{
{warnUNKNOWN, "Unknown warning"},
{warnRPC_CLIO,
"This is a clio server. clio only serves validated data. If you "
"want to talk to rippled, include 'ledger_index':'current' in your "
"request"},
{warnRPC_OUTDATED, "This server may be out of date"},
{warnRPC_RATE_LIMIT, "You are about to be rate limited"}};
WarningInfo const&
get_warning_info(warning_code code)
{
for (WarningInfo const& info : warningInfos)
{
if (info.code == code)
{
return info;
}
}
throw(std::out_of_range("Invalid warning_code"));
}
boost::json::object
make_warning(warning_code code)
{
boost::json::object json;
WarningInfo const& info(get_warning_info(code));
json["id"] = code;
json["message"] = static_cast<std::string>(info.message);
return json;
}
boost::json::object
make_error(Error err)
{
@@ -187,10 +219,12 @@ static HandlerTable handlerTable{
{"account_tx", &doAccountTx, LimitRange{1, 50, 100}},
{"gateway_balances", &doGatewayBalances, {}},
{"noripple_check", &doNoRippleCheck, {}},
{"book_changes", &doBookChanges, {}},
{"book_offers", &doBookOffers, LimitRange{1, 50, 100}},
{"ledger", &doLedger, {}},
{"ledger_data", &doLedgerData, LimitRange{1, 100, 2048}},
{"nft_buy_offers", &doNFTBuyOffers, LimitRange{1, 50, 100}},
{"nft_info", &doNFTInfo},
{"nft_sell_offers", &doNFTSellOffers, LimitRange{1, 50, 100}},
{"ledger_entry", &doLedgerEntry, {}},
{"ledger_range", &doLedgerRange, {}},

View File

@@ -162,6 +162,33 @@ public:
}
};
enum warning_code {
warnUNKNOWN = -1,
warnRPC_CLIO = 2001,
warnRPC_OUTDATED = 2002,
warnRPC_RATE_LIMIT = 2003
};
struct WarningInfo
{
constexpr WarningInfo() : code(warnUNKNOWN), message("unknown warning")
{
}
constexpr WarningInfo(warning_code code_, char const* message_)
: code(code_), message(message_)
{
}
warning_code code;
std::string_view const message;
};
WarningInfo const&
get_warning_info(warning_code code);
boost::json::object
make_warning(warning_code code);
boost::json::object
make_error(Status const& status);

View File

@@ -296,7 +296,8 @@ std::optional<ripple::STAmount>
getDeliveredAmount(
std::shared_ptr<ripple::STTx const> const& txn,
std::shared_ptr<ripple::TxMeta const> const& meta,
std::uint32_t const ledgerSequence)
std::uint32_t const ledgerSequence,
uint32_t date)
{
if (meta->hasDeliveredAmount())
return meta->getDeliveredAmount();
@@ -312,7 +313,7 @@ getDeliveredAmount(
// then its absence indicates that the amount delivered is listed in the
// Amount field. DeliveredAmount went live January 24, 2014.
// 446000000 is in Feb 2014, well after DeliveredAmount went live
if (ledgerSequence >= 4594095)
if (ledgerSequence >= 4594095 || date > 446000000)
{
return txn->getFieldAmount(ripple::sfAmount);
}
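Aside: the 446000000 cutoff is measured in seconds since the Ripple epoch (2000-01-01T00:00:00 UTC, Unix time 946684800), so it corresponds to mid-February 2014, safely after DeliveredAmount went live on January 24, 2014. A quick self-contained check:

    #include <ctime>
    #include <iostream>

    int main()
    {
        constexpr std::time_t rippleEpochStart = 946684800;  // 2000-01-01 UTC
        std::time_t const cutoff = rippleEpochStart + 446000000;
        // Prints a date in mid-February 2014.
        std::cout << std::asctime(std::gmtime(&cutoff));
    }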
@@ -458,7 +459,7 @@ toExpandedJson(Backend::TransactionAndMetadata const& blobs)
auto [txn, meta] = deserializeTxPlusMeta(blobs, blobs.ledgerSequence);
auto txnJson = toJson(*txn);
auto metaJson = toJson(*meta);
insertDeliveredAmount(metaJson, txn, meta);
insertDeliveredAmount(metaJson, txn, meta, blobs.date);
return {txnJson, metaJson};
}
@@ -466,11 +467,12 @@ bool
insertDeliveredAmount(
boost::json::object& metaJson,
std::shared_ptr<ripple::STTx const> const& txn,
std::shared_ptr<ripple::TxMeta const> const& meta)
std::shared_ptr<ripple::TxMeta const> const& meta,
uint32_t date)
{
if (canHaveDeliveredAmount(txn, meta))
{
if (auto amt = getDeliveredAmount(txn, meta, meta->getLgrSeq()))
if (auto amt = getDeliveredAmount(txn, meta, meta->getLgrSeq(), date))
metaJson["delivered_amount"] =
toBoostJson(amt->getJson(ripple::JsonOptions::include_date));
else
@@ -569,7 +571,7 @@ ledgerInfoFromRequest(Context const& ctx)
auto lgrInfo = ctx.backend->fetchLedgerByHash(ledgerHash, ctx.yield);
if (!lgrInfo)
if (!lgrInfo || lgrInfo->seq > ctx.range.maxSequence)
return Status{Error::rpcLGR_NOT_FOUND, "ledgerNotFound"};
return *lgrInfo;
@@ -604,7 +606,7 @@ ledgerInfoFromRequest(Context const& ctx)
auto lgrInfo =
ctx.backend->fetchLedgerBySequence(*ledgerSequence, ctx.yield);
if (!lgrInfo)
if (!lgrInfo || lgrInfo->seq > ctx.range.maxSequence)
return Status{Error::rpcLGR_NOT_FOUND, "ledgerNotFound"};
return *lgrInfo;

View File

@@ -62,7 +62,8 @@ bool
insertDeliveredAmount(
boost::json::object& metaJson,
std::shared_ptr<ripple::STTx const> const& txn,
std::shared_ptr<ripple::TxMeta const> const& meta);
std::shared_ptr<ripple::TxMeta const> const& meta,
uint32_t date);
boost::json::object
toJson(ripple::STBase const& obj);

View File

@@ -54,10 +54,8 @@ doAccountInfo(Context const& context)
auto key = ripple::keylet::account(accountID.value());
auto start = std::chrono::system_clock::now();
std::optional<std::vector<unsigned char>> dbResponse =
context.backend->fetchLedgerObject(key.key, lgrInfo.seq, context.yield);
auto end = std::chrono::system_clock::now();
if (!dbResponse)
{

View File

@@ -19,7 +19,7 @@ doAccountTx(Context const& context)
bool const binary = getBool(request, JS(binary), false);
bool const forward = getBool(request, JS(forward), false);
std::optional<Backend::AccountTransactionsCursor> cursor;
std::optional<Backend::TransactionsCursor> cursor;
if (request.contains(JS(marker)))
{

View File

@@ -0,0 +1,250 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/ToString.h>
#include <backend/BackendInterface.h>
#include <rpc/RPCHelpers.h>
#include <boost/json.hpp>
#include <algorithm>
namespace json = boost::json;
using namespace ripple;
namespace RPC {
struct BookChange
{
STAmount sideAVolume;
STAmount sideBVolume;
STAmount highRate;
STAmount lowRate;
STAmount openRate;
STAmount closeRate;
};
class BookChangesHandler
{
std::reference_wrapper<Context const> context_;
std::map<std::string, BookChange> tally_ = {};
std::optional<uint32_t> offerCancel_ = {};
public:
~BookChangesHandler() = default;
explicit BookChangesHandler(Context const& context)
: context_{std::cref(context)}
{
}
BookChangesHandler(BookChangesHandler const&) = delete;
BookChangesHandler(BookChangesHandler&&) = delete;
BookChangesHandler&
operator=(BookChangesHandler const&) = delete;
BookChangesHandler&
operator=(BookChangesHandler&&) = delete;
/**
* @brief Handles the `book_change` request for given transactions
*
* @param transactions The transactions to compute changes for
* @return std::vector<BookChange> The changes
*/
std::vector<BookChange>
handle(LedgerInfo const& ledger)
{
reset();
for (auto const transactions =
context_.get().backend->fetchAllTransactionsInLedger(
ledger.seq, context_.get().yield);
auto const& tx : transactions)
{
handleBookChange(tx);
}
// TODO: rewrite this with std::ranges when compilers catch up
std::vector<BookChange> changes;
std::transform(
std::make_move_iterator(std::begin(tally_)),
std::make_move_iterator(std::end(tally_)),
std::back_inserter(changes),
[](auto obj) { return obj.second; });
return changes;
}
private:
inline void
reset() noexcept
{
tally_.clear();
offerCancel_ = std::nullopt;
}
void
handleAffectedNode(STObject const& node)
{
auto const& metaType = node.getFName();
auto const nodeType = node.getFieldU16(sfLedgerEntryType);
// we only care about ltOFFER objects being modified or
// deleted
if (nodeType != ltOFFER || metaType == sfCreatedNode)
return;
// if either FF or PF are missing we can't compute
// but generally these are cancelled rather than crossed
// so skipping them is consistent
if (!node.isFieldPresent(sfFinalFields) ||
!node.isFieldPresent(sfPreviousFields))
return;
auto const& finalFields =
node.peekAtField(sfFinalFields).downcast<STObject>();
auto const& previousFields =
node.peekAtField(sfPreviousFields).downcast<STObject>();
// defensive case that should never be hit
if (!finalFields.isFieldPresent(sfTakerGets) ||
!finalFields.isFieldPresent(sfTakerPays) ||
!previousFields.isFieldPresent(sfTakerGets) ||
!previousFields.isFieldPresent(sfTakerPays))
return;
// filter out any offers deleted by explicit offer cancels
if (metaType == sfDeletedNode && offerCancel_ &&
finalFields.getFieldU32(sfSequence) == *offerCancel_)
return;
// compute the difference in gets and pays actually
// affected onto the offer
auto const deltaGets = finalFields.getFieldAmount(sfTakerGets) -
previousFields.getFieldAmount(sfTakerGets);
auto const deltaPays = finalFields.getFieldAmount(sfTakerPays) -
previousFields.getFieldAmount(sfTakerPays);
auto const g = to_string(deltaGets.issue());
auto const p = to_string(deltaPays.issue());
auto const noswap =
isXRP(deltaGets) ? true : (isXRP(deltaPays) ? false : (g < p));
auto first = noswap ? deltaGets : deltaPays;
auto second = noswap ? deltaPays : deltaGets;
// defensively programmed, should (probably) never happen
if (second == beast::zero)
return;
auto const rate = divide(first, second, noIssue());
if (first < beast::zero)
first = -first;
if (second < beast::zero)
second = -second;
auto const key = noswap ? (g + '|' + p) : (p + '|' + g);
if (tally_.contains(key))
{
auto& entry = tally_.at(key);
entry.sideAVolume += first;
entry.sideBVolume += second;
if (entry.highRate < rate)
entry.highRate = rate;
if (entry.lowRate > rate)
entry.lowRate = rate;
entry.closeRate = rate;
}
else
{
// TODO: use parenthesized initialization when clang catches up
tally_[key] = {
first, // sideAVolume
second, // sideBVolume
rate, // highRate
rate, // lowRate
rate, // openRate
rate, // closeRate
};
}
}
void
handleBookChange(Backend::TransactionAndMetadata const& blob)
{
auto const [tx, meta] = deserializeTxPlusMeta(blob);
if (!tx || !meta || !tx->isFieldPresent(sfTransactionType))
return;
offerCancel_ = shouldCancelOffer(tx);
for (auto const& node : meta->getFieldArray(sfAffectedNodes))
handleAffectedNode(node);
}
std::optional<uint32_t>
shouldCancelOffer(std::shared_ptr<ripple::STTx const> const& tx) const
{
switch (tx->getFieldU16(sfTransactionType))
{
// in future if any other ways emerge to cancel an offer
// this switch makes them easy to add
case ttOFFER_CANCEL:
case ttOFFER_CREATE:
if (tx->isFieldPresent(sfOfferSequence))
return tx->getFieldU32(sfOfferSequence);
default:
return std::nullopt;
}
}
};
void
tag_invoke(
const json::value_from_tag&,
json::value& jv,
BookChange const& change)
{
auto amountStr = [](STAmount const& amount) -> std::string {
return isXRP(amount) ? to_string(amount.xrp())
: to_string(amount.iou());
};
auto currencyStr = [](STAmount const& amount) -> std::string {
return isXRP(amount) ? "XRP_drops" : to_string(amount.issue());
};
jv = {
{JS(currency_a), currencyStr(change.sideAVolume)},
{JS(currency_b), currencyStr(change.sideBVolume)},
{JS(volume_a), amountStr(change.sideAVolume)},
{JS(volume_b), amountStr(change.sideBVolume)},
{JS(high), to_string(change.highRate.iou())},
{JS(low), to_string(change.lowRate.iou())},
{JS(open), to_string(change.openRate.iou())},
{JS(close), to_string(change.closeRate.iou())},
};
}
Result
doBookChanges(Context const& context)
{
auto const request = context.params;
auto const info = ledgerInfoFromRequest(context);
if (auto const status = std::get_if<Status>(&info))
return *status;
auto const lgrInfo = std::get<ripple::LedgerInfo>(info);
auto const changes = BookChangesHandler{context}.handle(lgrInfo);
return json::object{
{JS(type), "bookChanges"},
{JS(ledger_index), lgrInfo.seq},
{JS(ledger_hash), to_string(lgrInfo.hash)},
{JS(ledger_time), lgrInfo.closeTime.time_since_epoch().count()},
{JS(changes), json::value_from(changes)},
};
}
} // namespace RPC
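Aside: per book key, the handler above folds each observed exchange rate into running high/low/open/close values alongside the volumes. A minimal, self-contained sketch of that folding, with plain doubles standing in for STAmount (the key and rates are made up):

    #include <algorithm>
    #include <iostream>
    #include <map>
    #include <string>

    struct OHLC
    {
        double high, low, open, close;
    };

    int main()
    {
        std::map<std::string, OHLC> tally;
        auto const fold = [&tally](std::string const& key, double rate) {
            auto [it, inserted] =
                tally.try_emplace(key, OHLC{rate, rate, rate, rate});
            if (!inserted)
            {
                auto& e = it->second;
                e.high = std::max(e.high, rate);
                e.low = std::min(e.low, rate);
                e.close = rate;  // open keeps the first observed rate
            }
        };
        for (double r : {1.00, 1.05, 0.98, 1.02})
            fold("XRP_drops|USD", r);
        auto const& e = tally.begin()->second;
        std::cout << "open " << e.open << " high " << e.high << " low "
                  << e.low << " close " << e.close << "\n";
    }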

View File

@@ -132,7 +132,7 @@ doLedger(Context const& context)
for (auto const& obj : diff)
{
boost::json::object entry;
entry[JS(id)] = ripple::strHex(obj.key);
entry["object_id"] = ripple::strHex(obj.key);
if (binary)
entry["object"] = ripple::strHex(obj.blob);
else if (obj.blob.size())

View File

@@ -79,7 +79,7 @@ doLedgerData(Context const& context)
boost::json::object header;
// no marker means this is the first call, so we return header info
if (!marker)
if (!request.contains(JS(marker)))
{
if (binary)
{
@@ -106,9 +106,9 @@ doLedgerData(Context const& context)
header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops);
header[JS(total_coins)] = ripple::to_string(lgrInfo.drops);
header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash);
response[JS(ledger)] = header;
}
response[JS(ledger)] = header;
}
else
{

View File

@@ -351,10 +351,8 @@ doLedgerEntry(Context const& context)
return Status{Error::rpcINVALID_PARAMS, "unknownOption"};
}
auto start = std::chrono::system_clock::now();
auto dbResponse =
context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield);
auto end = std::chrono::system_clock::now();
if (!dbResponse or dbResponse->size() == 0)
return Status{"entryNotFound"};

View File

@@ -0,0 +1,146 @@
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <ripple/protocol/Indexes.h>
#include <boost/json.hpp>
#include <backend/BackendInterface.h>
#include <rpc/RPCHelpers.h>
// {
// nft_id: <ident>
// ledger_hash: <ledger>
// ledger_index: <ledger_index>
// }
namespace RPC {
std::variant<std::monostate, std::string, Status>
getURI(Backend::NFT const& dbResponse, Context const& context)
{
// Fetch URI from ledger
// The correct page will be > bookmark and <= last. We need to calculate
// the first possible page however, since bookmark is not guaranteed to
// exist.
auto const bookmark = ripple::keylet::nftpage(
ripple::keylet::nftpage_min(dbResponse.owner), dbResponse.tokenID);
auto const last = ripple::keylet::nftpage_max(dbResponse.owner);
ripple::uint256 nextKey = last.key;
std::optional<ripple::STLedgerEntry> sle;
// when this loop terminates, `sle` will contain the correct page for
// this NFT.
//
// 1) We start at the last NFTokenPage, which is guaranteed to exist,
// grab the object from the DB and deserialize it.
//
// 2) If that NFTokenPage has a PreviousPageMin value and the
// PreviousPageMin value is > bookmark, restart loop. Otherwise
// terminate and use the `sle` from this iteration.
do
{
auto const blob = context.backend->fetchLedgerObject(
ripple::Keylet(ripple::ltNFTOKEN_PAGE, nextKey).key,
dbResponse.ledgerSequence,
context.yield);
if (!blob || blob->size() == 0)
return Status{
Error::rpcINTERNAL, "Cannot find NFTokenPage for this NFT"};
sle = ripple::STLedgerEntry(
ripple::SerialIter{blob->data(), blob->size()}, nextKey);
if (sle->isFieldPresent(ripple::sfPreviousPageMin))
nextKey = sle->getFieldH256(ripple::sfPreviousPageMin);
} while (sle && sle->key() != nextKey && nextKey > bookmark.key);
if (!sle)
return Status{
Error::rpcINTERNAL, "Cannot find NFTokenPage for this NFT"};
auto const nfts = sle->getFieldArray(ripple::sfNFTokens);
auto const nft = std::find_if(
nfts.begin(),
nfts.end(),
[&dbResponse](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) ==
dbResponse.tokenID;
});
if (nft == nfts.end())
return Status{
Error::rpcINTERNAL, "Cannot find NFTokenPage for this NFT"};
ripple::Blob const uriField = nft->getFieldVL(ripple::sfURI);
// NOTE this cannot use a ternary or value_or because then the
// expression's type is unclear. We want to explicitly set the `uri`
// field to null when not present to avoid any confusion.
if (std::string const uri = std::string(uriField.begin(), uriField.end());
uri.size() > 0)
return uri;
return std::monostate{};
}
Result
doNFTInfo(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
if (!request.contains("nft_id"))
return Status{Error::rpcINVALID_PARAMS, "Missing nft_id"};
auto const& jsonTokenID = request.at("nft_id");
if (!jsonTokenID.is_string())
return Status{Error::rpcINVALID_PARAMS, "nft_id is not a string"};
ripple::uint256 tokenID;
if (!tokenID.parseHex(jsonTokenID.as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "Malformed nft_id"};
// We only need to fetch the ledger header because the ledger hash is
// supposed to be included in the response. The ledger sequence is specified
// in the request
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
ripple::LedgerInfo lgrInfo = std::get<ripple::LedgerInfo>(v);
std::optional<Backend::NFT> dbResponse =
context.backend->fetchNFT(tokenID, lgrInfo.seq, context.yield);
if (!dbResponse)
return Status{Error::rpcOBJECT_NOT_FOUND, "NFT not found"};
response["nft_id"] = ripple::strHex(dbResponse->tokenID);
response["ledger_index"] = dbResponse->ledgerSequence;
response["owner"] = ripple::toBase58(dbResponse->owner);
response["is_burned"] = dbResponse->isBurned;
response["flags"] = ripple::nft::getFlags(dbResponse->tokenID);
response["transfer_fee"] = ripple::nft::getTransferFee(dbResponse->tokenID);
response["issuer"] =
ripple::toBase58(ripple::nft::getIssuer(dbResponse->tokenID));
response["nft_taxon"] =
ripple::nft::toUInt32(ripple::nft::getTaxon(dbResponse->tokenID));
response["nft_sequence"] = ripple::nft::getSerial(dbResponse->tokenID);
if (!dbResponse->isBurned)
{
auto const maybeURI = getURI(*dbResponse, context);
// An error occurred
if (Status const* status = std::get_if<Status>(&maybeURI))
return *status;
// A URI was found
if (std::string const* uri = std::get_if<std::string>(&maybeURI))
response["uri"] = *uri;
// A URI was not found, explicitly set to null
else
response["uri"] = nullptr;
}
return response;
}
} // namespace RPC

View File

@@ -16,19 +16,19 @@ appendNftOfferJson(ripple::SLE const& offer, boost::json::array& offers)
offers.push_back(boost::json::object_kind);
boost::json::object& obj(offers.back().as_object());
obj.at(JS(index)) = ripple::to_string(offer.key());
obj.at(JS(flags)) = (offer)[ripple::sfFlags];
obj.at(JS(owner)) = ripple::toBase58(offer.getAccountID(ripple::sfOwner));
obj[JS(index)] = ripple::to_string(offer.key());
obj[JS(flags)] = (offer)[ripple::sfFlags];
obj[JS(owner)] = ripple::toBase58(offer.getAccountID(ripple::sfOwner));
if (offer.isFieldPresent(ripple::sfDestination))
obj[JS(destination)] =
ripple::toBase58(offer.getAccountID(ripple::sfDestination));
if (offer.isFieldPresent(ripple::sfExpiration))
obj.at(JS(expiration)) = offer.getFieldU32(ripple::sfExpiration);
obj[JS(expiration)] = offer.getFieldU32(ripple::sfExpiration);
obj.at(JS(amount)) = toBoostJson(offer.getFieldAmount(ripple::sfAmount)
.getJson(ripple::JsonOptions::none));
obj[JS(amount)] = toBoostJson(offer.getFieldAmount(ripple::sfAmount)
.getJson(ripple::JsonOptions::none));
}
static Result
@@ -118,8 +118,8 @@ enumerateNFTOffers(
if (offers.size() == reserve)
{
response.at(JS(limit)) = limit;
response.at(JS(marker)) = to_string(offers.back().key());
response[JS(limit)] = limit;
response[JS(marker)] = to_string(offers.back().key());
offers.pop_back();
}

View File

@@ -43,10 +43,15 @@ doServerInfo(Context const& context)
info[JS(complete_ledgers)] = std::to_string(range->minSequence) + "-" +
std::to_string(range->maxSequence);
info[JS(counters)] = boost::json::object{};
info[JS(counters)].as_object()[JS(rpc)] = context.counters.report();
info[JS(counters)].as_object()["subscriptions"] =
context.subscriptions->report();
bool admin = context.clientIp == "127.0.0.1";
if (admin)
{
info[JS(counters)] = boost::json::object{};
info[JS(counters)].as_object()[JS(rpc)] = context.counters.report();
info[JS(counters)].as_object()["subscriptions"] =
context.subscriptions->report();
}
auto serverInfoRippled = context.balancer->forwardToRippled(
{{"command", "server_info"}}, context.clientIp, context.yield);
@@ -78,15 +83,21 @@ doServerInfo(Context const& context)
validated[JS(reserve_base_xrp)] = fees->reserve.decimalXRP();
validated[JS(reserve_inc_xrp)] = fees->increment.decimalXRP();
response["cache"] = boost::json::object{};
auto& cache = response["cache"].as_object();
info["cache"] = boost::json::object{};
auto& cache = info["cache"].as_object();
cache["size"] = context.backend->cache().size();
cache["is_full"] = context.backend->cache().isFull();
cache["latest_ledger_seq"] =
context.backend->cache().latestLedgerSequence();
cache["object_hit_rate"] = context.backend->cache().getObjectHitRate();
cache["successor_hit_rate"] =
context.backend->cache().getSuccessorHitRate();
response["etl"] = context.etl->getInfo();
if (admin)
{
info["etl"] = context.etl->getInfo();
}
return response;
}

View File

@@ -257,7 +257,8 @@ SubscriptionManager::pubTransaction(
boost::json::object pubObj;
pubObj["transaction"] = RPC::toJson(*tx);
pubObj["meta"] = RPC::toJson(*meta);
RPC::insertDeliveredAmount(pubObj["meta"].as_object(), tx, meta);
RPC::insertDeliveredAmount(
pubObj["meta"].as_object(), tx, meta, blobs.date);
pubObj["type"] = "transaction";
pubObj["validated"] = true;
pubObj["status"] = "closed";

View File

@@ -15,6 +15,7 @@ class DOSGuard
std::uint32_t const maxFetches_;
std::uint32_t const sweepInterval_;
// Load config setting for DOSGuard
std::optional<boost::json::object>
getConfig(boost::json::object const& config) const
{

View File

@@ -242,12 +242,16 @@ public:
},
dosGuard_.isWhiteListed(*ip)))
{
// Non-whitelist connection rejected due to full connection queue
http::response<http::string_body> res{
http::status::ok, req_.version()};
res.set(http::field::server, "clio-server-v0.0.0");
res.set(
http::field::server,
"clio-server-" + Build::getClioVersionString());
res.set(http::field::content_type, "application/json");
res.keep_alive(req_.keep_alive());
res.body() = "Server overloaded";
res.body() = boost::json::serialize(
RPC::make_error(RPC::Error::rpcTOO_BUSY));
res.prepare_payload();
lambda_(std::move(res));
}
@@ -304,7 +308,9 @@ handle_request(
std::string content_type,
std::string message) {
http::response<http::string_body> res{status, req.version()};
res.set(http::field::server, "xrpl-reporting-server-v0.0.0");
res.set(
http::field::server,
"clio-server-" + Build::getClioVersionString());
res.set(http::field::content_type, content_type);
res.keep_alive(req.keep_alive());
res.body() = std::string(message);
@@ -324,9 +330,9 @@ handle_request(
if (!dosGuard.isOk(ip))
return send(httpResponse(
http::status::ok,
"application/json",
boost::json::serialize(RPC::make_error(RPC::Error::rpcSLOW_DOWN))));
http::status::service_unavailable,
"text/plain",
"Server is overloaded"));
try
{
@@ -350,13 +356,6 @@ handle_request(
RPC::make_error(RPC::Error::rpcBAD_SYNTAX))));
}
if (!dosGuard.isOk(ip))
return send(httpResponse(
http::status::ok,
"application/json",
boost::json::serialize(
RPC::make_error(RPC::Error::rpcSLOW_DOWN))));
auto range = backend->fetchLedgerRange();
if (!range)
return send(httpResponse(
@@ -418,18 +417,16 @@ handle_request(
}
boost::json::array warnings;
warnings.emplace_back(
"This is a clio server. clio only serves validated data. If you "
"want to talk to rippled, include 'ledger_index':'current' in your "
"request");
warnings.emplace_back(RPC::make_warning(RPC::warnRPC_CLIO));
auto lastCloseAge = context->etl->lastCloseAgeSeconds();
if (lastCloseAge >= 60)
warnings.emplace_back("This server may be out of date");
result["warnings"] = warnings;
warnings.emplace_back(RPC::make_warning(RPC::warnRPC_OUTDATED));
response["warnings"] = warnings;
responseStr = boost::json::serialize(response);
if (!dosGuard.add(ip, responseStr.size()))
{
warnings.emplace_back("Too many requests");
response["warning"] = "load";
warnings.emplace_back(RPC::make_warning(RPC::warnRPC_RATE_LIMIT));
response["warnings"] = warnings;
// reserialize when we need to include this warning
responseStr = boost::json::serialize(response);

View File

@@ -321,8 +321,8 @@ make_HttpServer(
static_cast<unsigned short>(serverConfig.at("port").as_int64());
uint32_t numThreads = std::thread::hardware_concurrency();
if (serverConfig.contains("workers"))
numThreads = serverConfig.at("workers").as_int64();
if (config.contains("workers"))
numThreads = config.at("workers").as_int64();
uint32_t maxQueueSize = 0; // no max
if (serverConfig.contains("max_queue_size"))
maxQueueSize = serverConfig.at("max_queue_size").as_int64();

View File

@@ -245,40 +245,24 @@ public:
}
void
handle_request(std::string const&& msg, boost::asio::yield_context& yc)
handle_request(
boost::json::object const&& request,
boost::json::value const& id,
boost::asio::yield_context& yield)
{
auto ip = derived().ip();
if (!ip)
return;
boost::json::object response = {};
auto sendError = [this](auto error, boost::json::value id) {
auto sendError = [this, &request, id](auto error) {
auto e = RPC::make_error(error);
if (!id.is_null())
e["id"] = id;
send(boost::json::serialize(e));
e["request"] = request;
this->send(boost::json::serialize(e));
};
boost::json::value raw = [](std::string const&& msg) {
try
{
return boost::json::parse(msg);
}
catch (std::exception&)
{
return boost::json::value{nullptr};
}
}(std::move(msg));
if (!raw.is_object())
return sendError(RPC::Error::rpcINVALID_PARAMS, nullptr);
boost::json::object request = raw.as_object();
auto id = request.contains("id") ? request.at("id") : nullptr;
try
{
BOOST_LOG_TRIVIAL(debug) << " received request : " << request;
@@ -286,10 +270,10 @@ public:
{
auto range = backend_->fetchLedgerRange();
if (!range)
return sendError(RPC::Error::rpcNOT_READY, id);
return sendError(RPC::Error::rpcNOT_READY);
std::optional<RPC::Context> context = RPC::make_WsContext(
yc,
yield,
request,
backend_,
subscriptions_.lock(),
@@ -301,7 +285,7 @@ public:
*ip);
if (!context)
return sendError(RPC::Error::rpcBAD_SYNTAX, id);
return sendError(RPC::Error::rpcBAD_SYNTAX);
response = getDefaultWsResponse(id);
@@ -334,7 +318,7 @@ public:
catch (Backend::DatabaseTimeout const& t)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout";
return sendError(RPC::Error::rpcNOT_READY, id);
return sendError(RPC::Error::rpcNOT_READY);
}
}
catch (std::exception const& e)
@@ -342,23 +326,22 @@ public:
BOOST_LOG_TRIVIAL(error)
<< __func__ << " caught exception : " << e.what();
return sendError(RPC::Error::rpcINTERNAL, id);
return sendError(RPC::Error::rpcINTERNAL);
}
boost::json::array warnings;
warnings.emplace_back(
"This is a clio server. clio only serves validated data. If you "
"want to talk to rippled, include 'ledger_index':'current' in your "
"request");
warnings.emplace_back(RPC::make_warning(RPC::warnRPC_CLIO));
auto lastCloseAge = etl_->lastCloseAgeSeconds();
if (lastCloseAge >= 60)
warnings.emplace_back("This server may be out of date");
warnings.emplace_back(RPC::make_warning(RPC::warnRPC_OUTDATED));
response["warnings"] = warnings;
std::string responseStr = boost::json::serialize(response);
if (!dosGuard_.add(*ip, responseStr.size()))
{
warnings.emplace_back("Too many requests");
response["warning"] = "load";
warnings.emplace_back(RPC::make_warning(RPC::warnRPC_RATE_LIMIT));
response["warnings"] = warnings;
// reserialize if we need to include this warning
responseStr = boost::json::serialize(response);
@@ -383,29 +366,55 @@ public:
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " received request from ip = " << *ip;
auto sendError = [&](auto&& msg) {
boost::json::object response;
response["error"] = std::move(msg);
std::string responseStr = boost::json::serialize(response);
auto sendError = [this, ip](
auto error,
boost::json::value const& id,
boost::json::object const& request) {
auto e = RPC::make_error(error);
if (!id.is_null())
e["id"] = id;
e["request"] = request;
auto responseStr = boost::json::serialize(e);
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << responseStr;
dosGuard_.add(*ip, responseStr.size());
send(std::move(responseStr));
};
boost::json::value raw = [](std::string const&& msg) {
try
{
return boost::json::parse(msg);
}
catch (std::exception&)
{
return boost::json::value{nullptr};
}
}(std::move(msg));
boost::json::object request;
if (!raw.is_object())
return sendError(RPC::Error::rpcINVALID_PARAMS, nullptr, request);
request = raw.as_object();
auto id = request.contains("id") ? request.at("id") : nullptr;
if (!dosGuard_.isOk(*ip))
{
sendError("Too many requests. Slow down");
sendError(RPC::Error::rpcSLOW_DOWN, id, request);
}
else
{
if (!queue_.postCoro(
[m = std::move(msg), shared_this = shared_from_this()](
boost::asio::yield_context yield) {
shared_this->handle_request(std::move(m), yield);
[shared_this = shared_from_this(),
r = std::move(request),
id](boost::asio::yield_context yield) {
shared_this->handle_request(std::move(r), id, yield);
},
dosGuard_.isWhiteListed(*ip)))
sendError("Server overloaded");
sendError(RPC::Error::rpcTOO_BUSY, id, request);
}
do_read();

test.py
View File

@@ -475,14 +475,13 @@ async def ledger_data(ip, port, ledger, limit, binary, cursor):
except websockets.exceptions.ConnectionClosedError as e:
print(e)
def writeLedgerData(data,filename):
print(len(data[0]))
def writeLedgerData(state,filename):
print(len(state))
with open(filename,'w') as f:
data[0].sort()
data[1].sort()
for k,v in zip(data[0],data[1]):
for k,v in state.items():
f.write(k)
f.write('\n')
f.write(':')
f.write(v)
f.write('\n')
@@ -490,15 +489,14 @@ def writeLedgerData(data,filename):
async def ledger_data_full(ip, port, ledger, binary, limit, typ=None, count=-1, marker = None):
address = 'ws://' + str(ip) + ':' + str(port)
try:
blobs = []
keys = []
state = {}
async with websockets.connect(address,max_size=1000000000) as ws:
if int(limit) < 2048:
limit = 2048
while True:
res = {}
if marker is None:
await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":binary, "limit":int(limit)}))
await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":binary, "limit":int(limit),"out_of_order":True}))
res = json.loads(await ws.recv())
else:
@@ -520,16 +518,15 @@ async def ledger_data_full(ip, port, ledger, binary, limit, typ=None, count=-1,
if binary:
if typ is None or x["data"][2:6] == typ:
#print(json.dumps(x))
keys.append(x["index"])
state[x["index"]] = x["data"]
else:
if typ is None or x["LedgerEntryType"] == typ:
blobs.append(x)
keys.append(x["index"])
if count != -1 and len(keys) > count:
state[x["index"]] = x
if count != -1 and len(state) > count:
print("stopping early")
print(len(keys))
print(len(state))
print("done")
return (keys,blobs)
return state
if "cursor" in res:
marker = res["cursor"]
print(marker)
@@ -538,7 +535,7 @@ async def ledger_data_full(ip, port, ledger, binary, limit, typ=None, count=-1,
print(marker)
else:
print("done")
return (keys, blobs)
return state
except websockets.exceptions.ConnectionClosedError as e:
@@ -574,7 +571,19 @@ def compare_book_offers(aldous, p2p):
print("offers match!")
return True
async def book_changes(ip, port, ledger):
address = 'ws://' + str(ip) + ':' + str(port)
try:
async with websockets.connect(address) as ws:
await ws.send(json.dumps({
"command" : "book_changes",
"ledger_index" : ledger
}))
res = json.loads(await ws.recv())
print(json.dumps(res, indent=4, sort_keys=True))
except websockets.exceptions.ConnectionClosedError as e:
print(e)
async def book_offerses(ip, port, ledger, books, numCalls):
address = 'ws://' + str(ip) + ':' + str(port)
random.seed()
@@ -792,6 +801,7 @@ async def fee(ip, port):
print(json.dumps(res,indent=4,sort_keys=True))
except websockets.exceptions.connectionclosederror as e:
print(e)
async def server_info(ip, port):
address = 'ws://' + str(ip) + ':' + str(port)
try:
@@ -971,7 +981,7 @@ async def verifySubscribe(ip,clioPort,ripdPort):
parser = argparse.ArgumentParser(description='test script for xrpl-reporting')
parser.add_argument('action', choices=["account_info", "tx", "txs","account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range","ledger_entry", "ledgers", "ledger_entries","account_txs","account_infos","account_txs_full","book_offerses","ledger_diff","perf","fee","server_info", "gaps","subscribe","verify_subscribe","call"])
parser.add_argument('action', choices=["account_info", "tx", "txs","account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range","ledger_entry", "ledgers", "ledger_entries","account_txs","account_infos","account_txs_full","book_changes","book_offerses","ledger_diff","perf","fee","server_info", "gaps","subscribe","verify_subscribe","call"])
parser.add_argument('--ip', default='127.0.0.1')
parser.add_argument('--port', default='8080')
@@ -1159,14 +1169,17 @@ def run(args):
end = datetime.datetime.now().timestamp()
num = int(args.numRunners) * int(args.numCalls)
print("Completed " + str(num) + " in " + str(end - start) + " seconds. Throughput = " + str(num / (end - start)) + " calls per second")
elif args.action == "book_changes":
asyncio.get_event_loop().run_until_complete(book_changes(args.ip, args.port, int(args.ledger)))
elif args.action == "book_offerses":
books = getBooks(args.filename)
async def runner():
tasks = []
for x in range(0,int(args.numRunners)):
tasks.append(asyncio.create_task(book_offerses(args.ip, args.port,int(args.ledger),books, int(args.numCalls))))
for x in range(0, int(args.numRunners)):
tasks.append(asyncio.create_task(book_offerses(args.ip, args.port, int(args.ledger), books, int(args.numCalls))))
for t in tasks:
await t
@@ -1263,7 +1276,7 @@ def run(args):
res = asyncio.get_event_loop().run_until_complete(
ledger_data_full(args.ip, args.port, args.ledger, bool(args.binary), args.limit,args.type, int(args.count), args.marker))
print(len(res[0]))
print(len(res))
if args.verify:
writeLedgerData(res,args.filename)

View File

@@ -1,5 +1,6 @@
#include <algorithm>
#include <backend/DBHelpers.h>
#include <etl/ReportingETL.h>
#include <gtest/gtest.h>
#include <rpc/RPCHelpers.h>
@@ -296,6 +297,122 @@ TEST(BackendTest, Basic)
"E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C"
"004F06";
// An NFTokenMint tx
std::string nftTxnHex =
"1200192200000008240011CC9B201B001F71D6202A0000000168400000"
"000000000C7321ED475D1452031E8F9641AF1631519A58F7B8681E172E"
"4838AA0E59408ADA1727DD74406960041F34F10E0CBB39444B4D4E577F"
"C0B7E8D843D091C2917E96E7EE0E08B30C91413EC551A2B8A1D405E8BA"
"34FE185D8B10C53B40928611F2DE3B746F0303751868747470733A2F2F"
"677265677765697362726F642E636F6D81146203F49C21D5D6E022CB16"
"DE3538F248662FC73C";
std::string nftTxnMeta =
"201C00000001F8E511005025001F71B3556ED9C9459001E4F4A9121F4E"
"07AB6D14898A5BBEF13D85C25D743540DB59F3CF566203F49C21D5D6E0"
"22CB16DE3538F248662FC73CFFFFFFFFFFFFFFFFFFFFFFFFE6FAEC5A00"
"0800006203F49C21D5D6E022CB16DE3538F248662FC73C8962EFA00000"
"0006751868747470733A2F2F677265677765697362726F642E636F6DE1"
"EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C93E8B1"
"C200000028751868747470733A2F2F677265677765697362726F642E63"
"6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C"
"9808B6B90000001D751868747470733A2F2F677265677765697362726F"
"642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866"
"2FC73C9C28BBAC00000012751868747470733A2F2F6772656777656973"
"62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538"
"F248662FC73CA048C0A300000007751868747470733A2F2F6772656777"
"65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16"
"DE3538F248662FC73CAACE82C500000029751868747470733A2F2F6772"
"65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0"
"22CB16DE3538F248662FC73CAEEE87B80000001E751868747470733A2F"
"2F677265677765697362726F642E636F6DE1EC5A000800006203F49C21"
"D5D6E022CB16DE3538F248662FC73CB30E8CAF00000013751868747470"
"733A2F2F677265677765697362726F642E636F6DE1EC5A000800006203"
"F49C21D5D6E022CB16DE3538F248662FC73CB72E91A200000008751868"
"747470733A2F2F677265677765697362726F642E636F6DE1EC5A000800"
"006203F49C21D5D6E022CB16DE3538F248662FC73CC1B453C40000002A"
"751868747470733A2F2F677265677765697362726F642E636F6DE1EC5A"
"000800006203F49C21D5D6E022CB16DE3538F248662FC73CC5D458BB00"
"00001F751868747470733A2F2F677265677765697362726F642E636F6D"
"E1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CC9F4"
"5DAE00000014751868747470733A2F2F677265677765697362726F642E"
"636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC7"
"3CCE1462A500000009751868747470733A2F2F67726567776569736272"
"6F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248"
"662FC73CD89A24C70000002B751868747470733A2F2F67726567776569"
"7362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE35"
"38F248662FC73CDCBA29BA00000020751868747470733A2F2F67726567"
"7765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB"
"16DE3538F248662FC73CE0DA2EB100000015751868747470733A2F2F67"
"7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6"
"E022CB16DE3538F248662FC73CE4FA33A40000000A751868747470733A"
"2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C"
"21D5D6E022CB16DE3538F248662FC73CF39FFABD000000217518687474"
"70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062"
"03F49C21D5D6E022CB16DE3538F248662FC73CF7BFFFB0000000167518"
"68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008"
"00006203F49C21D5D6E022CB16DE3538F248662FC73CFBE004A7000000"
"0B751868747470733A2F2F677265677765697362726F642E636F6DE1F1"
"E1E72200000000501A6203F49C21D5D6E022CB16DE3538F248662FC73C"
"662FC73C8962EFA000000006FAEC5A000800006203F49C21D5D6E022CB"
"16DE3538F248662FC73C8962EFA000000006751868747470733A2F2F67"
"7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6"
"E022CB16DE3538F248662FC73C93E8B1C200000028751868747470733A"
"2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C"
"21D5D6E022CB16DE3538F248662FC73C9808B6B90000001D7518687474"
"70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062"
"03F49C21D5D6E022CB16DE3538F248662FC73C9C28BBAC000000127518"
"68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008"
"00006203F49C21D5D6E022CB16DE3538F248662FC73CA048C0A3000000"
"07751868747470733A2F2F677265677765697362726F642E636F6DE1EC"
"5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAACE82C5"
"00000029751868747470733A2F2F677265677765697362726F642E636F"
"6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAE"
"EE87B80000001E751868747470733A2F2F677265677765697362726F64"
"2E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662F"
"C73CB30E8CAF00000013751868747470733A2F2F677265677765697362"
"726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F2"
"48662FC73CB72E91A200000008751868747470733A2F2F677265677765"
"697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE"
"3538F248662FC73CC1B453C40000002A751868747470733A2F2F677265"
"677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022"
"CB16DE3538F248662FC73CC5D458BB0000001F751868747470733A2F2F"
"677265677765697362726F642E636F6DE1EC5A000800006203F49C21D5"
"D6E022CB16DE3538F248662FC73CC9F45DAE0000001475186874747073"
"3A2F2F677265677765697362726F642E636F6DE1EC5A000800006203F4"
"9C21D5D6E022CB16DE3538F248662FC73CCE1462A50000000975186874"
"7470733A2F2F677265677765697362726F642E636F6DE1EC5A00080000"
"6203F49C21D5D6E022CB16DE3538F248662FC73CD89A24C70000002B75"
"1868747470733A2F2F677265677765697362726F642E636F6DE1EC5A00"
"0800006203F49C21D5D6E022CB16DE3538F248662FC73CDCBA29BA0000"
"0020751868747470733A2F2F677265677765697362726F642E636F6DE1"
"EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CE0DA2E"
"B100000015751868747470733A2F2F677265677765697362726F642E63"
"6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C"
"E4FA33A40000000A751868747470733A2F2F677265677765697362726F"
"642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866"
"2FC73CEF7FF5C60000002C751868747470733A2F2F6772656777656973"
"62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538"
"F248662FC73CF39FFABD00000021751868747470733A2F2F6772656777"
"65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16"
"DE3538F248662FC73CF7BFFFB000000016751868747470733A2F2F6772"
"65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0"
"22CB16DE3538F248662FC73CFBE004A70000000B751868747470733A2F"
"2F677265677765697362726F642E636F6DE1F1E1E1E511006125001F71"
"B3556ED9C9459001E4F4A9121F4E07AB6D14898A5BBEF13D85C25D7435"
"40DB59F3CF56BE121B82D5812149D633F605EB07265A80B762A365CE94"
"883089FEEE4B955701E6240011CC9B202B0000002C6240000002540BE3"
"ECE1E72200000000240011CC9C2D0000000A202B0000002D202C000000"
"066240000002540BE3E081146203F49C21D5D6E022CB16DE3538F24866"
"2FC73CE1E1F1031000";
std::string nftTxnHashHex =
"6C7F69A6D25A13AC4A2E9145999F45D4674F939900017A96885FDC2757"
"E9284E";
ripple::uint256 nftID;
EXPECT_TRUE(
nftID.parseHex("000800006203F49C21D5D6E022CB16DE3538F248662"
"FC73CEF7FF5C60000002C"));
std::string metaBlob = hexStringToBinaryString(metaHex);
std::string txnBlob = hexStringToBinaryString(txnHex);
std::string hashBlob = hexStringToBinaryString(hashHex);
@@ -304,6 +421,10 @@ TEST(BackendTest, Basic)
hexStringToBinaryString(accountIndexHex);
std::vector<ripple::AccountID> affectedAccounts;
std::string nftTxnBlob = hexStringToBinaryString(nftTxnHex);
std::string nftTxnMetaBlob =
hexStringToBinaryString(nftTxnMeta);
{
backend->startWrites();
lgrInfoNext.seq = lgrInfoNext.seq + 1;
@@ -322,23 +443,62 @@ TEST(BackendTest, Basic)
{
affectedAccounts.push_back(a);
}
std::vector<AccountTransactionsData> accountTxData;
accountTxData.emplace_back(txMeta, hash256, journal);
ripple::uint256 nftHash256;
EXPECT_TRUE(nftHash256.parseHex(nftTxnHashHex));
ripple::TxMeta nftTxMeta{
nftHash256, lgrInfoNext.seq, nftTxnMetaBlob};
ripple::SerialIter it{nftTxnBlob.data(), nftTxnBlob.size()};
ripple::STTx sttx{it};
auto const [parsedNFTTxsRef, parsedNFT] =
getNFTData(nftTxMeta, sttx);
// need to copy the nft txns so we can std::move later
std::vector<NFTTransactionsData> parsedNFTTxs;
parsedNFTTxs.insert(
parsedNFTTxs.end(),
parsedNFTTxsRef.begin(),
parsedNFTTxsRef.end());
EXPECT_EQ(parsedNFTTxs.size(), 1);
EXPECT_TRUE(parsedNFT.has_value());
EXPECT_EQ(parsedNFT->tokenID, nftID);
std::vector<NFTsData> nftData;
nftData.push_back(*parsedNFT);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
backend->writeTransaction(
std::move(std::string{hashBlob}),
std::string{hashBlob},
lgrInfoNext.seq,
lgrInfoNext.closeTime.time_since_epoch().count(),
std::move(std::string{txnBlob}),
std::move(std::string{metaBlob}));
std::string{txnBlob},
std::string{metaBlob});
backend->writeAccountTransactions(std::move(accountTxData));
// NFT writing not yet implemented for pg
if (config == cassandraConfig)
{
backend->writeNFTs(std::move(nftData));
backend->writeNFTTransactions(std::move(parsedNFTTxs));
}
else
{
EXPECT_THROW(
{ backend->writeNFTs(std::move(nftData)); },
std::runtime_error);
EXPECT_THROW(
{
backend->writeNFTTransactions(
std::move(parsedNFTTxs));
},
std::runtime_error);
}
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{accountBlob}));
std::string{accountBlob});
backend->writeSuccessor(
uint256ToString(Backend::firstKey),
lgrInfoNext.seq,
@@ -384,6 +544,34 @@ TEST(BackendTest, Basic)
EXPECT_FALSE(cursor);
}
// NFT fetching not yet implemented for pg
if (config == cassandraConfig)
{
auto nft =
backend->fetchNFT(nftID, lgrInfoNext.seq, yield);
EXPECT_TRUE(nft.has_value());
auto [nftTxns, cursor] = backend->fetchNFTTransactions(
nftID, 100, true, {}, yield);
EXPECT_EQ(nftTxns.size(), 1);
EXPECT_EQ(nftTxns[0], nftTxns[0]);
EXPECT_FALSE(cursor);
}
else
{
EXPECT_THROW(
{
backend->fetchNFT(
nftID, lgrInfoNext.seq, yield);
},
std::runtime_error);
EXPECT_THROW(
{
backend->fetchNFTTransactions(
nftID, 100, true, {}, yield);
},
std::runtime_error);
}
ripple::uint256 key256;
EXPECT_TRUE(key256.parseHex(accountIndexHex));
auto obj = backend->fetchLedgerObject(
@@ -417,16 +605,15 @@ TEST(BackendTest, Basic)
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
std::shuffle(
accountBlob.begin(),
accountBlob.end(),
std::default_random_engine(seed));
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{accountBlob}));
std::string{accountBlob});
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
}
@@ -480,12 +667,11 @@ TEST(BackendTest, Basic)
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{}));
std::string{});
backend->writeSuccessor(
uint256ToString(Backend::firstKey),
lgrInfoNext.seq,
@@ -527,9 +713,8 @@ TEST(BackendTest, Basic)
EXPECT_FALSE(obj);
}
auto generateObjects = [seed](
size_t numObjects,
uint32_t ledgerSequence) {
auto generateObjects = [](size_t numObjects,
uint32_t ledgerSequence) {
std::vector<std::pair<std::string, std::string>> res{
numObjects};
ripple::uint256 key;
@@ -551,26 +736,26 @@ TEST(BackendTest, Basic)
}
return objs;
};
auto generateTxns =
[seed](size_t numTxns, uint32_t ledgerSequence) {
std::vector<
std::tuple<std::string, std::string, std::string>>
res{numTxns};
ripple::uint256 base;
base = ledgerSequence * 100000;
for (auto& blob : res)
{
++base;
std::string hashStr{
(const char*)base.data(), base.size()};
std::string txnStr =
"tx" + std::to_string(ledgerSequence) + hashStr;
std::string metaStr = "meta" +
std::to_string(ledgerSequence) + hashStr;
blob = std::make_tuple(hashStr, txnStr, metaStr);
}
return res;
};
auto generateTxns = [](size_t numTxns,
uint32_t ledgerSequence) {
std::vector<
std::tuple<std::string, std::string, std::string>>
res{numTxns};
ripple::uint256 base;
base = ledgerSequence * 100000;
for (auto& blob : res)
{
++base;
std::string hashStr{
(const char*)base.data(), base.size()};
std::string txnStr =
"tx" + std::to_string(ledgerSequence) + hashStr;
std::string metaStr =
"meta" + std::to_string(ledgerSequence) + hashStr;
blob = std::make_tuple(hashStr, txnStr, metaStr);
}
return res;
};
auto generateAccounts = [](uint32_t ledgerSequence,
uint32_t numAccounts) {
std::vector<ripple::AccountID> accounts;
@@ -635,7 +820,7 @@ TEST(BackendTest, Basic)
backend->startWrites();
backend->writeLedger(
lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo)));
lgrInfo, ledgerInfoToBinaryString(lgrInfo));
for (auto [hash, txn, meta] : txns)
{
backend->writeTransaction(
@@ -729,8 +914,7 @@ TEST(BackendTest, Basic)
for (auto [account, data] : accountTx)
{
std::vector<Backend::TransactionAndMetadata> retData;
std::optional<Backend::AccountTransactionsCursor>
cursor;
std::optional<Backend::TransactionsCursor> cursor;
do
{
uint32_t limit = 10;
@@ -1854,12 +2038,11 @@ TEST(Backend, cacheIntegration)
lgrInfoNext.hash++;
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{accountBlob}));
std::string{accountBlob});
auto key =
ripple::uint256::fromVoidChecked(accountIndexBlob);
backend->cache().update(
@@ -1921,8 +2104,7 @@ TEST(Backend, cacheIntegration)
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
std::shuffle(
accountBlob.begin(),
accountBlob.end(),
@@ -1933,9 +2115,9 @@ TEST(Backend, cacheIntegration)
{{*key, {accountBlob.begin(), accountBlob.end()}}},
lgrInfoNext.seq);
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{accountBlob}));
std::string{accountBlob});
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
}
@@ -1983,15 +2165,14 @@ TEST(Backend, cacheIntegration)
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
auto key =
ripple::uint256::fromVoidChecked(accountIndexBlob);
backend->cache().update({{*key, {}}}, lgrInfoNext.seq);
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{}));
std::string{});
backend->writeSuccessor(
uint256ToString(Backend::firstKey),
lgrInfoNext.seq,
@@ -2027,9 +2208,8 @@ TEST(Backend, cacheIntegration)
EXPECT_FALSE(obj);
}
auto generateObjects = [seed](
size_t numObjects,
uint32_t ledgerSequence) {
auto generateObjects = [](size_t numObjects,
uint32_t ledgerSequence) {
std::vector<std::pair<std::string, std::string>> res{
numObjects};
ripple::uint256 key;