Compare commits

...

37 Commits
0.2.0 ... 1.0.0

Author SHA1 Message Date
Nathan Nichols
1e7645419f set version to 1.0.0 (#202) 2022-06-29 18:38:07 -04:00
Michael Legleux
35db5d3da9 add headers for building with gcc-12 (#201)
Signed-off-by: Michael Legleux <mlegleux@ripple.com>
2022-06-29 18:37:51 -04:00
Nathan Nichols
4e581e659f reserve correctly when limit is numeric_limits::max() (#198) 2022-06-28 09:28:00 -07:00
Nathan Nichols
55f0536dca set version to 0.3.0-b3 (#197) 2022-06-27 18:32:57 -04:00
Nathan Nichols
a3a15754b4 forward channel_verify and channel_authorize (#196) 2022-06-27 13:00:36 -07:00
Nathan Nichols
59d7d1bc49 allow user to specify no peer in doAccountLines (#193) 2022-06-23 13:18:44 -04:00
Nathan Nichols
5f5648470a append warnings to response instead of result (#192) 2022-06-21 12:39:48 -04:00
Nathan Nichols
13afe9373d set version to 0.3.0-b2 (#188) 2022-06-17 20:26:17 -04:00
Nathan Nichols
9a79bdc50b sendError will send id: in WsBase (#184) 2022-06-17 20:25:58 -04:00
CJ Cobb
7d5415e8b0 always append clio warning (#186)
* appends a warning stating that this is a clio server to every response
2022-06-17 16:01:33 -05:00
Nathan Nichols
54669420bf return warnings in response instead of response.result (#182) 2022-06-17 16:15:14 -04:00
CJ Cobb
a62849b89a log every request and duration at info (#183) 2022-06-17 14:07:01 -05:00
CJ Cobb
20c2654abc bypass forwarding cache if ledger_index is current or closed (#185) 2022-06-17 14:06:47 -05:00
Brandon Kong
37c810f6fa Added log rotation feature and console/file logging config options (#181)
Fixes an issue that occurred when rebasing the previous log rotation PR.

Updated config to allow log rotation size, log rotation interval, and log directory max size specification

Updated file size base unit to Mb, added documentation for logging

The file size base unit is now in Mb, with detailed description of logging configurations in readme.md

Updated CMake install script to correctly set path in production mode

Co-authored-by: Brandon Kong <bkong@ripple.com>
2022-06-17 09:43:15 -05:00
Nathan Nichols
d64753c0dd set version to 0.3.0-b1 (#178) 2022-06-15 18:29:40 -05:00
Nathan Nichols
92d6687151 specify [min, default, max] limits in handler table (#135)
* specify rpc limits in the handler table
* special case in ledger_data if !binary
2022-06-15 16:51:49 -05:00
Nathan Nichols
fa8405df83 return no offers when an owner directory is not found (#176) 2022-06-15 16:19:08 -05:00
Nathan Nichols
3d3b8e91b6 fix ledger_index_min/max in account_tx response (#172) 2022-06-15 16:18:57 -05:00
Nathan Nichols
14a972c8e2 error when marker does not exist (#167) 2022-06-15 16:18:45 -05:00
Nathan Nichols
166ff63dbc cache commands that dont take parameters (#153)
* Adds a forwardCache to each ETLSource which allows operators to specify which commands (that don't require parameters) they want to cache.
2022-06-15 16:18:25 -05:00
CJ Cobb
b7ae6a0495 Iterate account nft pages without using successor (#177)
* NFTs are iterated in reverse order, starting from the max page,
  working towards the min page.
* Iteration always continues to page end

Signed-off-by: CJ Cobb <ccobb@ripple.com>
2022-06-15 16:17:31 -05:00
CJ Cobb
d0ea9d20ab Use separate IO context for socket IO (#168)
* Keep track of number of requests currently being processed
* Reject new requests when number of in flight requests exceeds a
  configurable limit
* Track time spent between request arrival and start of request
  processing

Signed-off-by: CJ Cobb <ccobb@ripple.com>

Co-authored-by: natenichols <natenichols@cox.net>
2022-06-15 16:17:15 -05:00
ethanlabelle
b45b34edb1 append warning to response if clio is out of date (#175)
Fixes #46.
2022-06-14 13:50:42 -05:00
Brandon Kong
7ecb894632 Added log rotation feature and console/file logging config options (#161)
- Added log rotation feature, currently set to rotate for every 12h or if log file size exceeds 2 Gb. If the log directory exceeds 50 Gb, old log files will be deleted.
- Added config options for toggling console and file logging.
- Changed config options for log file storage, now writing log files to a directory instead of a single file.
- Added config options to allow specifying the log rotation size, log rotation interval, and log directory max size.
- Added detailed documentation in README.md regarding how to configure log rotation.
- Updated CMake install script to correctly set path in production mode

Co-authored-by: Brandon Kong <bkong@ripple.com>
2022-06-13 11:22:00 -05:00
Nathan Nichols
8de39739fa remove unused file that was accidentally included in #162 (#169) 2022-06-03 16:09:39 -05:00
Nathan Nichols
f16a05ae7a cleanup websocket sessions that are subscribed to books or accounts (#146) 2022-06-03 12:46:45 -05:00
Nathan Nichols
458fac776c move version specifier to Build.h 2022-06-02 16:37:43 -07:00
Nathan Nichols
af575b1bcf dont report error.what() when returning rpcINTERNAL (#163) 2022-06-02 16:41:09 -05:00
Nathan Nichols
ee615a290b report transactions as validated in account_tx (#165) 2022-06-02 16:21:55 -05:00
Nathan Nichols
31cc06d4f4 handle string ledger_index values in doAccountTx (#162)
* handle string ledger_index values in doAccountTx

* return ledgerInfo when ledger_hash is specified
2022-06-02 15:53:12 -05:00
Michael Legleux
f90dac2f85 pin-dependency-versions (#157) 2022-05-25 13:42:04 -04:00
Michael Legleux
8a5be14ba8 Fix clio package
Configure example-config's clio.log path to /var/log/clio
2022-05-18 14:56:34 -07:00
Nathan Nichols
ba6b764e38 send messages to subscribers w/ shared_ptr (#147) 2022-05-18 16:47:12 -05:00
Devon White
9939f6e6f4 Add NFT RPC infrastructure 2022-05-18 15:41:56 -04:00
Michael Legleux
a72aa73afe Run clio_tests with gha 2022-05-18 11:29:48 -07:00
Michael Legleux
3d02803135 Save .deb package after build 2022-05-18 00:28:39 -07:00
Nathan Nichols
3f47b85e3b disable cache when CacheLoadStyle::NONE (#152) 2022-05-15 19:29:05 -05:00
61 changed files with 2334 additions and 1035 deletions


@@ -6,3 +6,4 @@
# clang-format
e41150248a97e4bdc1cf21b54650c4bb7c63928e
2e542e7b0d94451a933c88778461cc8d3d7e6417


@@ -22,19 +22,19 @@ jobs:
runs-on: [self-hosted, Linux]
needs: lint
steps:
- name: Get Clio repo
- name: Clone Clio repo
uses: actions/checkout@v3
with:
path: clio_src
ref: 'develop-next'
- name: Get Clio CI repo
- name: Clone Clio CI repo
uses: actions/checkout@v3
with:
path: clio_ci
repository: 'XRPLF/clio-ci'
- name: Get GitHub actions repo
- name: Clone GitHub actions repo
uses: actions/checkout@v3
with:
repository: XRPLF/clio-gha
@@ -43,8 +43,27 @@ jobs:
- name: Build
uses: XRPLF/clio-gha/build@main
# - name: Artifact clio_tests
# uses: actions/upload-artifact@v2
# with:
# name: clio_output
# path: clio_src/build/clio_tests
- name: Artifact clio_tests
uses: actions/upload-artifact@v2
with:
name: clio_tests
path: clio_tests
- name: Artifact Debian package
uses: actions/upload-artifact@v2
with:
name: deb_package-${{ github.sha }}
path: clio_ci/build/*.deb
test_clio:
name: Test Clio
runs-on: [self-hosted, Linux]
needs: build_clio
steps:
- name: Get clio_tests artifact
uses: actions/download-artifact@v3
with:
name: clio_tests
- name: Run tests
uses: XRPLF/clio-gha/test@main

.gitignore

@@ -1,2 +1,3 @@
*clio*.log
build/
.python-version

CMake/ClioVersion.cmake

@@ -0,0 +1,15 @@
#[===================================================================[
read version from source
#]===================================================================]
file (STRINGS src/main/impl/Build.cpp BUILD_INFO)
foreach (line_ ${BUILD_INFO})
if (line_ MATCHES "versionString[ ]*=[ ]*\"(.+)\"")
set (clio_version ${CMAKE_MATCH_1})
endif ()
endforeach ()
if (clio_version)
message (STATUS "clio version: ${clio_version}")
else ()
message (FATAL_ERROR "unable to determine clio version")
endif ()
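With the versionString = "1.0.0" literal added to src/main/impl/Build.cpp later in this diff, the MATCHES expression above captures "1.0.0", so the configure step now reports "clio version: 1.0.0"; the hard-coded project(clio VERSION 0.2.0) declaration is dropped from CMakeLists.txt accordingly (see that file's hunk further down).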


@@ -10,7 +10,7 @@ if(NOT cassandra)
ExternalProject_Add(zlib_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/madler/zlib.git
GIT_TAG master
GIT_TAG v1.2.12
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
)
@@ -33,7 +33,7 @@ if(NOT cassandra)
ExternalProject_Add(krb5_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/krb5/krb5.git
GIT_TAG master
GIT_TAG krb5-1.20
UPDATE_COMMAND ""
CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared
BUILD_IN_SOURCE 1
@@ -66,7 +66,7 @@ if(NOT cassandra)
ExternalProject_Add(libuv_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/libuv/libuv.git
GIT_TAG v1.x
GIT_TAG v1.44.1
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
)
@@ -89,7 +89,7 @@ if(NOT cassandra)
ExternalProject_Add(cassandra_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
GIT_TAG master
GIT_TAG 2.16.2
CMAKE_ARGS
-DLIBUV_ROOT_DIR=${BINARY_DIR}
-DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include


@@ -3,8 +3,14 @@ set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
install(TARGETS clio_server DESTINATION bin)
# install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests?
install(FILES example-config.json DESTINATION etc RENAME config.json)
#install(FILES example-config.json DESTINATION etc RENAME config.json)
file(READ example-config.json config)
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
configure_file("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)


@@ -1 +0,0 @@
#define VERSION "@PROJECT_VERSION@"


@@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.16.3)
project(clio VERSION 0.2.0)
project(clio)
option(BUILD_TESTS "Build tests" TRUE)
@@ -10,6 +10,22 @@ if(VERBOSE)
set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
endif()
if(NOT GIT_COMMIT_HASH)
if(VERBOSE)
message(WARNING "GIT_COMMIT_HASH not provided...looking for git")
endif()
find_package(Git)
if(Git_FOUND)
execute_process(COMMAND ${GIT_EXECUTABLE} describe --always --abbrev=8
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
if(gch)
set(GIT_COMMIT_HASH "${gch}")
message(STATUS "Git commit: ${GIT_COMMIT_HASH}")
add_definitions(-DCLIO_GIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
endif()
endif()
endif() #git
add_library(clio)
target_compile_features(clio PUBLIC cxx_std_20)
target_include_directories(clio PUBLIC src)
@@ -17,14 +33,15 @@ target_include_directories(clio PUBLIC src)
include(FetchContent)
include(ExternalProject)
include(CMake/settings.cmake)
include(CMake/ClioVersion.cmake)
include(CMake/deps/rippled.cmake)
include(CMake/deps/Boost.cmake)
include(CMake/deps/cassandra.cmake)
include(CMake/deps/Postgres.cmake)
# configure_file(CMake/version-config.h include/version.h) # NOTE: Not used, but an idea how to handle versioning.
target_sources(clio PRIVATE
## Main
src/main/impl/Build.cpp
## Backend
src/backend/BackendInterface.cpp
src/backend/CassandraBackend.cpp
@@ -41,6 +58,7 @@ target_sources(clio PRIVATE
src/rpc/RPC.cpp
src/rpc/RPCHelpers.cpp
src/rpc/Counters.cpp
src/rpc/WorkQueue.cpp
## RPC Methods
# Account
src/rpc/handlers/AccountChannels.cpp
@@ -62,6 +80,8 @@ target_sources(clio PRIVATE
src/rpc/handlers/AccountTx.cpp
# Dex
src/rpc/handlers/BookOffers.cpp
# NFT
src/rpc/handlers/NFTOffers.cpp
# Payment Channel
src/rpc/handlers/ChannelAuthorize.cpp
src/rpc/handlers/ChannelVerify.cpp
@@ -72,7 +92,7 @@ target_sources(clio PRIVATE
# Utility
src/rpc/handlers/Random.cpp)
add_executable(clio_server src/main.cpp)
add_executable(clio_server src/main/main.cpp)
target_link_libraries(clio_server PUBLIC clio)
if(BUILD_TESTS)


@@ -1,9 +1,12 @@
[![Build Clio](https://github.com/legleux/clio/actions/workflows/build.yml/badge.svg?branch=run-tests)](https://github.com/legleux/clio/actions/workflows/build.yml)
**Status:** This software is in beta mode. We encourage anyone to try it out and
report any issues they discover. Version 1.0 coming soon.
# Clio
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over websocket or JSON-RPC. Validated
historical ledger and transaction data is stored in a more space efficient format,
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC. Validated
historical ledger and transaction data are stored in a more space-efficient format,
using up to 4 times less space than rippled. Clio can be configured to store data in Apache Cassandra or ScyllaDB,
allowing for scalable read throughput. Multiple Clio nodes can share
access to the same dataset, allowing for a highly available cluster of Clio nodes,
@@ -12,9 +15,9 @@ without the need for redundant data storage or computation.
Clio offers the full rippled API, with the caveat that Clio by default only returns validated data.
This means that `ledger_index` defaults to `validated` instead of `current` for all requests.
Other non-validated data is also not returned, such as information about queued transactions.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node, and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
Clio does not connect to the peer to peer network. Instead, Clio extracts data from a specified rippled node. Running Clio requires access to a rippled node
Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.
@@ -25,11 +28,11 @@ from which data can be extracted. The rippled node does not need to be running o
## Building
Clio is built with cmake. Clio requires c++20, and boost 1.75.0 or later.
Clio is built with CMake. Clio requires c++20, and boost 1.75.0 or later.
Use these instructions to build a Clio executable from source. These instructions were tested on Ubuntu 20.04 LTS.
Use these instructions to build a Clio executable from the source. These instructions were tested on Ubuntu 20.04 LTS.
```
```sh
# Install dependencies
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake
@@ -49,28 +52,30 @@ Use these instructions to build a Clio executable from source. These instruction
```
## Running
`./clio_server config.json`
```sh
./clio_server config.json
```
Clio needs access to a rippled server. The config files of rippled and Clio need
to match in a certain sense.
Clio needs to know:
- the ip of rippled
- the port on which rippled is accepting unencrypted websocket connections
- the IP of rippled
- the port on which rippled is accepting unencrypted WebSocket connections
- the port on which rippled is handling gRPC requests
rippled needs to open:
- a port to accept unencrypted websocket connections
- a port to handle gRPC requests, with the ip(s) of Clio specified in the `secure_gateway` entry
- a port to handle gRPC requests, with the IP(s) of Clio specified in the `secure_gateway` entry
The example configs of rippled and Clio are setup such that minimal changes are
The example configs of rippled and Clio are setups such that minimal changes are
required. When running locally, the only change needed is to uncomment the `port_grpc`
section of the rippled config. When running Clio and rippled on separate machines,
in addition to uncommenting the `port_grpc` section, a few other steps must be taken:
1. change the `ip` of the first entry of `etl_sources` to the ip where your rippled
1. change the `ip` of the first entry of `etl_sources` to the IP where your rippled
server is running
2. open a public, unencrypted websocket port on your rippled server
3. change the ip specified in `secure_gateway` of `port_grpc` section of the rippled config
to the ip of your Clio server. This entry can take the form of a comma separated list if
2. open a public, unencrypted WebSocket port on your rippled server
3. change the IP specified in `secure_gateway` of `port_grpc` section of the rippled config
to the IP of your Clio server. This entry can take the form of a comma-separated list if
you are running multiple Clio nodes.
Once your config files are ready, start rippled and Clio. It doesn't matter which you
@@ -84,7 +89,7 @@ the most recent ledger on the network, and then backfill. If Clio is extracting
from rippled, and then rippled is stopped for a significant amount of time and then restarted, rippled
will take time to backfill to the next ledger that Clio wants. The time it takes is proportional
to the amount of time rippled was offline for. Also be aware that the amount rippled backfills
is dependent on the online_delete and ledger_history config values; if these values
are dependent on the online_delete and ledger_history config values; if these values
are small, and rippled is stopped for a significant amount of time, rippled may never backfill
to the ledger that Clio wants. To avoid this situation, it is advised to keep history
proportional to the amount of time that you expect rippled to be offline. For example, if you
@@ -106,7 +111,7 @@ This can take some time, and depends on database throughput. With a moderately f
database, this should take less than 10 minutes. If you did not properly set `secure_gateway`
in the `port_grpc` section of rippled, this step will fail. Once the first ledger
is fully downloaded, Clio only needs to extract the changed data for each ledger,
so extraction is much faster and Clio can keep up with rippled in real time. Even under
so extraction is much faster and Clio can keep up with rippled in real-time. Even under
intense load, Clio should not lag behind the network, as Clio is not processing the data,
and is simply writing to a database. The throughput of Clio is dependent on the throughput
of your database, but a standard Cassandra or Scylla deployment can handle
@@ -140,3 +145,26 @@ are doing this, be aware that database traffic will be flowing across regions,
which can cause high latencies. A possible alternative to this is to just deploy
a database in each region, and the Clio nodes in each region use their region's database.
This is effectively two systems.
## Logging
Clio provides several logging options, all are configurable via the config file and are detailed below.
`log_level`: The minimum level of severity at which the log message will be outputted.
Severity options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`.
`log_to_console`: Enable/disable log output to console. Options are `true`/`false`.
`log_to_file`: Enable/disable log saving to files in persistent local storage. Options are `true`/`false`.
`log_directory`: Path to the directory where log files are stored. If such directory doesn't exist, Clio will create it.
`log_rotation_size`: The max size of the log file in **megabytes** before it will rotate into a smaller file.
`log_directory_max_size`: The max size of the log directory in **megabytes** before old log files will be
deleted to free up space.
`log_rotation_hour_interval`: The time interval in **hours** after the last log rotation to automatically
rotate the current log file.
Note, time-based log rotation occurs dependently on size-based log rotation, where if a
size-based log rotation occurs, the timer for the time-based rotation will reset.
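As a worked example of the unit handling above (values taken from the example config shipped in this release): `log_rotation_size: 2048` rotates the log file at 2048 MB (2 GiB), `log_directory_max_size: 51200` prunes the directory once it exceeds 51200 MB (50 GiB), and `log_rotation_hour_interval: 12` forces a rotation at least every 12 hours; these match the built-in defaults applied when the keys are omitted.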


@@ -30,7 +30,12 @@
"port":51233
},
"log_level":"debug",
"log_file":"./clio.log",
"log_to_console": true,
"log_to_file": true,
"log_directory":"./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"online_delete":0,
"extractor_threads":8,
"read_only":false


@@ -259,7 +259,8 @@ BackendInterface::fetchLedgerPage(
ripple::uint256 const& curCursor = keys.size() ? keys.back()
: cursor ? *cursor
: firstKey;
uint32_t seq = outOfOrder ? range->maxSequence : ledgerSequence;
std::uint32_t const seq =
outOfOrder ? range->maxSequence : ledgerSequence;
auto succ = fetchSuccessorKey(curCursor, seq, yield);
if (!succ)
reachedEnd = true;


@@ -1,5 +1,6 @@
#include <backend/SimpleCache.h>
namespace Backend {
uint32_t
SimpleCache::latestLedgerSequence() const
{
@@ -13,6 +14,9 @@ SimpleCache::update(
uint32_t seq,
bool isBackground)
{
if (disabled_)
return;
{
std::unique_lock lck{mtx_};
if (seq > latestSeq_)
@@ -26,6 +30,7 @@ SimpleCache::update(
{
if (isBackground && deletes_.count(obj.key))
continue;
auto& e = map_[obj.key];
if (seq > e.seq)
{
@@ -41,6 +46,7 @@ SimpleCache::update(
}
}
}
std::optional<LedgerObject>
SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
@@ -54,6 +60,7 @@ SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
return {};
return {{e->first, e->second.blob}};
}
std::optional<LedgerObject>
SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
@@ -82,9 +89,18 @@ SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
return {e->second.blob};
}
void
SimpleCache::setDisabled()
{
disabled_ = true;
}
void
SimpleCache::setFull()
{
if (disabled_)
return;
full_ = true;
std::unique_lock lck{mtx_};
deletes_.clear();


@@ -17,10 +17,12 @@ class SimpleCache
uint32_t seq = 0;
Blob blob;
};
std::map<ripple::uint256, CacheEntry> map_;
mutable std::shared_mutex mtx_;
uint32_t latestSeq_ = 0;
std::atomic_bool full_ = false;
std::atomic_bool disabled_ = false;
// temporary set to prevent background thread from writing already deleted
// data. not used when cache is full
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
@@ -45,6 +47,9 @@ public:
std::optional<LedgerObject>
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
void
setDisabled();
void
setFull();
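Together with the disabled_ flag above, this gives ReportingETL::loadCache a way to switch the cache off entirely (see the ReportingETL hunk below). A behavioural sketch follows; the update() element type is an assumption, since only the trailing parameters appear in the hunk above.

```cpp
#include <backend/SimpleCache.h>

#include <cstdint>
#include <vector>

// Behavioural sketch only, not code from the diff.
void
sketchDisabledCache(
    std::vector<Backend::LedgerObject> const& objs,  // element type assumed
    ripple::uint256 const& key,
    std::uint32_t seq)
{
    Backend::SimpleCache cache;
    cache.setDisabled();

    cache.update(objs, seq, /* isBackground = */ false);  // dropped while disabled
    cache.setFull();                                      // also a no-op

    auto const hit = cache.get(key, seq);  // still std::nullopt: nothing was stored
    (void)hit;
}
```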


@@ -9,8 +9,69 @@
#include <backend/DBHelpers.h>
#include <etl/ETLSource.h>
#include <etl/ReportingETL.h>
#include <rpc/RPCHelpers.h>
#include <thread>
void
ForwardCache::freshen()
{
BOOST_LOG_TRIVIAL(trace) << "Freshening ForwardCache";
auto numOutstanding =
std::make_shared<std::atomic_uint>(latestForwarded_.size());
for (auto const& cacheEntry : latestForwarded_)
{
boost::asio::spawn(
strand_,
[this, numOutstanding, command = cacheEntry.first](
boost::asio::yield_context yield) {
boost::json::object request = {{"command", command}};
auto resp = source_.requestFromRippled(request, {}, yield);
if (!resp || resp->contains("error"))
resp = {};
{
std::unique_lock lk(mtx_);
latestForwarded_[command] = resp;
}
});
}
}
void
ForwardCache::clear()
{
std::unique_lock lk(mtx_);
for (auto& cacheEntry : latestForwarded_)
latestForwarded_[cacheEntry.first] = {};
}
std::optional<boost::json::object>
ForwardCache::get(boost::json::object const& request) const
{
std::optional<std::string> command = {};
if (request.contains("command") && !request.contains("method") &&
request.at("command").is_string())
command = request.at("command").as_string().c_str();
else if (
request.contains("method") && !request.contains("command") &&
request.at("method").is_string())
command = request.at("method").as_string().c_str();
if (!command)
return {};
if (RPC::specifiesCurrentOrClosedLedger(request))
return {};
std::shared_lock lk(mtx_);
if (!latestForwarded_.contains(*command))
return {};
return {latestForwarded_.at(*command)};
}
// Create ETL source without grpc endpoint
// Fetch ledger and load initial ledger will fail for this source
// Primarly used in read-only mode, to monitor when ledgers are validated
@@ -27,6 +88,7 @@ ETLSourceImpl<Derived>::ETLSourceImpl(
, backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
, forwardCache_(config, ioContext, *this)
, ioc_(ioContext)
, timer_(ioContext)
{
@@ -245,11 +307,9 @@ PlainETLSource::onConnect(
boost::beast::websocket::stream_base::decorator(
[](boost::beast::websocket::request_type& req) {
req.set(
boost::beast::http::field::user_agent,
std::string(BOOST_BEAST_VERSION_STRING) +
" clio-client");
boost::beast::http::field::user_agent, "clio-client");
req.set("X-User", "coro-client");
req.set("X-User", "clio-client");
}));
// Update the host_ string. This will provide the value of the
@@ -291,11 +351,9 @@ SslETLSource::onConnect(
boost::beast::websocket::stream_base::decorator(
[](boost::beast::websocket::request_type& req) {
req.set(
boost::beast::http::field::user_agent,
std::string(BOOST_BEAST_VERSION_STRING) +
" clio-client");
boost::beast::http::field::user_agent, "clio-client");
req.set("X-User", "coro-client");
req.set("X-User", "clio-client");
}));
// Update the host_ string. This will provide the value of the
@@ -475,6 +533,7 @@ ETLSourceImpl<Derived>::handleMessage()
{
if (response.contains("transaction"))
{
forwardCache_.freshen();
subscriptions_->forwardProposedTransaction(response);
}
else if (
@@ -1026,7 +1085,23 @@ ETLSourceImpl<Derived>::forwardToRippled(
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(debug) << "Attempting to forward request to tx. "
if (auto resp = forwardCache_.get(request); resp)
{
BOOST_LOG_TRIVIAL(debug) << "request hit forwardCache";
return resp;
}
return requestFromRippled(request, clientIp, yield);
}
template <class Derived>
std::optional<boost::json::object>
ETLSourceImpl<Derived>::requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
{
BOOST_LOG_TRIVIAL(trace) << "Attempting to forward request to tx. "
<< "request = " << boost::json::serialize(request);
boost::json::object response;
@@ -1047,7 +1122,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
// These objects perform our I/O
tcp::resolver resolver{ioc_};
BOOST_LOG_TRIVIAL(debug) << "Creating websocket";
BOOST_LOG_TRIVIAL(trace) << "Creating websocket";
auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);
// Look up the domain name
@@ -1057,7 +1132,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
ws->next_layer().expires_after(std::chrono::seconds(3));
BOOST_LOG_TRIVIAL(debug) << "Connecting websocket";
BOOST_LOG_TRIVIAL(trace) << "Connecting websocket";
// Make the connection on the IP address we get from a lookup
ws->next_layer().async_connect(results, yield[ec]);
if (ec)
@@ -1076,15 +1151,15 @@ ETLSourceImpl<Derived>::forwardToRippled(
" websocket-client-coro");
req.set(http::field::forwarded, "for=" + clientIp);
}));
BOOST_LOG_TRIVIAL(debug) << "client ip: " << clientIp;
BOOST_LOG_TRIVIAL(trace) << "client ip: " << clientIp;
BOOST_LOG_TRIVIAL(debug) << "Performing websocket handshake";
BOOST_LOG_TRIVIAL(trace) << "Performing websocket handshake";
// Perform the websocket handshake
ws->async_handshake(ip_, "/", yield[ec]);
if (ec)
return {};
BOOST_LOG_TRIVIAL(debug) << "Sending request";
BOOST_LOG_TRIVIAL(trace) << "Sending request";
// Send the message
ws->async_write(
net::buffer(boost::json::serialize(request)), yield[ec]);
@@ -1106,7 +1181,7 @@ ETLSourceImpl<Derived>::forwardToRippled(
<< "Error parsing response: " << std::string{begin, end};
return {};
}
BOOST_LOG_TRIVIAL(debug) << "Successfully forward request";
BOOST_LOG_TRIVIAL(trace) << "Successfully forward request";
response = parsed.as_object();


@@ -15,6 +15,7 @@
#include <grpcpp/grpcpp.h>
class ETLLoadBalancer;
class ETLSource;
class SubscriptionManager;
/// This class manages a connection to a single ETL source. This is almost
@@ -24,6 +25,64 @@ class SubscriptionManager;
/// has. This class also has methods for extracting said ledgers. Lastly this
/// class forwards transactions received on the transactions_proposed streams to
/// any subscribers.
class ForwardCache
{
using response_type = std::optional<boost::json::object>;
mutable std::atomic_bool stopping_ = false;
mutable std::shared_mutex mtx_;
std::unordered_map<std::string, response_type> latestForwarded_;
boost::asio::io_context::strand strand_;
boost::asio::steady_timer timer_;
ETLSource const& source_;
std::uint32_t duration_ = 10;
void
clear();
public:
ForwardCache(
boost::json::object const& config,
boost::asio::io_context& ioc,
ETLSource const& source)
: strand_(ioc), timer_(strand_), source_(source)
{
if (config.contains("cache") && !config.at("cache").is_array())
throw std::runtime_error("ETLSource cache must be array");
if (config.contains("cache_duration") &&
!config.at("cache_duration").is_int64())
throw std::runtime_error(
"ETLSource cache_duration must be a number");
duration_ = config.contains("cache_duration")
? config.at("cache_duration").as_int64()
: 10;
auto commands = config.contains("cache") ? config.at("cache").as_array()
: boost::json::array{};
for (auto const& command : commands)
{
if (!command.is_string())
throw std::runtime_error(
"ETLSource forward command must be array of strings");
latestForwarded_[command.as_string().c_str()] = {};
}
}
// This is to be called every freshenDuration_ seconds.
// It will request information from this etlSource, and
// will populate the cache with the latest value. If the
// request fails, it will evict that value from the cache.
void
freshen();
std::optional<boost::json::object>
get(boost::json::object const& command) const;
};
class ETLSource
{
@@ -64,6 +123,15 @@ public:
virtual ~ETLSource()
{
}
private:
friend ForwardCache;
virtual std::optional<boost::json::object>
requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const = 0;
};
template <class Derived>
@@ -105,6 +173,14 @@ class ETLSourceImpl : public ETLSource
std::shared_ptr<SubscriptionManager> subscriptions_;
ETLLoadBalancer& balancer_;
ForwardCache forwardCache_;
std::optional<boost::json::object>
requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const override;
protected:
Derived&
derived()
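The ForwardCache constructor above reads two optional keys from each ETL source's configuration: a "cache" array naming parameter-less commands to keep warm, and a "cache_duration" (presumably the refresh interval in seconds, defaulting to 10). A minimal sketch of such a source entry, expressed with boost::json as elsewhere in this diff, is below; the command names are illustrative assumptions, the "ip" key comes from the README, and only the two cache keys are taken from the constructor. forwardToRippled() then consults this cache before falling back to requestFromRippled(), as shown in the ETLSource.cpp hunk above.

```cpp
#include <boost/json.hpp>

// Hypothetical etl_sources entry for illustration only; ForwardCache reads the
// "cache" and "cache_duration" keys, everything else here is assumed.
boost::json::object const exampleEtlSource = {
    {"ip", "127.0.0.1"},
    {"cache", boost::json::array{"fee", "server_state"}},  // commands to cache
    {"cache_duration", 10}};                                // refresh interval, seconds
```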


@@ -894,6 +894,7 @@ ReportingETL::loadCache(uint32_t seq)
{
if (cacheLoadStyle_ == CacheLoadStyle::NOT_AT_ALL)
{
backend_->cache().setDisabled();
BOOST_LOG_TRIVIAL(warning) << "Cache is disabled. Not loading";
return;
}


@@ -130,19 +130,12 @@ private:
/// server_info
std::chrono::time_point<std::chrono::system_clock> lastPublish_;
mutable std::mutex publishTimeMtx_;
std::chrono::time_point<std::chrono::system_clock>
getLastPublish() const
{
std::unique_lock<std::mutex> lck(publishTimeMtx_);
return lastPublish_;
}
mutable std::shared_mutex publishTimeMtx_;
void
setLastPublish()
{
std::unique_lock<std::mutex> lck(publishTimeMtx_);
std::unique_lock lck(publishTimeMtx_);
lastPublish_ = std::chrono::system_clock::now();
}
@@ -322,13 +315,25 @@ public:
result["read_only"] = readOnly_;
auto last = getLastPublish();
if (last.time_since_epoch().count() != 0)
result["last_publish_age_seconds"] = std::to_string(
std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now() - getLastPublish())
.count());
result["last_publish_age_seconds"] =
std::to_string(lastPublishAgeSeconds());
return result;
}
std::chrono::time_point<std::chrono::system_clock>
getLastPublish() const
{
std::shared_lock lck(publishTimeMtx_);
return lastPublish_;
}
std::uint32_t
lastPublishAgeSeconds() const
{
return std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now() - getLastPublish())
.count();
}
};
#endif

src/main/Build.h

@@ -0,0 +1,16 @@
#ifndef CLIO_BUILD_INFO_H
#define CLIO_BUILD_INFO_H
#include <string>
namespace Build {
std::string const&
getClioVersionString();
std::string const&
getClioFullVersionString();
} // namespace Build
#endif // CLIO_BUILD_INFO_H

src/main/impl/Build.cpp

@@ -0,0 +1,59 @@
#include <ripple/beast/core/SemanticVersion.h>
#include <boost/preprocessor/stringize.hpp>
#include <algorithm>
#include <main/Build.h>
#include <optional>
#include <stdexcept>
namespace Build {
//--------------------------------------------------------------------------
// The build version number. You must edit this for each release
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "1.0.0"
// clang-format on
#if defined(DEBUG) || defined(SANITIZER)
"+"
#ifdef CLIO_GIT_COMMIT_HASH
CLIO_GIT_COMMIT_HASH
"."
#endif
#ifdef DEBUG
"DEBUG"
#ifdef SANITIZER
"."
#endif
#endif
#ifdef SANITIZER
BOOST_PP_STRINGIZE(SANITIZER)
#endif
#endif
//--------------------------------------------------------------------------
;
std::string const&
getClioVersionString()
{
static std::string const value = [] {
std::string const s = versionString;
beast::SemanticVersion v;
if (!v.parse(s) || v.print() != s)
throw std::runtime_error(s + ": Bad server version string");
return s;
}();
return value;
}
std::string const&
getClioFullVersionString()
{
static std::string const value = "clio-" + getClioVersionString();
return value;
}
} // namespace Build
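As an illustration (the hash and build flags are hypothetical, not from the diff): a DEBUG build configured with -DCLIO_GIT_COMMIT_HASH="abc12345" concatenates the fragments above into "1.0.0+abc12345.DEBUG", and getClioFullVersionString() then returns "clio-1.0.0+abc12345.DEBUG", assuming the string survives the SemanticVersion round-trip check in getClioVersionString().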


@@ -28,6 +28,7 @@
#include <fstream>
#include <functional>
#include <iostream>
#include <main/Build.h>
#include <memory>
#include <sstream>
#include <string>
@@ -103,45 +104,86 @@ parse_certs(boost::json::object const& config)
void
initLogging(boost::json::object const& config)
{
namespace src = boost::log::sources;
namespace keywords = boost::log::keywords;
namespace sinks = boost::log::sinks;
namespace trivial = boost::log::trivial;
boost::log::add_common_attributes();
std::string format = "[%TimeStamp%] [%ThreadID%] [%Severity%] %Message%";
boost::log::add_console_log(
std::cout, boost::log::keywords::format = format);
if (config.contains("log_file"))
if (!config.contains("log_to_console") ||
config.at("log_to_console").as_bool())
{
boost::log::add_file_log(
config.at("log_file").as_string().c_str(),
boost::log::keywords::format = format,
boost::log::keywords::open_mode = std::ios_base::app);
boost::log::add_console_log(std::cout, keywords::format = format);
}
if (config.contains("log_to_file") && config.at("log_to_file").as_bool() &&
config.contains("log_directory"))
{
if (!config.at("log_directory").is_string())
throw std::runtime_error("log directory must be a string");
boost::filesystem::path dirPath{
config.at("log_directory").as_string().c_str()};
if (!boost::filesystem::exists(dirPath))
boost::filesystem::create_directories(dirPath);
const int64_t rotationSize = config.contains("log_rotation_size")
? config.at("log_rotation_size").as_int64() * 1024 * 1024u
: 2 * 1024 * 1024 * 1024u;
if (rotationSize <= 0)
throw std::runtime_error(
"log rotation size must be greater than 0");
const int64_t rotationPeriod =
config.contains("log_rotation_hour_interval")
? config.at("log_rotation_hour_interval").as_int64()
: 12u;
if (rotationPeriod <= 0)
throw std::runtime_error(
"log rotation time interval must be greater than 0");
const int64_t dirSize = config.contains("log_directory_max_size")
? config.at("log_directory_max_size").as_int64() * 1024 * 1024u
: 50 * 1024 * 1024 * 1024u;
if (dirSize <= 0)
throw std::runtime_error(
"log rotation directory max size must be greater than 0");
auto fileSink = boost::log::add_file_log(
keywords::file_name = dirPath / "clio.log",
keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log",
keywords::auto_flush = true,
keywords::format = format,
keywords::open_mode = std::ios_base::app,
keywords::rotation_size = rotationSize,
keywords::time_based_rotation =
sinks::file::rotation_at_time_interval(
boost::posix_time::hours(rotationPeriod)));
fileSink->locked_backend()->set_file_collector(
sinks::file::make_collector(
keywords::target = dirPath, keywords::max_size = dirSize));
fileSink->locked_backend()->scan_for_files();
}
auto const logLevel = config.contains("log_level")
? config.at("log_level").as_string()
: "info";
if (boost::iequals(logLevel, "trace"))
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::trace);
trivial::severity >= trivial::trace);
else if (boost::iequals(logLevel, "debug"))
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::debug);
trivial::severity >= trivial::debug);
else if (boost::iequals(logLevel, "info"))
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::info);
boost::log::core::get()->set_filter(trivial::severity >= trivial::info);
else if (
boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn"))
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::warning);
trivial::severity >= trivial::warning);
else if (boost::iequals(logLevel, "error"))
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::error);
trivial::severity >= trivial::error);
else if (boost::iequals(logLevel, "fatal"))
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::fatal);
trivial::severity >= trivial::fatal);
else
{
BOOST_LOG_TRIVIAL(warning) << "Unrecognized log level: " << logLevel
<< ". Setting log level to info";
boost::log::core::get()->set_filter(
boost::log::trivial::severity >= boost::log::trivial::info);
boost::log::core::get()->set_filter(trivial::severity >= trivial::info);
}
BOOST_LOG_TRIVIAL(info) << "Log level = " << logLevel;
}
@@ -170,6 +212,12 @@ main(int argc, char* argv[])
return EXIT_FAILURE;
}
if (std::string{argv[1]} == "-v" || std::string{argv[1]} == "--version")
{
std::cout << Build::getClioFullVersionString() << std::endl;
return EXIT_SUCCESS;
}
auto const config = parse_config(argv[1]);
if (!config)
{
@@ -179,21 +227,25 @@ main(int argc, char* argv[])
initLogging(*config);
// Announce Clio version
BOOST_LOG_TRIVIAL(info)
<< "Clio version: " << Build::getClioFullVersionString();
auto ctx = parse_certs(*config);
auto ctxRef = ctx
? std::optional<std::reference_wrapper<ssl::context>>{ctx.value()}
: std::nullopt;
auto const threads = config->contains("workers")
? config->at("workers").as_int64()
: std::thread::hardware_concurrency();
auto const threads = config->contains("io_threads")
? config->at("io_threads").as_int64()
: 2;
if (threads <= 0)
{
BOOST_LOG_TRIVIAL(fatal) << "Workers is less than 0";
BOOST_LOG_TRIVIAL(fatal) << "io_threads is less than 0";
return EXIT_FAILURE;
}
BOOST_LOG_TRIVIAL(info) << "Number of workers = " << threads;
BOOST_LOG_TRIVIAL(info) << "Number of io threads = " << threads;
// io context to handle all incoming requests, as well as other things
// This is not the only io context in the application


@@ -6,6 +6,7 @@
#include <cstdint>
#include <shared_mutex>
#include <string>
#include <unordered_map>
namespace RPC {


@@ -21,6 +21,9 @@ doAccountCurrencies(Context const& context);
Result
doAccountLines(Context const& context);
Result
doAccountNFTs(Context const& context);
Result
doAccountObjects(Context const& context);
@@ -45,6 +48,13 @@ doChannelVerify(Context const& context);
Result
doBookOffers(Context const& context);
// NFT methods
Result
doNFTBuyOffers(Context const& context);
Result
doNFTSellOffers(Context const& context);
// ledger methods
Result
doLedger(Context const& context);


@@ -1,6 +1,7 @@
#include <boost/asio/spawn.hpp>
#include <etl/ETLSource.h>
#include <rpc/Handlers.h>
#include <rpc/RPCHelpers.h>
#include <unordered_map>
namespace RPC {
@@ -106,6 +107,14 @@ make_error(Error err)
boost::json::object
make_error(Status const& status)
{
if (status.error == ripple::rpcUNKNOWN)
{
return {
{"error", status.message},
{"type", "response"},
{"status", "error"}};
}
boost::json::object json;
ripple::RPC::ErrorInfo const& info(
ripple::RPC::get_error_info(status.error));
@@ -118,31 +127,79 @@ make_error(Status const& status)
json["type"] = "response";
return json;
}
static std::unordered_map<std::string, std::function<Result(Context const&)>>
handlerTable{
{"account_channels", &doAccountChannels},
{"account_currencies", &doAccountCurrencies},
{"account_info", &doAccountInfo},
{"account_lines", &doAccountLines},
{"account_objects", &doAccountObjects},
{"account_offers", &doAccountOffers},
{"account_tx", &doAccountTx},
{"gateway_balances", &doGatewayBalances},
{"noripple_check", &doNoRippleCheck},
{"book_offers", &doBookOffers},
{"channel_authorize", &doChannelAuthorize},
{"channel_verify", &doChannelVerify},
{"ledger", &doLedger},
{"ledger_data", &doLedgerData},
{"ledger_entry", &doLedgerEntry},
{"ledger_range", &doLedgerRange},
{"ledger_data", &doLedgerData},
{"subscribe", &doSubscribe},
{"server_info", &doServerInfo},
{"unsubscribe", &doUnsubscribe},
{"tx", &doTx},
{"transaction_entry", &doTransactionEntry},
{"random", &doRandom}};
using LimitRange = std::tuple<std::uint32_t, std::uint32_t, std::uint32_t>;
using HandlerFunction = std::function<Result(Context const&)>;
struct Handler
{
std::string method;
std::function<Result(Context const&)> handler;
std::optional<LimitRange> limit;
};
class HandlerTable
{
std::unordered_map<std::string, Handler> handlerMap_;
public:
HandlerTable(std::initializer_list<Handler> handlers)
{
for (auto const& handler : handlers)
{
handlerMap_[handler.method] = std::move(handler);
}
}
bool
contains(std::string const& method)
{
return handlerMap_.contains(method);
}
std::optional<LimitRange>
getLimitRange(std::string const& command)
{
if (!handlerMap_.contains(command))
return {};
return handlerMap_[command].limit;
}
std::optional<HandlerFunction>
getHandler(std::string const& command)
{
if (!handlerMap_.contains(command))
return {};
return handlerMap_[command].handler;
}
};
static HandlerTable handlerTable{
{"account_channels", &doAccountChannels, LimitRange{10, 50, 256}},
{"account_currencies", &doAccountCurrencies, {}},
{"account_info", &doAccountInfo, {}},
{"account_lines", &doAccountLines, LimitRange{10, 50, 256}},
{"account_nfts", &doAccountNFTs, LimitRange{1, 5, 10}},
{"account_objects", &doAccountObjects, LimitRange{10, 50, 256}},
{"account_offers", &doAccountOffers, LimitRange{10, 50, 256}},
{"account_tx", &doAccountTx, LimitRange{1, 50, 100}},
{"gateway_balances", &doGatewayBalances, {}},
{"noripple_check", &doNoRippleCheck, {}},
{"book_offers", &doBookOffers, LimitRange{1, 50, 100}},
{"ledger", &doLedger, {}},
{"ledger_data", &doLedgerData, LimitRange{1, 100, 2048}},
{"nft_buy_offers", &doNFTBuyOffers, LimitRange{1, 50, 100}},
{"nft_sell_offers", &doNFTSellOffers, LimitRange{1, 50, 100}},
{"ledger_entry", &doLedgerEntry, {}},
{"ledger_range", &doLedgerRange, {}},
{"subscribe", &doSubscribe, {}},
{"server_info", &doServerInfo, {}},
{"unsubscribe", &doUnsubscribe, {}},
{"tx", &doTx, {}},
{"transaction_entry", &doTransactionEntry, {}},
{"random", &doRandom, {}}};
static std::unordered_set<std::string> forwardCommands{
"submit",
@@ -151,7 +208,9 @@ static std::unordered_set<std::string> forwardCommands{
"ledger_closed",
"ledger_current",
"ripple_path_find",
"manifest"};
"manifest",
"channel_authorize",
"channel_verify"};
bool
validHandler(std::string const& method)
@@ -159,6 +218,36 @@ validHandler(std::string const& method)
return handlerTable.contains(method) || forwardCommands.contains(method);
}
Status
getLimit(RPC::Context const& context, std::uint32_t& limit)
{
if (!handlerTable.getHandler(context.method))
return Status{Error::rpcUNKNOWN_COMMAND};
if (!handlerTable.getLimitRange(context.method))
return Status{Error::rpcINVALID_PARAMS, "rpcDoesNotRequireLimit"};
auto [lo, def, hi] = *handlerTable.getLimitRange(context.method);
if (context.params.contains(JS(limit)))
{
if (!context.params.at(JS(limit)).is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
limit = context.params.at(JS(limit)).as_int64();
if (limit <= 0)
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
limit = std::clamp(limit, lo, hi);
}
else
{
limit = def;
}
return {};
}
bool
shouldForwardToRippled(Context const& ctx)
{
@@ -167,15 +256,8 @@ shouldForwardToRippled(Context const& ctx)
if (forwardCommands.find(ctx.method) != forwardCommands.end())
return true;
if (request.contains("ledger_index"))
{
auto indexValue = request.at("ledger_index");
if (indexValue.is_string())
{
std::string index = indexValue.as_string().c_str();
return index == "current" || index == "closed";
}
}
if (specifiesCurrentOrClosedLedger(request))
return true;
if (ctx.method == "account_info" && request.contains("queue") &&
request.at("queue").as_bool())
@@ -209,14 +291,14 @@ buildResponse(Context const& ctx)
if (ctx.method == "ping")
return boost::json::object{};
if (handlerTable.find(ctx.method) == handlerTable.end())
return Status{Error::rpcUNKNOWN_COMMAND};
auto method = handlerTable.getHandler(ctx.method);
auto method = handlerTable[ctx.method];
if (!method)
return Status{Error::rpcUNKNOWN_COMMAND};
try
{
auto v = method(ctx);
auto v = (*method)(ctx);
if (auto object = std::get_if<boost::json::object>(&v))
(*object)["validated"] = true;
@@ -235,7 +317,7 @@ buildResponse(Context const& ctx)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " caught exception : " << err.what();
return Status{Error::rpcINTERNAL, err.what()};
return Status{Error::rpcINTERNAL};
}
}
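Handlers consume the new per-command [min, default, max] limits through getLimit(). A minimal sketch of one plausible call pattern, assuming handlers live in namespace RPC and return Result as the existing ones do (the handler name is hypothetical):

```cpp
#include <rpc/RPC.h>

namespace RPC {

// Sketch only: a paginated handler asking the handler table for its limit.
Result
doExamplePaginated(Context const& context)
{
    std::uint32_t limit = 0;
    if (auto const status = getLimit(context, limit); status)
        return status;  // unknown command, non-integer limit, or limit <= 0

    // `limit` is now the request's value clamped to [min, max], or the
    // table's default when the request did not specify one.
    boost::json::object response;
    response["limit"] = limit;
    return response;
}

}  // namespace RPC
```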


@@ -103,10 +103,19 @@ struct Status
Status(Error error_) : error(error_){};
// HACK. Some rippled handlers explicitly specify errors.
// This means that we have to be able to duplicate this
// functionality.
Status(std::string const& message_)
: error(ripple::rpcUNKNOWN), message(message_)
{
}
Status(Error error_, std::string message_)
: error(error_), message(message_)
{
}
Status(Error error_, std::string strCode_, std::string message_)
: error(error_), strCode(strCode_), message(message_)
{
@@ -190,6 +199,9 @@ buildResponse(Context const& ctx);
bool
validHandler(std::string const& method);
Status
getLimit(RPC::Context const& context, std::uint32_t& limit);
template <class T>
void
logDuration(Context const& ctx, T const& dur)
@@ -205,7 +217,7 @@ logDuration(Context const& ctx, T const& dur)
else if (seconds > 1)
BOOST_LOG_TRIVIAL(warning) << ss.str();
else
BOOST_LOG_TRIVIAL(debug) << ss.str();
BOOST_LOG_TRIVIAL(info) << ss.str();
}
} // namespace RPC
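The new string-only Status constructor ties into the rpcUNKNOWN branch added to make_error() earlier in this diff: a handler that mimics rippled's explicitly specified errors returns a bare message and gets a minimal error object back. A sketch of the round trip (the handler name is hypothetical):

```cpp
#include <rpc/RPC.h>

namespace RPC {

// Hypothetical handler shown only to illustrate the new constructor.
Result
doExplicitError(Context const&)
{
    // error code defaults to ripple::rpcUNKNOWN; message carries the text
    return Status{"customErrorMessage"};
}

}  // namespace RPC

// make_error() (rpcUNKNOWN branch in RPC.cpp above) turns that Status into:
//   {"error": "customErrorMessage", "type": "response", "status": "error"}
```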


@@ -13,6 +13,7 @@ getBool(boost::json::object const& request, std::string const& field)
else
throw InvalidParamsError("Invalid field " + field + ", not bool.");
}
bool
getBool(
boost::json::object const& request,
@@ -24,6 +25,7 @@ getBool(
else
return dfault;
}
bool
getRequiredBool(boost::json::object const& request, std::string const& field)
{
@@ -152,6 +154,7 @@ getString(boost::json::object const& request, std::string const& field)
else
throw InvalidParamsError("Invalid field " + field + ", not string.");
}
std::string
getRequiredString(boost::json::object const& request, std::string const& field)
{
@@ -160,6 +163,7 @@ getRequiredString(boost::json::object const& request, std::string const& field)
else
throw InvalidParamsError("Missing field " + field);
}
std::string
getString(
boost::json::object const& request,
@@ -172,6 +176,122 @@ getString(
return dfault;
}
Status
getHexMarker(boost::json::object const& request, ripple::uint256& marker)
{
if (request.contains(JS(marker)))
{
if (!request.at(JS(marker)).is_string())
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
if (!marker.parseHex(request.at(JS(marker)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedMarker"};
}
return {};
}
Status
getAccount(
boost::json::object const& request,
ripple::AccountID& account,
boost::string_view const& field,
bool required)
{
if (!request.contains(field))
{
if (required)
return Status{
Error::rpcINVALID_PARAMS, field.to_string() + "Missing"};
return {};
}
if (!request.at(field).is_string())
return Status{
Error::rpcINVALID_PARAMS, field.to_string() + "NotString"};
if (auto a = accountFromStringStrict(request.at(field).as_string().c_str());
a)
{
account = a.value();
return {};
}
return Status{Error::rpcINVALID_PARAMS, field.to_string() + "Malformed"};
}
Status
getOptionalAccount(
boost::json::object const& request,
std::optional<ripple::AccountID>& account,
boost::string_view const& field)
{
if (!request.contains(field))
{
account = {};
return {};
}
if (!request.at(field).is_string())
return Status{
Error::rpcINVALID_PARAMS, field.to_string() + "NotString"};
if (auto a = accountFromStringStrict(request.at(field).as_string().c_str());
a)
{
account = a.value();
return {};
}
return Status{Error::rpcINVALID_PARAMS, field.to_string() + "Malformed"};
}
Status
getAccount(boost::json::object const& request, ripple::AccountID& accountId)
{
return getAccount(request, accountId, JS(account), true);
}
Status
getAccount(
boost::json::object const& request,
ripple::AccountID& destAccount,
boost::string_view const& field)
{
return getAccount(request, destAccount, field, false);
}
Status
getTaker(boost::json::object const& request, ripple::AccountID& takerID)
{
if (request.contains(JS(taker)))
{
auto parsed = parseTaker(request.at(JS(taker)));
if (auto status = std::get_if<Status>(&parsed))
return *status;
else
takerID = std::get<ripple::AccountID>(parsed);
}
return {};
}
Status
getChannelId(boost::json::object const& request, ripple::uint256& channelId)
{
if (!request.contains(JS(channel_id)))
return Status{Error::rpcINVALID_PARAMS, "missingChannelID"};
if (!request.at(JS(channel_id)).is_string())
return Status{Error::rpcINVALID_PARAMS, "channelIDNotString"};
if (!channelId.parseHex(request.at(JS(channel_id)).as_string().c_str()))
return Status{Error::rpcCHANNEL_MALFORMED, "malformedChannelID"};
return {};
}
std::optional<ripple::STAmount>
getDeliveredAmount(
std::shared_ptr<ripple::STTx const> const& txn,
@@ -448,6 +568,11 @@ ledgerInfoFromRequest(Context const& ctx)
return Status{Error::rpcINVALID_PARAMS, "ledgerHashMalformed"};
auto lgrInfo = ctx.backend->fetchLedgerByHash(ledgerHash, ctx.yield);
if (!lgrInfo)
return Status{Error::rpcLGR_NOT_FOUND, "ledgerNotFound"};
return *lgrInfo;
}
auto indexValue = ctx.params.contains("ledger_index")
@@ -537,20 +662,47 @@ traverseOwnedNodes(
if (!parsedCursor)
return Status(ripple::rpcINVALID_PARAMS, "Malformed cursor");
auto cursor = AccountCursor({beast::zero, 0});
auto [hexCursor, startHint] = *parsedCursor;
auto const rootIndex = ripple::keylet::ownerDir(accountID);
return traverseOwnedNodes(
backend,
ripple::keylet::ownerDir(accountID),
hexCursor,
startHint,
sequence,
limit,
jsonCursor,
yield,
atOwnedNode);
}
std::variant<Status, AccountCursor>
traverseOwnedNodes(
BackendInterface const& backend,
ripple::Keylet const& owner,
ripple::uint256 const& hexMarker,
std::uint32_t const startHint,
std::uint32_t sequence,
std::uint32_t limit,
std::optional<std::string> jsonCursor,
boost::asio::yield_context& yield,
std::function<void(ripple::SLE)> atOwnedNode)
{
auto cursor = AccountCursor({beast::zero, 0});
auto const rootIndex = owner;
auto currentIndex = rootIndex;
std::vector<ripple::uint256> keys;
keys.reserve(limit);
// Only reserve 2048 nodes when fetching all owned ledger objects. If there
// are more, then keys will allocate more memory, which is suboptimal, but
// should only occur occasionally.
keys.reserve(std::min(std::uint32_t{2048}, limit));
auto start = std::chrono::system_clock::now();
// If startAfter is not zero try jumping to that page using the hint
if (hexCursor.isNonZero())
if (hexMarker.isNonZero())
{
auto const hintIndex = ripple::keylet::page(rootIndex, startHint);
auto hintDir =
@@ -563,7 +715,7 @@ traverseOwnedNodes(
for (auto const& key : sle.getFieldV256(ripple::sfIndexes))
{
if (key == hexCursor)
if (key == hexMarker)
{
// We found the hint, we can start here
currentIndex = hintIndex;
@@ -589,7 +741,7 @@ traverseOwnedNodes(
{
if (!found)
{
if (key == hexCursor)
if (key == hexMarker)
found = true;
}
else
@@ -625,7 +777,7 @@ traverseOwnedNodes(
backend.fetchLedgerObject(currentIndex.key, sequence, yield);
if (!ownerDir)
return Status(ripple::rpcACT_NOT_FOUND);
break;
ripple::SerialIter it{ownerDir->data(), ownerDir->size()};
ripple::SLE sle{it, currentIndex.key};
@@ -678,6 +830,23 @@ traverseOwnedNodes(
return AccountCursor({beast::zero, 0});
}
std::shared_ptr<ripple::SLE const>
read(
ripple::Keylet const& keylet,
ripple::LedgerInfo const& lgrInfo,
Context const& context)
{
if (auto const blob = context.backend->fetchLedgerObject(
keylet.key, lgrInfo.seq, context.yield);
blob)
{
return std::make_shared<ripple::SLE const>(
ripple::SerialIter{blob->data(), blob->size()}, keylet.key);
}
return nullptr;
}
std::optional<ripple::Seed>
parseRippleLibSeed(boost::json::value const& value)
{
@@ -1280,6 +1449,7 @@ parseBook(boost::json::object const& request)
return ripple::Book{{pay_currency, pay_issuer}, {get_currency, get_issuer}};
}
std::variant<Status, ripple::AccountID>
parseTaker(boost::json::value const& taker)
{
@@ -1293,5 +1463,19 @@ parseTaker(boost::json::value const& taker)
return Status{Error::rpcINVALID_PARAMS, "invalidTakerAccount"};
return *takerID;
}
bool
specifiesCurrentOrClosedLedger(boost::json::object const& request)
{
if (request.contains("ledger_index"))
{
auto indexValue = request.at("ledger_index");
if (indexValue.is_string())
{
std::string index = indexValue.as_string().c_str();
return index == "current" || index == "closed";
}
}
return false;
}
} // namespace RPC
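The new helpers above shrink per-handler boilerplate considerably. Below is a minimal sketch (not a handler from the diff) combining getAccount(), ledgerInfoFromRequest() and the new read() overload; the handler name and the response shape are assumptions.

```cpp
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/Indexes.h>
#include <rpc/RPCHelpers.h>

namespace RPC {

// Sketch only: validate the account, resolve the ledger, fetch the account root.
Result
doExampleAccountLookup(Context const& context)
{
    ripple::AccountID accountID;
    if (auto const status = getAccount(context.params, accountID); status)
        return status;  // missing, non-string, or malformed "account"

    auto const v = ledgerInfoFromRequest(context);
    if (auto const status = std::get_if<Status>(&v); status)
        return *status;
    auto const lgrInfo = std::get<ripple::LedgerInfo>(v);

    // read() replaces the hand-rolled fetchLedgerObject/SerialIter pattern
    // removed from doAccountChannels below.
    auto const sle = read(ripple::keylet::account(accountID), lgrInfo, context);
    if (!sle)
        return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};

    boost::json::object response;
    response["account"] = ripple::to_string(accountID);
    return response;
}

}  // namespace RPC
```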


@@ -14,6 +14,13 @@
#include <backend/BackendInterface.h>
#include <rpc/RPC.h>
// Useful macro for borrowing from ripple::jss
// static strings. (J)son (S)trings
#define JS(x) ripple::jss::x.c_str()
// Access (SF)ield name (S)trings
#define SFS(x) ripple::x.jsonName.c_str()
namespace RPC {
std::optional<ripple::AccountID>
accountFromStringStrict(std::string const& account);
@@ -93,6 +100,24 @@ traverseOwnedNodes(
boost::asio::yield_context& yield,
std::function<void(ripple::SLE)> atOwnedNode);
std::variant<Status, AccountCursor>
traverseOwnedNodes(
BackendInterface const& backend,
ripple::Keylet const& owner,
ripple::uint256 const& hexMarker,
std::uint32_t const startHint,
std::uint32_t sequence,
std::uint32_t limit,
std::optional<std::string> jsonCursor,
boost::asio::yield_context& yield,
std::function<void(ripple::SLE)> atOwnedNode);
std::shared_ptr<ripple::SLE const>
read(
ripple::Keylet const& keylet,
ripple::LedgerInfo const& lgrInfo,
Context const& context);
std::variant<Status, std::pair<ripple::PublicKey, ripple::SecretKey>>
keypairFromRequst(boost::json::object const& request);
@@ -200,5 +225,33 @@ getString(
boost::json::object const& request,
std::string const& field,
std::string dfault);
Status
getHexMarker(boost::json::object const& request, ripple::uint256& marker);
Status
getAccount(boost::json::object const& request, ripple::AccountID& accountId);
Status
getAccount(
boost::json::object const& request,
ripple::AccountID& destAccount,
boost::string_view const& field);
Status
getOptionalAccount(
boost::json::object const& request,
std::optional<ripple::AccountID>& account,
boost::string_view const& field);
Status
getTaker(boost::json::object const& request, ripple::AccountID& takerID);
Status
getChannelId(boost::json::object const& request, ripple::uint256& channelId);
bool
specifiesCurrentOrClosedLedger(boost::json::object const& request);
} // namespace RPC
#endif

src/rpc/WorkQueue.cpp

@@ -0,0 +1,11 @@
#include <rpc/WorkQueue.h>
WorkQueue::WorkQueue(std::uint32_t numWorkers, uint32_t maxSize)
{
if (maxSize != 0)
maxSize_ = maxSize;
while (--numWorkers)
{
threads_.emplace_back([this] { ioc_.run(); });
}
}

src/rpc/WorkQueue.h

@@ -0,0 +1,82 @@
#ifndef CLIO_WORK_QUEUE_H
#define CLIO_WORK_QUEUE_H
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <boost/log/trivial.hpp>
#include <memory>
#include <optional>
#include <queue>
#include <shared_mutex>
#include <thread>
class WorkQueue
{
// these are cumulative for the lifetime of the process
std::atomic_uint64_t queued_ = 0;
std::atomic_uint64_t durationUs_ = 0;
std::atomic_uint64_t curSize_ = 0;
uint32_t maxSize_ = std::numeric_limits<uint32_t>::max();
public:
WorkQueue(std::uint32_t numWorkers, uint32_t maxSize = 0);
template <typename F>
bool
postCoro(F&& f, bool isWhiteListed)
{
if (curSize_ >= maxSize_ && !isWhiteListed)
{
BOOST_LOG_TRIVIAL(warning)
<< __func__
<< " queue is full. rejecting job. current size = " << curSize_
<< " max size = " << maxSize_;
return false;
}
++curSize_;
auto start = std::chrono::system_clock::now();
// Each time we enqueue a job, we want to post a symmetrical job that
// will dequeue and run the job at the front of the job queue.
boost::asio::spawn(
ioc_,
[this, f = std::move(f), start](boost::asio::yield_context yield) {
auto run = std::chrono::system_clock::now();
auto wait =
std::chrono::duration_cast<std::chrono::microseconds>(
run - start)
.count();
// increment queued_ here, in the same place we increment
// durationUs_
++queued_;
durationUs_ += wait;
BOOST_LOG_TRIVIAL(debug) << "WorkQueue wait time = " << wait
<< " queue size = " << curSize_;
f(yield);
--curSize_;
});
return true;
}
// TODO: this is not actually being called. Wait for application refactor
boost::json::object
report()
{
boost::json::object obj;
obj["queued"] = queued_;
obj["queued_duration_us"] = durationUs_;
obj["current_queue_size"] = curSize_;
obj["max_queue_size"] = maxSize_;
return obj;
}
private:
std::vector<std::thread> threads_ = {};
boost::asio::io_context ioc_ = {};
std::optional<boost::asio::io_context::work> work_{ioc_};
};
#endif // CLIO_WORK_QUEUE_H
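Editor's note: the queue is the admission-control point for RPC work. Each accepted job runs as a Boost.Asio stackful coroutine on one of the worker threads, and non-whitelisted callers are turned away once curSize_ reaches maxSize_. A usage sketch (the handler body is illustrative):

WorkQueue queue{4, 500};   // up to 4 workers, reject new work above 500 queued jobs

bool const accepted = queue.postCoro(
    [](boost::asio::yield_context yield) {
        // process one RPC request; backend calls can suspend on `yield`
    },
    /* isWhiteListed = */ false);

if (!accepted)
{
    // queue was full and the client is not whitelisted:
    // answer with a "too busy" style error instead of processing
}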

View File

@@ -17,27 +17,27 @@ void
addChannel(boost::json::array& jsonLines, ripple::SLE const& line)
{
boost::json::object jDst;
jDst["channel_id"] = ripple::to_string(line.key());
jDst["account"] = ripple::to_string(line.getAccountID(ripple::sfAccount));
jDst["destination_account"] =
jDst[JS(channel_id)] = ripple::to_string(line.key());
jDst[JS(account)] = ripple::to_string(line.getAccountID(ripple::sfAccount));
jDst[JS(destination_account)] =
ripple::to_string(line.getAccountID(ripple::sfDestination));
jDst["amount"] = line[ripple::sfAmount].getText();
jDst["balance"] = line[ripple::sfBalance].getText();
jDst[JS(amount)] = line[ripple::sfAmount].getText();
jDst[JS(balance)] = line[ripple::sfBalance].getText();
if (publicKeyType(line[ripple::sfPublicKey]))
{
ripple::PublicKey const pk(line[ripple::sfPublicKey]);
jDst["public_key"] = toBase58(ripple::TokenType::AccountPublic, pk);
jDst["public_key_hex"] = strHex(pk);
jDst[JS(public_key)] = toBase58(ripple::TokenType::AccountPublic, pk);
jDst[JS(public_key_hex)] = strHex(pk);
}
jDst["settle_delay"] = line[ripple::sfSettleDelay];
jDst[JS(settle_delay)] = line[ripple::sfSettleDelay];
if (auto const& v = line[~ripple::sfExpiration])
jDst["expiration"] = *v;
jDst[JS(expiration)] = *v;
if (auto const& v = line[~ripple::sfCancelAfter])
jDst["cancel_after"] = *v;
jDst[JS(cancel_after)] = *v;
if (auto const& v = line[~ripple::sfSourceTag])
jDst["source_tag"] = *v;
jDst[JS(source_tag)] = *v;
if (auto const& v = line[~ripple::sfDestinationTag])
jDst["destination_tag"] = *v;
jDst[JS(destination_tag)] = *v;
jsonLines.push_back(jDst);
}
@@ -54,66 +54,45 @@ doAccountChannels(Context const& context)
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
if (!request.contains("account"))
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
if (!request.at("account").is_string())
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
auto accountID =
accountFromStringStrict(request.at("account").as_string().c_str());
if (!rawAcct)
return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
ripple::AccountID destAccount;
if (auto const status =
getAccount(request, destAccount, JS(destination_account));
status)
return status;
std::optional<ripple::AccountID> destAccount = {};
if (request.contains("destination_account"))
{
if (!request.at("destination_account").is_string())
return Status{Error::rpcINVALID_PARAMS, "destinationNotString"};
destAccount = accountFromStringStrict(
request.at("destination_account").as_string().c_str());
if (!destAccount)
return Status{Error::rpcINVALID_PARAMS, "destinationMalformed"};
}
std::uint32_t limit = 200;
if (request.contains("limit"))
{
if (!request.at("limit").is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
limit = request.at("limit").as_int64();
if (limit <= 0)
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
}
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
std::optional<std::string> marker = {};
if (request.contains("marker"))
if (request.contains(JS(marker)))
{
if (!request.at("marker").is_string())
if (!request.at(JS(marker)).is_string())
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
marker = request.at("marker").as_string().c_str();
marker = request.at(JS(marker)).as_string().c_str();
}
response["account"] = ripple::to_string(*accountID);
response["channels"] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonChannels = response.at("channels").as_array();
response[JS(account)] = ripple::to_string(accountID);
response[JS(channels)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonChannels = response.at(JS(channels)).as_array();
auto const addToResponse = [&](ripple::SLE const& sle) {
if (sle.getType() == ripple::ltPAYCHAN &&
sle.getAccountID(ripple::sfAccount) == *accountID &&
sle.getAccountID(ripple::sfAccount) == accountID &&
(!destAccount ||
*destAccount == sle.getAccountID(ripple::sfDestination)))
destAccount == sle.getAccountID(ripple::sfDestination)))
{
if (limit-- == 0)
{
return false;
}
addChannel(jsonChannels, sle);
}
@@ -122,23 +101,23 @@ doAccountChannels(Context const& context)
auto next = traverseOwnedNodes(
*context.backend,
*accountID,
accountID,
lgrInfo.seq,
limit,
marker,
context.yield,
addToResponse);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
if (auto status = std::get_if<RPC::Status>(&next))
return *status;
auto nextCursor = std::get<RPC::AccountCursor>(next);
auto nextMarker = std::get<RPC::AccountCursor>(next);
if (nextCursor.isNonZero())
response["marker"] = nextCursor.toString();
if (nextMarker.isNonZero())
response[JS(marker)] = nextMarker.toString();
return response;
}
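Editor's note: with the switch to getLimit and the shared AccountCursor marker, paging account_channels is just a matter of echoing the returned marker string until it disappears. A client-side sketch (send() and handleChannel() are hypothetical stand-ins; the address is a placeholder):

auto req = boost::json::parse(R"({
    "command": "account_channels",
    "account": "rExampleAccountxxxxxxxxxxxxxxxxxxx",
    "limit": 50
})").as_object();

for (;;)
{
    boost::json::object resp = send(req);                 // hypothetical transport helper
    for (auto const& channel : resp.at("channels").as_array())
        handleChannel(channel);                           // hypothetical consumer
    if (!resp.contains("marker"))
        break;                                            // last page
    req["marker"] = resp.at("marker");                    // resume where the server stopped
}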

View File

@@ -24,17 +24,15 @@ doAccountCurrencies(Context const& context)
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
if (!request.contains("account"))
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
if (!request.at("account").is_string())
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
auto accountID =
accountFromStringStrict(request.at("account").as_string().c_str());
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
if (!rawAcct)
return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
std::set<std::string> send, receive;
auto const addToResponse = [&](ripple::SLE const& sle) {
@@ -61,26 +59,26 @@ doAccountCurrencies(Context const& context)
traverseOwnedNodes(
*context.backend,
*accountID,
accountID,
lgrInfo.seq,
std::numeric_limits<std::uint32_t>::max(),
{},
context.yield,
addToResponse);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
response["receive_currencies"] =
response[JS(receive_currencies)] =
boost::json::value(boost::json::array_kind);
boost::json::array& jsonReceive =
response.at("receive_currencies").as_array();
response.at(JS(receive_currencies)).as_array();
for (auto const& currency : receive)
jsonReceive.push_back(currency.c_str());
response["send_currencies"] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonSend = response.at("send_currencies").as_array();
response[JS(send_currencies)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonSend = response.at(JS(send_currencies)).as_array();
for (auto const& currency : send)
jsonSend.push_back(currency.c_str());

View File

@@ -29,10 +29,10 @@ doAccountInfo(Context const& context)
boost::json::object response = {};
std::string strIdent;
if (request.contains("account"))
strIdent = request.at("account").as_string().c_str();
else if (request.contains("ident"))
strIdent = request.at("ident").as_string().c_str();
if (request.contains(JS(account)))
strIdent = request.at(JS(account)).as_string().c_str();
else if (request.contains(JS(ident)))
strIdent = request.at(JS(ident)).as_string().c_str();
else
return Status{Error::rpcACT_MALFORMED};
@@ -71,18 +71,18 @@ doAccountInfo(Context const& context)
return Status{Error::rpcDB_DESERIALIZATION};
// if (!binary)
// response["account_data"] = getJson(sle);
// response[JS(account_data)] = getJson(sle);
// else
// response["account_data"] = ripple::strHex(*dbResponse);
// response["db_time"] = time;
// response[JS(account_data)] = ripple::strHex(*dbResponse);
// response[JS(db_time)] = time;
response["account_data"] = toJson(sle);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response[JS(account_data)] = toJson(sle);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
// Return SignerList(s) if that is requested.
if (request.contains("signer_lists") &&
request.at("signer_lists").as_bool())
if (request.contains(JS(signer_lists)) &&
request.at(JS(signer_lists)).as_bool())
{
// We put the SignerList in an array because of an anticipated
// future when we support multiple signer lists on one account.
@@ -104,7 +104,7 @@ doAccountInfo(Context const& context)
signerList.push_back(toJson(sleSigners));
}
response["account_data"].as_object()["signer_lists"] =
response[JS(account_data)].as_object()[JS(signer_lists)] =
std::move(signerList);
}
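Editor's note: account_info keeps rippled's signer_lists extension; when the flag is set, the account's SignerList entry is looked up and attached under account_data.signer_lists, as the hunk above shows. Illustrative request (placeholder address):

auto req = boost::json::parse(R"({
    "command": "account_info",
    "account": "rExampleAccountxxxxxxxxxxxxxxxxxxx",
    "signer_lists": true
})").as_object();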

View File

@@ -39,7 +39,7 @@ addLine(
auto lineQualityIn = viewLowest ? lowQualityIn : highQualityIn;
auto lineQualityOut = viewLowest ? lowQualityOut : highQualityOut;
if (peerAccount and peerAccount != lineAccountIDPeer)
if (peerAccount && peerAccount != lineAccountIDPeer)
return;
if (!viewLowest)
@@ -64,25 +64,25 @@ addLine(
ripple::STAmount const& saLimitPeer(lineLimitPeer);
boost::json::object jPeer;
jPeer["account"] = ripple::to_string(lineAccountIDPeer);
jPeer["balance"] = saBalance.getText();
jPeer["currency"] = ripple::to_string(saBalance.issue().currency);
jPeer["limit"] = saLimit.getText();
jPeer["limit_peer"] = saLimitPeer.getText();
jPeer["quality_in"] = lineQualityIn;
jPeer["quality_out"] = lineQualityOut;
jPeer[JS(account)] = ripple::to_string(lineAccountIDPeer);
jPeer[JS(balance)] = saBalance.getText();
jPeer[JS(currency)] = ripple::to_string(saBalance.issue().currency);
jPeer[JS(limit)] = saLimit.getText();
jPeer[JS(limit_peer)] = saLimitPeer.getText();
jPeer[JS(quality_in)] = lineQualityIn;
jPeer[JS(quality_out)] = lineQualityOut;
if (lineAuth)
jPeer["authorized"] = true;
jPeer[JS(authorized)] = true;
if (lineAuthPeer)
jPeer["peer_authorized"] = true;
jPeer[JS(peer_authorized)] = true;
if (lineNoRipple || !lineDefaultRipple)
jPeer["no_ripple"] = lineNoRipple;
jPeer[JS(no_ripple)] = lineNoRipple;
if (lineNoRipple || !lineDefaultRipple)
jPeer["no_ripple_peer"] = lineNoRipplePeer;
jPeer[JS(no_ripple_peer)] = lineNoRipplePeer;
if (lineFreeze)
jPeer["freeze"] = true;
jPeer[JS(freeze)] = true;
if (lineFreezePeer)
jPeer["freeze_peer"] = true;
jPeer[JS(freeze_peer)] = true;
jsonLines.push_back(jPeer);
}
@@ -99,82 +99,65 @@ doAccountLines(Context const& context)
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
if (!request.contains("account"))
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
if (!request.at("account").is_string())
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
auto accountID =
accountFromStringStrict(request.at("account").as_string().c_str());
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
if (!rawAcct)
return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
std::optional<ripple::AccountID> peerAccount;
if (request.contains("peer"))
if (auto const status = getOptionalAccount(request, peerAccount, JS(peer));
status)
return status;
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
std::optional<std::string> marker = {};
if (request.contains(JS(marker)))
{
if (!request.at("peer").is_string())
return Status{Error::rpcINVALID_PARAMS, "peerNotString"};
peerAccount =
accountFromStringStrict(request.at("peer").as_string().c_str());
if (!peerAccount)
return Status{Error::rpcINVALID_PARAMS, "peerMalformed"};
}
std::uint32_t limit = 200;
if (request.contains("limit"))
{
if (!request.at("limit").is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
limit = request.at("limit").as_int64();
if (limit <= 0)
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
}
std::optional<std::string> cursor = {};
if (request.contains("marker"))
{
if (!request.at("marker").is_string())
if (!request.at(JS(marker)).is_string())
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
cursor = request.at("marker").as_string().c_str();
marker = request.at(JS(marker)).as_string().c_str();
}
response["account"] = ripple::to_string(*accountID);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response["lines"] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonLines = response.at("lines").as_array();
response[JS(account)] = ripple::to_string(accountID);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
response[JS(lines)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonLines = response.at(JS(lines)).as_array();
auto const addToResponse = [&](ripple::SLE const& sle) -> void {
if (sle.getType() == ripple::ltRIPPLE_STATE)
{
addLine(jsonLines, sle, *accountID, peerAccount);
addLine(jsonLines, sle, accountID, peerAccount);
}
};
auto next = traverseOwnedNodes(
*context.backend,
*accountID,
accountID,
lgrInfo.seq,
limit,
cursor,
marker,
context.yield,
addToResponse);
if (auto status = std::get_if<RPC::Status>(&next))
return *status;
auto nextCursor = std::get<RPC::AccountCursor>(next);
auto nextMarker = std::get<RPC::AccountCursor>(next);
if (nextCursor.isNonZero())
response["marker"] = nextCursor.toString();
if (nextMarker.isNonZero())
response[JS(marker)] = nextMarker.toString();
return response;
}
} // namespace RPC
} // namespace RPC
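Editor's note: account_lines now reads peer through getOptionalAccount, so the counterparty filter is genuinely optional: omit it to list every trust line, or pass an address to restrict output to lines against that account. Illustrative request (placeholder addresses):

auto req = boost::json::parse(R"({
    "command": "account_lines",
    "account": "rExampleAccountxxxxxxxxxxxxxxxxxxx",
    "peer": "rExamplePeerxxxxxxxxxxxxxxxxxxxxxxx",
    "limit": 50
})").as_object();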

View File

@@ -1,10 +1,12 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/app/tx/impl/details/NFTokenUtils.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <ripple/protocol/nftPageMask.h>
#include <boost/json.hpp>
#include <algorithm>
#include <rpc/RPCHelpers.h>
@@ -23,7 +25,112 @@ std::unordered_map<std::string, ripple::LedgerEntryType> types{
{"escrow", ripple::ltESCROW},
{"deposit_preauth", ripple::ltDEPOSIT_PREAUTH},
{"check", ripple::ltCHECK},
};
{"nft_page", ripple::ltNFTOKEN_PAGE},
{"nft_offer", ripple::ltNFTOKEN_OFFER}};
Result
doAccountNFTs(Context const& context)
{
auto request = context.params;
boost::json::object response = {};
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
if (!rawAcct)
return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
ripple::uint256 marker;
if (auto const status = getHexMarker(request, marker); status)
return status;
response[JS(account)] = ripple::toBase58(accountID);
response[JS(validated)] = true;
std::uint32_t numPages = 0;
response[JS(account_nfts)] = boost::json::value(boost::json::array_kind);
auto& nfts = response.at(JS(account_nfts)).as_array();
// if a marker was passed, start at the page specified in marker. Else,
// start at the max page
auto const pageKey =
marker.isZero() ? ripple::keylet::nftpage_max(accountID).key : marker;
auto const blob =
context.backend->fetchLedgerObject(pageKey, lgrInfo.seq, context.yield);
if (!blob)
return response;
std::optional<ripple::SLE const> page{
ripple::SLE{ripple::SerialIter{blob->data(), blob->size()}, pageKey}};
// Continue iteration from the current page
while (page)
{
auto arr = page->getFieldArray(ripple::sfNFTokens);
for (auto const& o : arr)
{
ripple::uint256 const nftokenID = o[ripple::sfNFTokenID];
{
nfts.push_back(
toBoostJson(o.getJson(ripple::JsonOptions::none)));
auto& obj = nfts.back().as_object();
// Pull out the components of the nft ID.
obj[SFS(sfFlags)] = ripple::nft::getFlags(nftokenID);
obj[SFS(sfIssuer)] =
to_string(ripple::nft::getIssuer(nftokenID));
obj[SFS(sfNFTokenTaxon)] =
ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID));
obj[JS(nft_serial)] = ripple::nft::getSerial(nftokenID);
if (std::uint16_t xferFee = {
ripple::nft::getTransferFee(nftokenID)})
obj[SFS(sfTransferFee)] = xferFee;
}
}
++numPages;
if (auto npm = (*page)[~ripple::sfPreviousPageMin])
{
auto const nextKey = ripple::Keylet(ripple::ltNFTOKEN_PAGE, *npm);
if (numPages == limit)
{
response[JS(marker)] = to_string(nextKey.key);
response[JS(limit)] = numPages;
return response;
}
auto const nextBlob = context.backend->fetchLedgerObject(
nextKey.key, lgrInfo.seq, context.yield);
page.emplace(ripple::SLE{
ripple::SerialIter{nextBlob->data(), nextBlob->size()},
nextKey.key});
}
else
page.reset();
}
return response;
}
Result
doAccountObjects(Context const& context)
@@ -37,54 +144,40 @@ doAccountObjects(Context const& context)
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
if (!request.contains("account"))
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
if (!request.at("account").is_string())
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
auto accountID =
accountFromStringStrict(request.at("account").as_string().c_str());
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
std::uint32_t limit = 200;
if (request.contains("limit"))
{
if (!request.at("limit").is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
limit = request.at("limit").as_int64();
if (limit <= 0)
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
}
std::optional<std::string> cursor = {};
std::optional<std::string> marker = {};
if (request.contains("marker"))
{
if (!request.at("marker").is_string())
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
cursor = request.at("marker").as_string().c_str();
marker = request.at("marker").as_string().c_str();
}
std::optional<ripple::LedgerEntryType> objectType = {};
if (request.contains("type"))
if (request.contains(JS(type)))
{
if (!request.at("type").is_string())
if (!request.at(JS(type)).is_string())
return Status{Error::rpcINVALID_PARAMS, "typeNotString"};
std::string typeAsString = request.at("type").as_string().c_str();
std::string typeAsString = request.at(JS(type)).as_string().c_str();
if (types.find(typeAsString) == types.end())
return Status{Error::rpcINVALID_PARAMS, "typeInvalid"};
objectType = types[typeAsString];
}
response["account"] = ripple::to_string(*accountID);
response["account_objects"] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonObjects = response.at("account_objects").as_array();
response[JS(account)] = ripple::to_string(accountID);
response[JS(account_objects)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonObjects =
response.at(JS(account_objects)).as_array();
auto const addToResponse = [&](ripple::SLE const& sle) {
if (!objectType || objectType == sle.getType())
@@ -95,23 +188,23 @@ doAccountObjects(Context const& context)
auto next = traverseOwnedNodes(
*context.backend,
*accountID,
accountID,
lgrInfo.seq,
limit,
cursor,
marker,
context.yield,
addToResponse);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
if (auto status = std::get_if<RPC::Status>(&next))
return *status;
auto nextCursor = std::get<RPC::AccountCursor>(next);
auto nextMarker = std::get<RPC::AccountCursor>(next);
if (nextCursor.isNonZero())
response["marker"] = nextCursor.toString();
if (nextMarker.isNonZero())
response[JS(marker)] = nextMarker.toString();
return response;
}
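Editor's note: doAccountNFTs walks the owner's NFT pages backwards from nftpage_max via sfPreviousPageMin and, for each token, splits the packed NFTokenID into the fields clients expect. The same decomposition can be reproduced in isolation with the ripple::nft helpers already pulled in by the includes above (the ID below is a made-up placeholder):

ripple::uint256 nftokenID;
nftokenID.parseHex(
    "000800003C6B6F7D1F5E8C1B3B2A0000000000000000000000000000001A2B3C");
auto const flags  = ripple::nft::getFlags(nftokenID);                        // sfFlags
auto const issuer = ripple::nft::getIssuer(nftokenID);                       // sfIssuer (AccountID)
auto const taxon  = ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID)); // sfNFTokenTaxon
auto const serial = ripple::nft::getSerial(nftokenID);                       // nft_serial
auto const fee    = ripple::nft::getTransferFee(nftokenID);                  // sfTransferFee, 0 if none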

View File

@@ -27,37 +27,39 @@ addOffer(boost::json::array& offersJson, ripple::SLE const& offer)
if (!takerPays.native())
{
obj["taker_pays"] = boost::json::value(boost::json::object_kind);
boost::json::object& takerPaysJson = obj.at("taker_pays").as_object();
obj[JS(taker_pays)] = boost::json::value(boost::json::object_kind);
boost::json::object& takerPaysJson = obj.at(JS(taker_pays)).as_object();
takerPaysJson["value"] = takerPays.getText();
takerPaysJson["currency"] = ripple::to_string(takerPays.getCurrency());
takerPaysJson["issuer"] = ripple::to_string(takerPays.getIssuer());
takerPaysJson[JS(value)] = takerPays.getText();
takerPaysJson[JS(currency)] =
ripple::to_string(takerPays.getCurrency());
takerPaysJson[JS(issuer)] = ripple::to_string(takerPays.getIssuer());
}
else
{
obj["taker_pays"] = takerPays.getText();
obj[JS(taker_pays)] = takerPays.getText();
}
if (!takerGets.native())
{
obj["taker_gets"] = boost::json::value(boost::json::object_kind);
boost::json::object& takerGetsJson = obj.at("taker_gets").as_object();
obj[JS(taker_gets)] = boost::json::value(boost::json::object_kind);
boost::json::object& takerGetsJson = obj.at(JS(taker_gets)).as_object();
takerGetsJson["value"] = takerGets.getText();
takerGetsJson["currency"] = ripple::to_string(takerGets.getCurrency());
takerGetsJson["issuer"] = ripple::to_string(takerGets.getIssuer());
takerGetsJson[JS(value)] = takerGets.getText();
takerGetsJson[JS(currency)] =
ripple::to_string(takerGets.getCurrency());
takerGetsJson[JS(issuer)] = ripple::to_string(takerGets.getIssuer());
}
else
{
obj["taker_gets"] = takerGets.getText();
obj[JS(taker_gets)] = takerGets.getText();
}
obj["seq"] = offer.getFieldU32(ripple::sfSequence);
obj["flags"] = offer.getFieldU32(ripple::sfFlags);
obj["quality"] = rate.getText();
obj[JS(seq)] = offer.getFieldU32(ripple::sfSequence);
obj[JS(flags)] = offer.getFieldU32(ripple::sfFlags);
obj[JS(quality)] = rate.getText();
if (offer.isFieldPresent(ripple::sfExpiration))
obj["expiration"] = offer.getFieldU32(ripple::sfExpiration);
obj[JS(expiration)] = offer.getFieldU32(ripple::sfExpiration);
offersJson.push_back(obj);
};
@@ -74,52 +76,38 @@ doAccountOffers(Context const& context)
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
if (!request.contains("account"))
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
if (!request.at("account").is_string())
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
auto rawAcct = context.backend->fetchLedgerObject(
ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield);
auto accountID =
accountFromStringStrict(request.at("account").as_string().c_str());
if (!rawAcct)
return Status{Error::rpcACT_NOT_FOUND, "accountNotFound"};
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
std::uint32_t limit = 200;
if (request.contains("limit"))
std::optional<std::string> marker = {};
if (request.contains(JS(marker)))
{
if (!request.at("limit").is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
limit = request.at("limit").as_int64();
if (limit <= 0)
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
}
std::optional<std::string> cursor = {};
if (request.contains("marker"))
{
if (!request.at("marker").is_string())
if (!request.at(JS(marker)).is_string())
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
cursor = request.at("marker").as_string().c_str();
marker = request.at(JS(marker)).as_string().c_str();
}
response["account"] = ripple::to_string(*accountID);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response["offers"] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonLines = response.at("offers").as_array();
response[JS(account)] = ripple::to_string(accountID);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
response[JS(offers)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonLines = response.at(JS(offers)).as_array();
auto const addToResponse = [&](ripple::SLE const& sle) {
if (sle.getType() == ripple::ltOFFER)
{
if (limit-- == 0)
{
return false;
}
addOffer(jsonLines, sle);
}
@@ -128,22 +116,22 @@ doAccountOffers(Context const& context)
auto next = traverseOwnedNodes(
*context.backend,
*accountID,
accountID,
lgrInfo.seq,
limit,
cursor,
marker,
context.yield,
addToResponse);
if (auto status = std::get_if<RPC::Status>(&next))
return *status;
auto nextCursor = std::get<RPC::AccountCursor>(next);
auto nextMarker = std::get<RPC::AccountCursor>(next);
if (nextCursor.isNonZero())
response["marker"] = nextCursor.toString();
if (nextMarker.isNonZero())
response[JS(marker)] = nextMarker.toString();
return response;
}
} // namespace RPC
} // namespace RPC

View File

@@ -12,60 +12,38 @@ doAccountTx(Context const& context)
auto request = context.params;
boost::json::object response = {};
if (!request.contains("account"))
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
if (!request.at("account").is_string())
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
auto accountID =
accountFromStringStrict(request.at("account").as_string().c_str());
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
bool binary = false;
if (request.contains("binary"))
{
if (!request.at("binary").is_bool())
return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
binary = request.at("binary").as_bool();
}
bool forward = false;
if (request.contains("forward"))
{
if (!request.at("forward").is_bool())
return Status{Error::rpcINVALID_PARAMS, "forwardNotBool"};
forward = request.at("forward").as_bool();
}
bool const binary = getBool(request, JS(binary), false);
bool const forward = getBool(request, JS(forward), false);
std::optional<Backend::AccountTransactionsCursor> cursor;
if (request.contains("marker"))
if (request.contains(JS(marker)))
{
auto const& obj = request.at("marker").as_object();
auto const& obj = request.at(JS(marker)).as_object();
std::optional<std::uint32_t> transactionIndex = {};
if (obj.contains("seq"))
if (obj.contains(JS(seq)))
{
if (!obj.at("seq").is_int64())
if (!obj.at(JS(seq)).is_int64())
return Status{
Error::rpcINVALID_PARAMS, "transactionIndexNotInt"};
transactionIndex =
boost::json::value_to<std::uint32_t>(obj.at("seq"));
boost::json::value_to<std::uint32_t>(obj.at(JS(seq)));
}
std::optional<std::uint32_t> ledgerIndex = {};
if (obj.contains("ledger"))
if (obj.contains(JS(ledger)))
{
if (!obj.at("ledger").is_int64())
if (!obj.at(JS(ledger)).is_int64())
return Status{Error::rpcINVALID_PARAMS, "ledgerIndexNotInt"};
ledgerIndex =
boost::json::value_to<std::uint32_t>(obj.at("ledger"));
boost::json::value_to<std::uint32_t>(obj.at(JS(ledger)));
}
if (!transactionIndex || !ledgerIndex)
@@ -75,9 +53,9 @@ doAccountTx(Context const& context)
}
auto minIndex = context.range.minSequence;
if (request.contains("ledger_index_min"))
if (request.contains(JS(ledger_index_min)))
{
auto& min = request.at("ledger_index_min");
auto& min = request.at(JS(ledger_index_min));
if (!min.is_int64())
return Status{Error::rpcINVALID_PARAMS, "ledgerSeqMinNotNumber"};
@@ -87,7 +65,7 @@ doAccountTx(Context const& context)
if (context.range.maxSequence < min.as_int64() ||
context.range.minSequence > min.as_int64())
return Status{
Error::rpcINVALID_PARAMS, "ledgerSeqMaxOutOfRange"};
Error::rpcINVALID_PARAMS, "ledgerSeqMinOutOfRange"};
else
minIndex = value_to<std::uint32_t>(min);
}
@@ -97,9 +75,9 @@ doAccountTx(Context const& context)
}
auto maxIndex = context.range.maxSequence;
if (request.contains("ledger_index_max"))
if (request.contains(JS(ledger_index_max)))
{
auto& max = request.at("ledger_index_max");
auto& max = request.at(JS(ledger_index_max));
if (!max.is_int64())
return Status{Error::rpcINVALID_PARAMS, "ledgerSeqMaxNotNumber"};
@@ -121,30 +99,18 @@ doAccountTx(Context const& context)
cursor = {maxIndex, INT32_MAX};
}
if (request.contains("ledger_index"))
if (request.contains(JS(ledger_index)) || request.contains(JS(ledger_hash)))
{
if (!request.at("ledger_index").is_int64())
return Status{Error::rpcINVALID_PARAMS, "ledgerIndexNotNumber"};
if (request.contains(JS(ledger_index_max)) ||
request.contains(JS(ledger_index_min)))
return Status{
Error::rpcINVALID_PARAMS, "containsLedgerSpecifierAndRange"};
auto ledgerIndex =
boost::json::value_to<std::uint32_t>(request.at("ledger_index"));
maxIndex = minIndex = ledgerIndex;
}
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
if (request.contains("ledger_hash"))
{
if (!request.at("ledger_hash").is_string())
return RPC::Status{
RPC::Error::rpcINVALID_PARAMS, "ledgerHashNotString"};
ripple::uint256 ledgerHash;
if (!ledgerHash.parseHex(request.at("ledger_hash").as_string().c_str()))
return RPC::Status{
RPC::Error::rpcINVALID_PARAMS, "ledgerHashMalformed"};
auto lgrInfo =
context.backend->fetchLedgerByHash(ledgerHash, context.yield);
maxIndex = minIndex = lgrInfo->seq;
maxIndex = minIndex = std::get<ripple::LedgerInfo>(v).seq;
}
if (!cursor)
@@ -155,37 +121,31 @@ doAccountTx(Context const& context)
cursor = {maxIndex, INT32_MAX};
}
std::uint32_t limit = 200;
if (request.contains("limit"))
{
if (!request.at("limit").is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
limit = request.at("limit").as_int64();
if (limit <= 0)
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
response["limit"] = limit;
}
if (request.contains(JS(limit)))
response[JS(limit)] = limit;
boost::json::array txns;
auto start = std::chrono::system_clock::now();
auto [blobs, retCursor] = context.backend->fetchAccountTransactions(
*accountID, limit, forward, cursor, context.yield);
accountID, limit, forward, cursor, context.yield);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(info) << __func__ << " db fetch took "
<< ((end - start).count() / 1000000000.0)
<< " num blobs = " << blobs.size();
response["account"] = ripple::to_string(*accountID);
response[JS(account)] = ripple::to_string(accountID);
if (retCursor)
{
boost::json::object cursorJson;
cursorJson["ledger"] = retCursor->ledgerSequence;
cursorJson["seq"] = retCursor->transactionIndex;
response["marker"] = cursorJson;
cursorJson[JS(ledger)] = retCursor->ledgerSequence;
cursorJson[JS(seq)] = retCursor->transactionIndex;
response[JS(marker)] = cursorJson;
}
std::optional<size_t> maxReturnedIndex;
@@ -206,18 +166,20 @@ doAccountTx(Context const& context)
if (!binary)
{
auto [txn, meta] = toExpandedJson(txnPlusMeta);
obj["meta"] = meta;
obj["tx"] = txn;
obj["tx"].as_object()["ledger_index"] = txnPlusMeta.ledgerSequence;
obj["tx"].as_object()["date"] = txnPlusMeta.date;
obj[JS(meta)] = meta;
obj[JS(tx)] = txn;
obj[JS(tx)].as_object()[JS(ledger_index)] =
txnPlusMeta.ledgerSequence;
obj[JS(tx)].as_object()[JS(date)] = txnPlusMeta.date;
}
else
{
obj["meta"] = ripple::strHex(txnPlusMeta.metadata);
obj["tx_blob"] = ripple::strHex(txnPlusMeta.transaction);
obj["ledger_index"] = txnPlusMeta.ledgerSequence;
obj["date"] = txnPlusMeta.date;
obj[JS(meta)] = ripple::strHex(txnPlusMeta.metadata);
obj[JS(tx_blob)] = ripple::strHex(txnPlusMeta.transaction);
obj[JS(ledger_index)] = txnPlusMeta.ledgerSequence;
obj[JS(date)] = txnPlusMeta.date;
}
obj[JS(validated)] = true;
txns.push_back(obj);
if (!minReturnedIndex || txnPlusMeta.ledgerSequence < *minReturnedIndex)
@@ -227,24 +189,18 @@ doAccountTx(Context const& context)
}
assert(cursor);
if (forward)
if (!forward)
{
response["ledger_index_min"] = cursor->ledgerSequence;
if (blobs.size() >= limit)
response["ledger_index_max"] = *maxReturnedIndex;
else
response["ledger_index_max"] = maxIndex;
response[JS(ledger_index_min)] = cursor->ledgerSequence;
response[JS(ledger_index_max)] = maxIndex;
}
else
{
response["ledger_index_max"] = cursor->ledgerSequence;
if (blobs.size() >= limit)
response["ledger_index_min"] = *minReturnedIndex;
else
response["ledger_index_min"] = minIndex;
response[JS(ledger_index_max)] = cursor->ledgerSequence;
response[JS(ledger_index_min)] = minIndex;
}
response["transactions"] = txns;
response[JS(transactions)] = txns;
auto end2 = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(info) << __func__ << " serialization took "
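Editor's note: unlike the account_* object handlers, account_tx pages with a structured marker. The response's marker object carries the ledger sequence and transaction index of the resumption point, and the client sends it back unchanged. An illustrative follow-up request matching the cursorJson fields built above (values are placeholders):

auto req = boost::json::parse(R"({
    "command": "account_tx",
    "account": "rExampleAccountxxxxxxxxxxxxxxxxxxx",
    "limit": 50,
    "forward": false,
    "marker": { "ledger": 70000000, "seq": 12 }
})").as_object();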

View File

@@ -48,42 +48,21 @@ doBookOffers(Context const& context)
}
}
std::uint32_t limit = 200;
if (request.contains("limit"))
{
if (!request.at("limit").is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInt"};
limit = request.at("limit").as_int64();
if (limit <= 0)
return Status{Error::rpcINVALID_PARAMS, "limitNotPositive"};
}
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
ripple::AccountID takerID = beast::zero;
if (request.contains("taker"))
{
auto parsed = parseTaker(request["taker"]);
if (auto status = std::get_if<Status>(&parsed))
return *status;
else
{
takerID = std::get<ripple::AccountID>(parsed);
}
}
if (auto const status = getTaker(request, takerID); status)
return status;
ripple::uint256 cursor = beast::zero;
if (request.contains("cursor"))
{
if (!request.at("cursor").is_string())
return Status{Error::rpcINVALID_PARAMS, "cursorNotString"};
if (!cursor.parseHex(request.at("cursor").as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedCursor"};
}
ripple::uint256 marker = beast::zero;
if (auto const status = getHexMarker(request, marker); status)
return status;
auto start = std::chrono::system_clock::now();
auto [offers, retCursor] = context.backend->fetchBookOffers(
bookBase, lgrInfo.seq, limit, cursor, context.yield);
auto [offers, retMarker] = context.backend->fetchBookOffers(
bookBase, lgrInfo.seq, limit, marker, context.yield);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(warning)
@@ -92,10 +71,10 @@ doBookOffers(Context const& context)
.count()
<< " milliseconds - request = " << request;
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
response["offers"] = postProcessOrderBook(
response[JS(offers)] = postProcessOrderBook(
offers, book, takerID, *context.backend, lgrInfo.seq, context.yield);
auto end2 = std::chrono::system_clock::now();
@@ -106,8 +85,8 @@ doBookOffers(Context const& context)
.count()
<< " milliseconds - request = " << request;
if (retCursor)
response["marker"] = ripple::strHex(*retCursor);
if (retMarker)
response["marker"] = ripple::strHex(*retMarker);
return response;
}
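Editor's note: book_offers now parses its resumption point through getHexMarker, so the value is a 64-character hex key returned by the previous page rather than an opaque cursor string. An illustrative request, assuming the field is still named marker as the helper's use elsewhere suggests (all values are placeholders):

auto req = boost::json::parse(R"({
    "command": "book_offers",
    "taker_gets": { "currency": "USD", "issuer": "rExampleIssuerxxxxxxxxxxxxxxxxxxxx" },
    "taker_pays": { "currency": "XRP" },
    "limit": 60,
    "marker": "1F4E9C2B7A6D5E3F0819AB34CD56EF7890A1B2C3D4E5F60718293A4B5C6D7E8F"
})").as_object();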

View File

@@ -27,19 +27,13 @@ doChannelAuthorize(Context const& context)
auto request = context.params;
boost::json::object response = {};
if (!request.contains("channel_id"))
return Status{Error::rpcINVALID_PARAMS, "missingChannelID"};
if (!request.at("channel_id").is_string())
return Status{Error::rpcINVALID_PARAMS, "channelIDNotString"};
if (!request.contains("amount"))
if (!request.contains(JS(amount)))
return Status{Error::rpcINVALID_PARAMS, "missingAmount"};
if (!request.at("amount").is_string())
if (!request.at(JS(amount)).is_string())
return Status{Error::rpcINVALID_PARAMS, "amountNotString"};
if (!request.contains("key_type") && !request.contains("secret"))
if (!request.contains(JS(key_type)) && !request.contains(JS(secret)))
return Status{Error::rpcINVALID_PARAMS, "missingKeyTypeOrSecret"};
auto v = keypairFromRequst(request);
@@ -50,10 +44,11 @@ doChannelAuthorize(Context const& context)
std::get<std::pair<ripple::PublicKey, ripple::SecretKey>>(v);
ripple::uint256 channelId;
if (!channelId.parseHex(request.at("channel_id").as_string().c_str()))
return Status{Error::rpcCHANNEL_MALFORMED, "malformedChannelID"};
if (auto const status = getChannelId(request, channelId); status)
return status;
auto optDrops = ripple::to_uint64(request.at("amount").as_string().c_str());
auto optDrops =
ripple::to_uint64(request.at(JS(amount)).as_string().c_str());
if (!optDrops)
return Status{Error::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"};
@@ -67,7 +62,7 @@ doChannelAuthorize(Context const& context)
try
{
auto const buf = ripple::sign(pk, sk, msg.slice());
response["signature"] = ripple::strHex(buf);
response[JS(signature)] = ripple::strHex(buf);
}
catch (std::exception&)
{

View File

@@ -16,33 +16,28 @@ doChannelVerify(Context const& context)
auto request = context.params;
boost::json::object response = {};
if (!request.contains("channel_id"))
return Status{Error::rpcINVALID_PARAMS, "missingChannelID"};
if (!request.at("channel_id").is_string())
return Status{Error::rpcINVALID_PARAMS, "channelIDNotString"};
if (!request.contains("amount"))
if (!request.contains(JS(amount)))
return Status{Error::rpcINVALID_PARAMS, "missingAmount"};
if (!request.at("amount").is_string())
if (!request.at(JS(amount)).is_string())
return Status{Error::rpcINVALID_PARAMS, "amountNotString"};
if (!request.contains("signature"))
if (!request.contains(JS(signature)))
return Status{Error::rpcINVALID_PARAMS, "missingSignature"};
if (!request.at("signature").is_string())
if (!request.at(JS(signature)).is_string())
return Status{Error::rpcINVALID_PARAMS, "signatureNotString"};
if (!request.contains("public_key"))
if (!request.contains(JS(public_key)))
return Status{Error::rpcINVALID_PARAMS, "missingPublicKey"};
if (!request.at("public_key").is_string())
if (!request.at(JS(public_key)).is_string())
return Status{Error::rpcINVALID_PARAMS, "publicKeyNotString"};
std::optional<ripple::PublicKey> pk;
{
std::string const strPk = request.at("public_key").as_string().c_str();
std::string const strPk =
request.at(JS(public_key)).as_string().c_str();
pk = ripple::parseBase58<ripple::PublicKey>(
ripple::TokenType::AccountPublic, strPk);
@@ -62,17 +57,18 @@ doChannelVerify(Context const& context)
}
ripple::uint256 channelId;
if (!channelId.parseHex(request.at("channel_id").as_string().c_str()))
return Status{Error::rpcCHANNEL_MALFORMED, "malformedChannelID"};
if (auto const status = getChannelId(request, channelId); status)
return status;
auto optDrops = ripple::to_uint64(request.at("amount").as_string().c_str());
auto optDrops =
ripple::to_uint64(request.at(JS(amount)).as_string().c_str());
if (!optDrops)
return Status{Error::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"};
std::uint64_t drops = *optDrops;
auto sig = ripple::strUnHex(request.at("signature").as_string().c_str());
auto sig = ripple::strUnHex(request.at(JS(signature)).as_string().c_str());
if (!sig || !sig->size())
return Status{Error::rpcINVALID_PARAMS, "invalidSignature"};
@@ -81,7 +77,7 @@ doChannelVerify(Context const& context)
ripple::serializePayChanAuthorization(
msg, channelId, ripple::XRPAmount(drops));
response["signature_verified"] =
response[JS(signature_verified)] =
ripple::verify(*pk, msg.slice(), ripple::makeSlice(*sig), true);
return response;
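Editor's note: channel_verify is a pure offline check. It rebuilds the payment-channel authorization message from channel_id and the drop amount, verifies the supplied signature against the given public key, and returns signature_verified. Illustrative input (all values are placeholders; the signature string is truncated):

auto req = boost::json::parse(R"({
    "command": "channel_verify",
    "channel_id": "5DB01B7FFED6B67E6B0414DED11E051D2EE2B7619CE0EAA6286D67A3A4D5BDB3",
    "amount": "1000000",
    "signature": "304402AB...",
    "public_key": "aB44YfzW24VDEJQ2UuLPV2PvqcPCSoLnL7y5M1EzhdW4LnK5xMS3"
})").as_object();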

View File

@@ -9,17 +9,9 @@ doGatewayBalances(Context const& context)
auto request = context.params;
boost::json::object response = {};
if (!request.contains("account"))
return Status{Error::rpcINVALID_PARAMS, "missingAccount"};
if (!request.at("account").is_string())
return Status{Error::rpcINVALID_PARAMS, "accountNotString"};
auto accountID =
accountFromStringStrict(request.at("account").as_string().c_str());
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
@@ -81,7 +73,7 @@ doGatewayBalances(Context const& context)
if (!valid)
{
response["error"] = "invalidHotWallet";
response[JS(error)] = "invalidHotWallet";
return response;
}
}
@@ -148,7 +140,7 @@ doGatewayBalances(Context const& context)
traverseOwnedNodes(
*context.backend,
*accountID,
accountID,
lgrInfo.seq,
std::numeric_limits<std::uint32_t>::max(),
{},
@@ -162,7 +154,7 @@ doGatewayBalances(Context const& context)
{
obj[ripple::to_string(k)] = v.getText();
}
response["obligations"] = std::move(obj);
response[JS(obligations)] = std::move(obj);
}
auto toJson =
@@ -177,9 +169,9 @@ doGatewayBalances(Context const& context)
for (auto const& balance : accBalances)
{
boost::json::object entry;
entry["currency"] =
entry[JS(currency)] =
ripple::to_string(balance.issue().currency);
entry["value"] = balance.getText();
entry[JS(value)] = balance.getText();
arr.push_back(std::move(entry));
}
obj[ripple::to_string(accId)] = std::move(arr);
@@ -189,14 +181,14 @@ doGatewayBalances(Context const& context)
};
if (auto balances = toJson(hotBalances); balances.size())
response["balances"] = balances;
response[JS(balances)] = balances;
if (auto balances = toJson(frozenBalances); balances.size())
response["frozen_balances"] = balances;
response[JS(frozen_balances)] = balances;
if (auto balances = toJson(assets); assets.size())
response["assets"] = toJson(assets);
response["account"] = request.at("account");
response["ledger_index"] = lgrInfo.seq;
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response[JS(assets)] = toJson(assets);
response[JS(account)] = request.at(JS(account));
response[JS(ledger_index)] = lgrInfo.seq;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
return response;
}
} // namespace RPC
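Editor's note: the gateway_balances response groups trust-line totals by direction: obligations sums what the account owes per currency across all counterparties, while balances, frozen_balances, and assets break out hot-wallet, frozen, and asset lines per counterparty account. A sketch of roughly the shape built above (all values invented):

auto const resp = boost::json::parse(R"({
    "account": "rExampleGatewayxxxxxxxxxxxxxxxxxxx",
    "obligations": { "USD": "2390.55" },
    "balances": {
        "rExampleHotWalletxxxxxxxxxxxxxxxxx": [ { "currency": "USD", "value": "12.5" } ]
    },
    "ledger_index": 70000000,
    "ledger_hash": "..."
})");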

View File

@@ -10,30 +10,30 @@ doLedger(Context const& context)
boost::json::object response = {};
bool binary = false;
if (params.contains("binary"))
if (params.contains(JS(binary)))
{
if (!params.at("binary").is_bool())
if (!params.at(JS(binary)).is_bool())
return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
binary = params.at("binary").as_bool();
binary = params.at(JS(binary)).as_bool();
}
bool transactions = false;
if (params.contains("transactions"))
if (params.contains(JS(transactions)))
{
if (!params.at("transactions").is_bool())
if (!params.at(JS(transactions)).is_bool())
return Status{Error::rpcINVALID_PARAMS, "transactionsFlagNotBool"};
transactions = params.at("transactions").as_bool();
transactions = params.at(JS(transactions)).as_bool();
}
bool expand = false;
if (params.contains("expand"))
if (params.contains(JS(expand)))
{
if (!params.at("expand").is_bool())
if (!params.at(JS(expand)).is_bool())
return Status{Error::rpcINVALID_PARAMS, "expandFlagNotBool"};
expand = params.at("expand").as_bool();
expand = params.at(JS(expand)).as_bool();
}
bool diff = false;
@@ -54,35 +54,34 @@ doLedger(Context const& context)
boost::json::object header;
if (binary)
{
header["ledger_data"] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
header[JS(ledger_data)] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
}
else
{
header["accepted"] = true;
header["account_hash"] = ripple::strHex(lgrInfo.accountHash);
header["close_flags"] = lgrInfo.closeFlags;
header["close_time"] = lgrInfo.closeTime.time_since_epoch().count();
header["close_time_human"] = ripple::to_string(lgrInfo.closeTime);
;
header["close_time_resolution"] = lgrInfo.closeTimeResolution.count();
header["closed"] = true;
header["hash"] = ripple::strHex(lgrInfo.hash);
header["ledger_hash"] = ripple::strHex(lgrInfo.hash);
header["ledger_index"] = std::to_string(lgrInfo.seq);
header["parent_close_time"] =
header[JS(accepted)] = true;
header[JS(account_hash)] = ripple::strHex(lgrInfo.accountHash);
header[JS(close_flags)] = lgrInfo.closeFlags;
header[JS(close_time)] = lgrInfo.closeTime.time_since_epoch().count();
header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime);
header[JS(close_time_resolution)] = lgrInfo.closeTimeResolution.count();
header[JS(closed)] = true;
header[JS(hash)] = ripple::strHex(lgrInfo.hash);
header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
header[JS(ledger_index)] = std::to_string(lgrInfo.seq);
header[JS(parent_close_time)] =
lgrInfo.parentCloseTime.time_since_epoch().count();
header["parent_hash"] = ripple::strHex(lgrInfo.parentHash);
header["seqNum"] = std::to_string(lgrInfo.seq);
header["totalCoins"] = ripple::to_string(lgrInfo.drops);
header["total_coins"] = ripple::to_string(lgrInfo.drops);
header["transaction_hash"] = ripple::strHex(lgrInfo.txHash);
header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash);
header[JS(seqNum)] = std::to_string(lgrInfo.seq);
header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops);
header[JS(total_coins)] = ripple::to_string(lgrInfo.drops);
header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash);
}
header["closed"] = true;
header[JS(closed)] = true;
if (transactions)
{
header["transactions"] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonTxs = header.at("transactions").as_array();
header[JS(transactions)] = boost::json::value(boost::json::array_kind);
boost::json::array& jsonTxs = header.at(JS(transactions)).as_array();
if (expand)
{
auto txns = context.backend->fetchAllTransactionsInLedger(
@@ -98,14 +97,14 @@ doLedger(Context const& context)
{
auto [txn, meta] = toExpandedJson(obj);
entry = txn;
entry["metaData"] = meta;
entry[JS(metaData)] = meta;
}
else
{
entry["tx_blob"] = ripple::strHex(obj.transaction);
entry["meta"] = ripple::strHex(obj.metadata);
entry[JS(tx_blob)] = ripple::strHex(obj.transaction);
entry[JS(meta)] = ripple::strHex(obj.metadata);
}
// entry["ledger_index"] = obj.ledgerSequence;
// entry[JS(ledger_index)] = obj.ledgerSequence;
return entry;
});
}
@@ -133,7 +132,7 @@ doLedger(Context const& context)
for (auto const& obj : diff)
{
boost::json::object entry;
entry["id"] = ripple::strHex(obj.key);
entry[JS(id)] = ripple::strHex(obj.key);
if (binary)
entry["object"] = ripple::strHex(obj.blob);
else if (obj.blob.size())
@@ -149,9 +148,9 @@ doLedger(Context const& context)
}
}
response["ledger"] = header;
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response[JS(ledger)] = header;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
return response;
}
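Editor's note: the ledger handler keys its output off three independent booleans: binary switches between a hex ledger_data blob and the expanded header fields, transactions adds the transaction list, and expand controls whether each transaction appears as a full object rather than just its hash. A typical request for fully expanded JSON (the ledger selector is a placeholder):

auto req = boost::json::parse(R"({
    "command": "ledger",
    "ledger_index": "validated",
    "binary": false,
    "transactions": true,
    "expand": true
})").as_object();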

View File

@@ -28,23 +28,15 @@ doLedgerData(Context const& context)
auto request = context.params;
boost::json::object response = {};
bool binary = false;
if (request.contains("binary"))
{
if (!request.at("binary").is_bool())
return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
bool const binary = getBool(request, "binary", false);
binary = request.at("binary").as_bool();
}
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
std::size_t limit = binary ? 2048 : 256;
if (request.contains("limit"))
{
if (!request.at("limit").is_int64())
return Status{Error::rpcINVALID_PARAMS, "limitNotInteger"};
if (!binary)
limit = std::clamp(limit, {1}, {256});
limit = boost::json::value_to<int>(request.at("limit"));
}
bool outOfOrder = false;
if (request.contains("out_of_order"))
{
@@ -53,18 +45,18 @@ doLedgerData(Context const& context)
outOfOrder = request.at("out_of_order").as_bool();
}
std::optional<ripple::uint256> cursor;
std::optional<uint32_t> diffCursor;
if (request.contains("marker"))
std::optional<ripple::uint256> marker;
std::optional<uint32_t> diffMarker;
if (request.contains(JS(marker)))
{
if (!request.at("marker").is_string())
if (!request.at(JS(marker)).is_string())
{
if (outOfOrder)
{
if (!request.at("marker").is_int64())
if (!request.at(JS(marker)).is_int64())
return Status{
Error::rpcINVALID_PARAMS, "markerNotStringOrInt"};
diffCursor = value_to<uint32_t>(request.at("marker"));
diffMarker = value_to<uint32_t>(request.at(JS(marker)));
}
else
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
@@ -73,8 +65,8 @@ doLedgerData(Context const& context)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " : parsing marker";
cursor = ripple::uint256{};
if (!cursor->parseHex(request.at("marker").as_string().c_str()))
marker = ripple::uint256{};
if (!marker->parseHex(request.at(JS(marker)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "markerMalformed"};
}
}
@@ -84,49 +76,58 @@ doLedgerData(Context const& context)
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
boost::json::object header;
// no cursor means this is the first call, so we return header info
if (!cursor)
// no marker means this is the first call, so we return header info
if (!marker)
{
if (binary)
{
header["ledger_data"] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
header[JS(ledger_data)] = ripple::strHex(ledgerInfoToBlob(lgrInfo));
}
else
{
header["accepted"] = true;
header["account_hash"] = ripple::strHex(lgrInfo.accountHash);
header["close_flags"] = lgrInfo.closeFlags;
header["close_time"] = lgrInfo.closeTime.time_since_epoch().count();
header["close_time_human"] = ripple::to_string(lgrInfo.closeTime);
;
header["close_time_resolution"] =
header[JS(accepted)] = true;
header[JS(account_hash)] = ripple::strHex(lgrInfo.accountHash);
header[JS(close_flags)] = lgrInfo.closeFlags;
header[JS(close_time)] =
lgrInfo.closeTime.time_since_epoch().count();
header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime);
header[JS(close_time_resolution)] =
lgrInfo.closeTimeResolution.count();
header["closed"] = true;
header["hash"] = ripple::strHex(lgrInfo.hash);
header["ledger_hash"] = ripple::strHex(lgrInfo.hash);
header["ledger_index"] = std::to_string(lgrInfo.seq);
header["parent_close_time"] =
header[JS(closed)] = true;
header[JS(hash)] = ripple::strHex(lgrInfo.hash);
header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
header[JS(ledger_index)] = std::to_string(lgrInfo.seq);
header[JS(parent_close_time)] =
lgrInfo.parentCloseTime.time_since_epoch().count();
header["parent_hash"] = ripple::strHex(lgrInfo.parentHash);
header["seqNum"] = std::to_string(lgrInfo.seq);
header["totalCoins"] = ripple::to_string(lgrInfo.drops);
header["total_coins"] = ripple::to_string(lgrInfo.drops);
header["transaction_hash"] = ripple::strHex(lgrInfo.txHash);
header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash);
header[JS(seqNum)] = std::to_string(lgrInfo.seq);
header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops);
header[JS(total_coins)] = ripple::to_string(lgrInfo.drops);
header[JS(transaction_hash)] = ripple::strHex(lgrInfo.txHash);
response["ledger"] = header;
response[JS(ledger)] = header;
}
}
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
else
{
if (!outOfOrder &&
!context.backend->fetchLedgerObject(
*marker, lgrInfo.seq, context.yield))
return Status{Error::rpcINVALID_PARAMS, "markerDoesNotExist"};
}
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
auto start = std::chrono::system_clock::now();
std::vector<Backend::LedgerObject> results;
if (diffCursor)
if (diffMarker)
{
assert(outOfOrder);
auto diff =
context.backend->fetchLedgerDiff(*diffCursor, context.yield);
context.backend->fetchLedgerDiff(*diffMarker, context.yield);
std::vector<ripple::uint256> keys;
for (auto&& [key, object] : diff)
{
@@ -143,13 +144,13 @@ doLedgerData(Context const& context)
if (obj.size())
results.push_back({std::move(keys[i]), std::move(obj)});
}
if (*diffCursor > lgrInfo.seq)
response["marker"] = *diffCursor - 1;
if (*diffMarker > lgrInfo.seq)
response["marker"] = *diffMarker - 1;
}
else
{
auto page = context.backend->fetchLedgerPage(
cursor, lgrInfo.seq, limit, outOfOrder, context.yield);
marker, lgrInfo.seq, limit, outOfOrder, context.yield);
results = std::move(page.objects);
if (page.cursor)
response["marker"] = ripple::strHex(*(page.cursor));
@@ -175,14 +176,14 @@ doLedgerData(Context const& context)
if (binary)
{
boost::json::object entry;
entry["data"] = ripple::serializeHex(sle);
entry["index"] = ripple::to_string(sle.key());
entry[JS(data)] = ripple::serializeHex(sle);
entry[JS(index)] = ripple::to_string(sle.key());
objects.push_back(std::move(entry));
}
else
objects.push_back(toJson(sle));
}
response["state"] = std::move(objects);
response[JS(state)] = std::move(objects);
auto end2 = std::chrono::system_clock::now();
time = std::chrono::duration_cast<std::chrono::microseconds>(end2 - end)
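Editor's note: ledger_data accepts two marker shapes. The normal path takes the hex object key returned by the previous page, and now rejects markers that do not exist in the target ledger; out_of_order mode takes an integer ledger sequence, walks ledger diffs, and hands back marker = sequence - 1 while it remains ahead of the requested ledger. Sketches of both request forms (values are placeholders):

// normal paging: marker is the hex key from the previous response
auto paged = boost::json::parse(R"({
    "command": "ledger_data",
    "binary": true,
    "limit": 2048,
    "marker": "0B6C9D4A7F1E2B3C4D5E6F708192A3B4C5D6E7F8091A2B3C4D5E6F7081920A1B"
})").as_object();

// out_of_order paging: marker is a ledger sequence walked down one per response
auto outOfOrder = boost::json::parse(R"({
    "command": "ledger_data",
    "out_of_order": true,
    "marker": 70000000
})").as_object();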

View File

@@ -20,8 +20,7 @@ doLedgerEntry(Context const& context)
auto request = context.params;
boost::json::object response = {};
bool binary =
request.contains("binary") ? request.at("binary").as_bool() : false;
bool const binary = getBool(request, "binary", false);
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
@@ -30,59 +29,64 @@ doLedgerEntry(Context const& context)
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::uint256 key;
if (request.contains("index"))
if (request.contains(JS(index)))
{
if (!request.at("index").is_string())
if (!request.at(JS(index)).is_string())
return Status{Error::rpcINVALID_PARAMS, "indexNotString"};
if (!key.parseHex(request.at("index").as_string().c_str()))
if (!key.parseHex(request.at(JS(index)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedIndex"};
}
else if (request.contains("account_root"))
else if (request.contains(JS(account_root)))
{
if (!request.at("account_root").is_string())
if (!request.at(JS(account_root)).is_string())
return Status{Error::rpcINVALID_PARAMS, "account_rootNotString"};
auto const account = ripple::parseBase58<ripple::AccountID>(
request.at("account_root").as_string().c_str());
request.at(JS(account_root)).as_string().c_str());
if (!account || account->isZero())
return Status{Error::rpcINVALID_PARAMS, "malformedAddress"};
else
key = ripple::keylet::account(*account).key;
}
else if (request.contains("check"))
else if (request.contains(JS(check)))
{
if (!request.at("check").is_string())
if (!request.at(JS(check)).is_string())
return Status{Error::rpcINVALID_PARAMS, "checkNotString"};
if (!key.parseHex(request.at("check").as_string().c_str()))
if (!key.parseHex(request.at(JS(check)).as_string().c_str()))
{
return Status{Error::rpcINVALID_PARAMS, "checkMalformed"};
}
}
else if (request.contains("deposit_preauth"))
else if (request.contains(JS(deposit_preauth)))
{
if (!request.at("deposit_preauth").is_object())
if (!request.at(JS(deposit_preauth)).is_object())
{
if (!request.at("deposit_preauth").is_string() ||
if (!request.at(JS(deposit_preauth)).is_string() ||
!key.parseHex(
request.at("deposit_preauth").as_string().c_str()))
request.at(JS(deposit_preauth)).as_string().c_str()))
{
return Status{
Error::rpcINVALID_PARAMS, "deposit_preauthMalformed"};
}
}
else if (
!request.at("deposit_preauth").as_object().contains("owner") ||
!request.at("deposit_preauth").as_object().at("owner").is_string())
!request.at(JS(deposit_preauth)).as_object().contains(JS(owner)) ||
!request.at(JS(deposit_preauth))
.as_object()
.at(JS(owner))
.is_string())
{
return Status{Error::rpcINVALID_PARAMS, "ownerNotString"};
}
else if (
!request.at("deposit_preauth").as_object().contains("authorized") ||
!request.at("deposit_preauth")
!request.at(JS(deposit_preauth))
.as_object()
.at("authorized")
.contains(JS(authorized)) ||
!request.at(JS(deposit_preauth))
.as_object()
.at(JS(authorized))
.is_string())
{
return Status{Error::rpcINVALID_PARAMS, "authorizedNotString"};
@@ -90,13 +94,13 @@ doLedgerEntry(Context const& context)
else
{
boost::json::object const& deposit_preauth =
request.at("deposit_preauth").as_object();
request.at(JS(deposit_preauth)).as_object();
auto const owner = ripple::parseBase58<ripple::AccountID>(
deposit_preauth.at("owner").as_string().c_str());
deposit_preauth.at(JS(owner)).as_string().c_str());
auto const authorized = ripple::parseBase58<ripple::AccountID>(
deposit_preauth.at("authorized").as_string().c_str());
deposit_preauth.at(JS(authorized)).as_string().c_str());
if (!owner)
return Status{Error::rpcINVALID_PARAMS, "malformedOwner"};
@@ -106,37 +110,37 @@ doLedgerEntry(Context const& context)
key = ripple::keylet::depositPreauth(*owner, *authorized).key;
}
}
else if (request.contains("directory"))
else if (request.contains(JS(directory)))
{
if (!request.at("directory").is_object())
if (!request.at(JS(directory)).is_object())
{
if (!request.at("directory").is_string())
if (!request.at(JS(directory)).is_string())
return Status{Error::rpcINVALID_PARAMS, "directoryNotString"};
if (!key.parseHex(request.at("directory").as_string().c_str()))
if (!key.parseHex(request.at(JS(directory)).as_string().c_str()))
{
return Status{Error::rpcINVALID_PARAMS, "malformedDirectory"};
}
}
else if (
request.at("directory").as_object().contains("sub_index") &&
!request.at("directory").as_object().at("sub_index").is_int64())
request.at(JS(directory)).as_object().contains(JS(sub_index)) &&
!request.at(JS(directory)).as_object().at(JS(sub_index)).is_int64())
{
return Status{Error::rpcINVALID_PARAMS, "sub_indexNotInt"};
}
else
{
auto directory = request.at("directory").as_object();
std::uint64_t subIndex = directory.contains("sub_index")
auto directory = request.at(JS(directory)).as_object();
std::uint64_t subIndex = directory.contains(JS(sub_index))
? boost::json::value_to<std::uint64_t>(
directory.at("sub_index"))
directory.at(JS(sub_index)))
: 0;
if (directory.contains("dir_root"))
if (directory.contains(JS(dir_root)))
{
ripple::uint256 uDirRoot;
if (directory.contains("owner"))
if (directory.contains(JS(owner)))
{
// May not specify both dir_root and owner.
return Status{
@@ -144,7 +148,7 @@ doLedgerEntry(Context const& context)
"mayNotSpecifyBothDirRootAndOwner"};
}
else if (!uDirRoot.parseHex(
directory.at("dir_root").as_string().c_str()))
directory.at(JS(dir_root)).as_string().c_str()))
{
return Status{Error::rpcINVALID_PARAMS, "malformedDirRoot"};
}
@@ -153,10 +157,10 @@ doLedgerEntry(Context const& context)
key = ripple::keylet::page(uDirRoot, subIndex).key;
}
}
else if (directory.contains("owner"))
else if (directory.contains(JS(owner)))
{
auto const ownerID = ripple::parseBase58<ripple::AccountID>(
directory.at("owner").as_string().c_str());
directory.at(JS(owner)).as_string().c_str());
if (!ownerID)
{
@@ -176,31 +180,31 @@ doLedgerEntry(Context const& context)
}
}
}
else if (request.contains("escrow"))
else if (request.contains(JS(escrow)))
{
if (!request.at("escrow").is_object())
if (!request.at(JS(escrow)).is_object())
{
if (!key.parseHex(request.at("escrow").as_string().c_str()))
if (!key.parseHex(request.at(JS(escrow)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedEscrow"};
}
else if (
!request.at("escrow").as_object().contains("owner") ||
!request.at("escrow").as_object().at("owner").is_string())
!request.at(JS(escrow)).as_object().contains(JS(owner)) ||
!request.at(JS(escrow)).as_object().at(JS(owner)).is_string())
{
return Status{Error::rpcINVALID_PARAMS, "malformedOwner"};
}
else if (
!request.at("escrow").as_object().contains("seq") ||
!request.at("escrow").as_object().at("seq").is_int64())
!request.at(JS(escrow)).as_object().contains(JS(seq)) ||
!request.at(JS(escrow)).as_object().at(JS(seq)).is_int64())
{
return Status{Error::rpcINVALID_PARAMS, "malformedSeq"};
}
else
{
auto const id =
ripple::parseBase58<ripple::AccountID>(request.at("escrow")
ripple::parseBase58<ripple::AccountID>(request.at(JS(escrow))
.as_object()
.at("owner")
.at(JS(owner))
.as_string()
.c_str());
@@ -209,120 +213,122 @@ doLedgerEntry(Context const& context)
else
{
std::uint32_t seq =
request.at("escrow").as_object().at("seq").as_int64();
request.at(JS(escrow)).as_object().at(JS(seq)).as_int64();
key = ripple::keylet::escrow(*id, seq).key;
}
}
}
else if (request.contains("offer"))
else if (request.contains(JS(offer)))
{
if (!request.at("offer").is_object())
if (!request.at(JS(offer)).is_object())
{
if (!key.parseHex(request.at("offer").as_string().c_str()))
if (!key.parseHex(request.at(JS(offer)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedOffer"};
}
else if (
!request.at("offer").as_object().contains("account") ||
!request.at("offer").as_object().at("account").is_string())
!request.at(JS(offer)).as_object().contains(JS(account)) ||
!request.at(JS(offer)).as_object().at(JS(account)).is_string())
{
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
}
else if (
!request.at("offer").as_object().contains("seq") ||
!request.at("offer").as_object().at("seq").is_int64())
!request.at(JS(offer)).as_object().contains(JS(seq)) ||
!request.at(JS(offer)).as_object().at(JS(seq)).is_int64())
{
return Status{Error::rpcINVALID_PARAMS, "malformedSeq"};
}
else
{
auto offer = request.at("offer").as_object();
auto offer = request.at(JS(offer)).as_object();
auto const id = ripple::parseBase58<ripple::AccountID>(
offer.at("account").as_string().c_str());
offer.at(JS(account)).as_string().c_str());
if (!id)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
else
{
std::uint32_t seq =
boost::json::value_to<std::uint32_t>(offer.at("seq"));
boost::json::value_to<std::uint32_t>(offer.at(JS(seq)));
key = ripple::keylet::offer(*id, seq).key;
}
}
}
else if (request.contains("payment_channel"))
else if (request.contains(JS(payment_channel)))
{
if (!request.at("payment_channel").is_string())
if (!request.at(JS(payment_channel)).is_string())
return Status{Error::rpcINVALID_PARAMS, "paymentChannelNotString"};
if (!key.parseHex(request.at("payment_channel").as_string().c_str()))
if (!key.parseHex(request.at(JS(payment_channel)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedPaymentChannel"};
}
else if (request.contains("ripple_state"))
else if (request.contains(JS(ripple_state)))
{
if (!request.at("ripple_state").is_object())
if (!request.at(JS(ripple_state)).is_object())
return Status{Error::rpcINVALID_PARAMS, "rippleStateNotObject"};
ripple::Currency currency;
boost::json::object const& state =
request.at("ripple_state").as_object();
request.at(JS(ripple_state)).as_object();
if (!state.contains("currency") || !state.at("currency").is_string())
if (!state.contains(JS(currency)) ||
!state.at(JS(currency)).is_string())
{
return Status{Error::rpcINVALID_PARAMS, "malformedCurrency"};
}
if (!state.contains("accounts") || !state.at("accounts").is_array() ||
2 != state.at("accounts").as_array().size() ||
!state.at("accounts").as_array().at(0).is_string() ||
!state.at("accounts").as_array().at(1).is_string() ||
(state.at("accounts").as_array().at(0).as_string() ==
state.at("accounts").as_array().at(1).as_string()))
if (!state.contains(JS(accounts)) ||
!state.at(JS(accounts)).is_array() ||
2 != state.at(JS(accounts)).as_array().size() ||
!state.at(JS(accounts)).as_array().at(0).is_string() ||
!state.at(JS(accounts)).as_array().at(1).is_string() ||
(state.at(JS(accounts)).as_array().at(0).as_string() ==
state.at(JS(accounts)).as_array().at(1).as_string()))
{
return Status{Error::rpcINVALID_PARAMS, "malformedAccounts"};
}
auto const id1 = ripple::parseBase58<ripple::AccountID>(
state.at("accounts").as_array().at(0).as_string().c_str());
state.at(JS(accounts)).as_array().at(0).as_string().c_str());
auto const id2 = ripple::parseBase58<ripple::AccountID>(
state.at("accounts").as_array().at(1).as_string().c_str());
state.at(JS(accounts)).as_array().at(1).as_string().c_str());
if (!id1 || !id2)
return Status{Error::rpcINVALID_PARAMS, "malformedAccounts"};
else if (!ripple::to_currency(
currency, state.at("currency").as_string().c_str()))
currency, state.at(JS(currency)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedCurrency"};
key = ripple::keylet::line(*id1, *id2, currency).key;
}
else if (request.contains("ticket"))
else if (request.contains(JS(ticket)))
{
if (!request.at("ticket").is_object())
if (!request.at(JS(ticket)).is_object())
{
if (!request.at("ticket").is_string())
if (!request.at(JS(ticket)).is_string())
return Status{Error::rpcINVALID_PARAMS, "ticketNotString"};
if (!key.parseHex(request.at("ticket").as_string().c_str()))
if (!key.parseHex(request.at(JS(ticket)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedTicket"};
}
else if (
!request.at("ticket").as_object().contains("account") ||
!request.at("ticket").as_object().at("account").is_string())
!request.at(JS(ticket)).as_object().contains(JS(account)) ||
!request.at(JS(ticket)).as_object().at(JS(account)).is_string())
{
return Status{Error::rpcINVALID_PARAMS, "malformedTicketAccount"};
}
else if (
!request.at("ticket").as_object().contains("ticket_seq") ||
!request.at("ticket").as_object().at("ticket_seq").is_int64())
!request.at(JS(ticket)).as_object().contains(JS(ticket_seq)) ||
!request.at(JS(ticket)).as_object().at(JS(ticket_seq)).is_int64())
{
return Status{Error::rpcINVALID_PARAMS, "malformedTicketSeq"};
}
else
{
auto const id =
ripple::parseBase58<ripple::AccountID>(request.at("ticket")
ripple::parseBase58<ripple::AccountID>(request.at(JS(ticket))
.as_object()
.at("account")
.at(JS(account))
.as_string()
.c_str());
@@ -331,8 +337,10 @@ doLedgerEntry(Context const& context)
Error::rpcINVALID_PARAMS, "malformedTicketAccount"};
else
{
std::uint32_t seq =
request.at("offer").as_object().at("ticket_seq").as_int64();
std::uint32_t seq = request.at(JS(offer))
.as_object()
.at(JS(ticket_seq))
.as_int64();
key = ripple::getTicketIndex(*id, seq);
}
@@ -349,21 +357,21 @@ doLedgerEntry(Context const& context)
auto end = std::chrono::system_clock::now();
if (!dbResponse or dbResponse->size() == 0)
return Status{Error::rpcOBJECT_NOT_FOUND, "entryNotFound"};
return Status{"entryNotFound"};
response["index"] = ripple::strHex(key);
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response["ledger_index"] = lgrInfo.seq;
response[JS(index)] = ripple::strHex(key);
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
if (binary)
{
response["node_binary"] = ripple::strHex(*dbResponse);
response[JS(node_binary)] = ripple::strHex(*dbResponse);
}
else
{
ripple::STLedgerEntry sle{
ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key};
response["node"] = toJson(sle);
response[JS(node)] = toJson(sle);
}
return response;
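Besides the key constants, ledger_entry swaps its hand-rolled "binary" check for a getBool helper, the same pattern the later hunks follow with getLimit and getAccount. The rest of the hunk is mechanical: each accepted request shape (index, account_root, check, deposit_preauth, directory, escrow, offer, payment_channel, ripple_state, ticket) is validated and reduced to a single ripple::keylet key before the object is fetched. The helper itself is not part of this diff; a hedged sketch of the contract the call site implies, with the error handling reduced to falling back on the default:

#include <boost/json.hpp>
#include <string>

// Sketch only: the real getBool lives in RPCHelpers, and how it reports
// a present-but-non-bool value is not shown in this diff.
static bool
getBool(boost::json::object const& request, std::string const& field, bool dflt)
{
    if (!request.contains(field) || !request.at(field).is_bool())
        return dflt;
    return request.at(field).as_bool();
}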

View File

@@ -16,11 +16,11 @@ doLedgerRange(Context const& context)
}
else
{
response["ledger_index_min"] = range->minSequence;
response["ledger_index_max"] = range->maxSequence;
response[JS(ledger_index_min)] = range->minSequence;
response[JS(ledger_index_max)] = range->maxSequence;
}
return response;
}
} // namespace RPC
} // namespace RPC

View File

@@ -0,0 +1,178 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <rpc/RPCHelpers.h>
namespace RPC {
static void
appendNftOfferJson(ripple::SLE const& offer, boost::json::array& offers)
{
offers.push_back(boost::json::object_kind);
boost::json::object& obj(offers.back().as_object());
obj.at(JS(index)) = ripple::to_string(offer.key());
obj.at(JS(flags)) = (offer)[ripple::sfFlags];
obj.at(JS(owner)) = ripple::toBase58(offer.getAccountID(ripple::sfOwner));
if (offer.isFieldPresent(ripple::sfDestination))
obj[JS(destination)] =
ripple::toBase58(offer.getAccountID(ripple::sfDestination));
if (offer.isFieldPresent(ripple::sfExpiration))
obj.at(JS(expiration)) = offer.getFieldU32(ripple::sfExpiration);
obj.at(JS(amount)) = toBoostJson(offer.getFieldAmount(ripple::sfAmount)
.getJson(ripple::JsonOptions::none));
}
static Result
enumerateNFTOffers(
Context const& context,
ripple::uint256 const& tokenid,
ripple::Keylet const& directory)
{
auto const& request = context.params;
auto v = ledgerInfoFromRequest(context);
if (auto status = std::get_if<Status>(&v))
return *status;
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
// TODO: just check for existence without pulling
if (!context.backend->fetchLedgerObject(
directory.key, lgrInfo.seq, context.yield))
return Status{Error::rpcOBJECT_NOT_FOUND, "notFound"};
std::uint32_t limit;
if (auto const status = getLimit(context, limit); status)
return status;
boost::json::object response = {};
response[JS(nft_id)] = ripple::to_string(tokenid);
response[JS(offers)] = boost::json::value(boost::json::array_kind);
auto& jsonOffers = response[JS(offers)].as_array();
std::vector<ripple::SLE> offers;
std::uint64_t reserve(limit);
ripple::uint256 cursor;
if (request.contains(JS(marker)))
{
// We have a start point. Use limit - 1 from the result and use the
// very last one for the resume.
auto const& marker(request.at(JS(marker)));
if (!marker.is_string())
return Status{Error::rpcINVALID_PARAMS, "markerNotString"};
if (!cursor.parseHex(marker.as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedCursor"};
auto const sle =
read(ripple::keylet::nftoffer(cursor), lgrInfo, context);
if (!sle || tokenid != sle->getFieldH256(ripple::sfNFTokenID))
return Status{Error::rpcOBJECT_NOT_FOUND, "notFound"};
if (tokenid != sle->getFieldH256(ripple::sfNFTokenID))
return Status{Error::rpcINVALID_PARAMS, "invalidTokenid"};
appendNftOfferJson(*sle, jsonOffers);
offers.reserve(reserve);
}
else
{
// We have no start point, limit should be one higher than requested.
offers.reserve(++reserve);
}
auto result = traverseOwnedNodes(
*context.backend,
directory,
cursor,
0,
lgrInfo.seq,
limit,
{},
context.yield,
[&offers](ripple::SLE const& offer) {
if (offer.getType() == ripple::ltNFTOKEN_OFFER)
{
offers.emplace_back(offer);
return true;
}
return false;
});
if (auto status = std::get_if<RPC::Status>(&result))
return *status;
if (offers.size() == reserve)
{
response.at(JS(limit)) = limit;
response.at(JS(marker)) = to_string(offers.back().key());
offers.pop_back();
}
for (auto const& offer : offers)
appendNftOfferJson(offer, jsonOffers);
return response;
}
std::variant<ripple::uint256, Status>
getTokenid(boost::json::object const& request)
{
if (!request.contains(JS(nft_id)))
return Status{Error::rpcINVALID_PARAMS, "missingTokenid"};
if (!request.at(JS(nft_id)).is_string())
return Status{Error::rpcINVALID_PARAMS, "tokenidNotString"};
ripple::uint256 tokenid;
if (!tokenid.parseHex(request.at(JS(nft_id)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedCursor"};
return tokenid;
}
Result
doNFTOffers(Context const& context, bool sells)
{
auto const v = getTokenid(context.params);
if (auto const status = std::get_if<Status>(&v))
return *status;
auto const getKeylet = [sells, &v]() {
if (sells)
return ripple::keylet::nft_sells(std::get<ripple::uint256>(v));
return ripple::keylet::nft_buys(std::get<ripple::uint256>(v));
};
return enumerateNFTOffers(
context, std::get<ripple::uint256>(v), getKeylet());
}
Result
doNFTSellOffers(Context const& context)
{
return doNFTOffers(context, true);
}
Result
doNFTBuyOffers(Context const& context)
{
return doNFTOffers(context, false);
}
} // namespace RPC
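The new nft_sell_offers / nft_buy_offers handlers use the usual limit/marker pagination idiom: with no marker the vector is reserved for limit + 1 entries, and if the traversal actually fills it the extra entry is reported as the marker for the next call and dropped from the page. A self-contained sketch of that idiom, with a plain vector standing in for the directory traversal:

#include <cstddef>
#include <optional>
#include <string>
#include <utility>
#include <vector>

// Return at most `limit` items starting at `start`, plus an optional
// resume marker (the first item of the next page).
std::pair<std::vector<std::string>, std::optional<std::string>>
page(std::vector<std::string> const& all, std::size_t start, std::size_t limit)
{
    std::vector<std::string> out;
    out.reserve(limit + 1);
    for (std::size_t i = start; i < all.size() && out.size() <= limit; ++i)
        out.push_back(all[i]);

    std::optional<std::string> marker;
    if (out.size() == limit + 1)
    {
        marker = out.back();  // resume point, not part of this page
        out.pop_back();
    }
    return {out, marker};
}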

View File

@@ -10,9 +10,9 @@ getBaseTx(
ripple::Fees const& fees)
{
boost::json::object tx;
tx["Sequence"] = accountSeq;
tx["Account"] = ripple::toBase58(accountID);
tx["Fee"] = RPC::toBoostJson(fees.units.jsonClipped());
tx[JS(Sequence)] = accountSeq;
tx[JS(Account)] = ripple::toBase58(accountID);
tx[JS(Fee)] = RPC::toBoostJson(fees.units.jsonClipped());
return tx;
}
@@ -21,11 +21,9 @@ doNoRippleCheck(Context const& context)
{
auto const& request = context.params;
auto accountID =
accountFromStringStrict(getRequiredString(request, "account"));
if (!accountID)
return Status{Error::rpcINVALID_PARAMS, "malformedAccount"};
ripple::AccountID accountID;
if (auto const status = getAccount(request, accountID); status)
return status;
std::string role = getRequiredString(request, "role");
bool roleGateway = false;
@@ -36,7 +34,9 @@ doNoRippleCheck(Context const& context)
return Status{Error::rpcINVALID_PARAMS, "role field is invalid"};
}
size_t limit = getUInt(request, "limit", 300);
std::uint32_t limit = 300;
if (auto const status = getLimit(context, limit); status)
return status;
bool includeTxs = getBool(request, "transactions", false);
@@ -51,11 +51,11 @@ doNoRippleCheck(Context const& context)
boost::json::array transactions;
auto keylet = ripple::keylet::account(*accountID);
auto keylet = ripple::keylet::account(accountID);
auto accountObj = context.backend->fetchLedgerObject(
keylet.key, lgrInfo.seq, context.yield);
if (!accountObj)
throw AccountNotFoundError(ripple::toBase58(*accountID));
throw AccountNotFoundError(ripple::toBase58(accountID));
ripple::SerialIter it{accountObj->data(), accountObj->size()};
ripple::SLE sle{it, keylet.key};
@@ -79,16 +79,16 @@ doNoRippleCheck(Context const& context)
"You should immediately set your default ripple flag");
if (includeTxs)
{
auto tx = getBaseTx(*accountID, accountSeq++, *fees);
tx["TransactionType"] = "AccountSet";
tx["SetFlag"] = 8;
auto tx = getBaseTx(accountID, accountSeq++, *fees);
tx[JS(TransactionType)] = JS(AccountSet);
tx[JS(SetFlag)] = 8;
transactions.push_back(tx);
}
}
traverseOwnedNodes(
*context.backend,
*accountID,
accountID,
lgrInfo.seq,
std::numeric_limits<std::uint32_t>::max(),
{},
@@ -141,12 +141,12 @@ doNoRippleCheck(Context const& context)
ripple::STAmount limitAmount(ownedItem.getFieldAmount(
bLow ? ripple::sfLowLimit : ripple::sfHighLimit));
limitAmount.setIssuer(peer);
auto tx = getBaseTx(*accountID, accountSeq++, *fees);
tx["TransactionType"] = "TrustSet";
tx["LimitAmount"] = RPC::toBoostJson(
auto tx = getBaseTx(accountID, accountSeq++, *fees);
tx[JS(TransactionType)] = JS(TrustSet);
tx[JS(LimitAmount)] = RPC::toBoostJson(
limitAmount.getJson(ripple::JsonOptions::none));
tx["Flags"] = bNoRipple ? ripple::tfClearNoRipple
: ripple::tfSetNoRipple;
tx[JS(Flags)] = bNoRipple ? ripple::tfClearNoRipple
: ripple::tfSetNoRipple;
transactions.push_back(tx);
}
@@ -158,11 +158,11 @@ doNoRippleCheck(Context const& context)
});
boost::json::object response;
response["ledger_index"] = lgrInfo.seq;
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response[JS(ledger_index)] = lgrInfo.seq;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
response["problems"] = std::move(problems);
if (includeTxs)
response["transactions"] = std::move(transactions);
response[JS(transactions)] = std::move(transactions);
return response;
}
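noripple_check now resolves its account and limit through the shared getAccount/getLimit helpers and builds the suggested repair transactions with the jss field names. When the request sets "transactions" to true, each reported problem is paired with a ready-to-complete fix; an abridged sketch of that shape, where the ledger values are illustrative and the real entries also carry the Account, Sequence and Fee fields added by getBaseTx:

#include <boost/json.hpp>

boost::json::object
exampleNoRippleCheckResponse()
{
    boost::json::object response;
    response["ledger_index"] = 70000000;    // illustrative
    response["ledger_hash"] = "ABC123...";  // illustrative
    response["problems"] = boost::json::array{
        "You should immediately set your default ripple flag"};

    boost::json::object fix;
    fix["TransactionType"] = "AccountSet";
    fix["SetFlag"] = 8;  // asfDefaultRipple, as suggested above
    response["transactions"] = boost::json::array{fix};
    return response;
}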

View File

@@ -1,6 +1,10 @@
// rngfill.h doesn't compile without this include
#include <cassert>
#include <ripple/beast/utility/rngfill.h>
#include <ripple/crypto/csprng.h>
#include <rpc/RPCHelpers.h>
namespace RPC {
Result
@@ -10,7 +14,8 @@ doRandom(Context const& context)
beast::rngfill(rand.begin(), rand.size(), ripple::crypto_prng());
boost::json::object result;
result["random"] = ripple::strHex(rand);
result[JS(random)] = ripple::strHex(rand);
return result;
}
} // namespace RPC

View File

@@ -2,6 +2,7 @@
#include <backend/BackendInterface.h>
#include <etl/ETLSource.h>
#include <etl/ReportingETL.h>
#include <main/Build.h>
#include <rpc/RPCHelpers.h>
namespace RPC {
@@ -36,42 +37,46 @@ doServerInfo(Context const& context)
if (age < 0)
age = 0;
response["info"] = boost::json::object{};
boost::json::object& info = response["info"].as_object();
response[JS(info)] = boost::json::object{};
boost::json::object& info = response[JS(info)].as_object();
info["complete_ledgers"] = std::to_string(range->minSequence) + "-" +
info[JS(complete_ledgers)] = std::to_string(range->minSequence) + "-" +
std::to_string(range->maxSequence);
info["counters"] = boost::json::object{};
info["counters"].as_object()["rpc"] = context.counters.report();
info[JS(counters)] = boost::json::object{};
info[JS(counters)].as_object()[JS(rpc)] = context.counters.report();
info[JS(counters)].as_object()["subscriptions"] =
context.subscriptions->report();
auto serverInfoRippled = context.balancer->forwardToRippled(
{{"command", "server_info"}}, context.clientIp, context.yield);
info["load_factor"] = 1;
if (serverInfoRippled && !serverInfoRippled->contains("error"))
info[JS(load_factor)] = 1;
info["clio_version"] = Build::getClioVersionString();
if (serverInfoRippled && !serverInfoRippled->contains(JS(error)))
{
try
{
auto& rippledResult = serverInfoRippled->at("result").as_object();
auto& rippledInfo = rippledResult.at("info").as_object();
info["load_factor"] = rippledInfo["load_factor"];
info["validation_quorum"] = rippledInfo["validation_quorum"];
auto& rippledResult = serverInfoRippled->at(JS(result)).as_object();
auto& rippledInfo = rippledResult.at(JS(info)).as_object();
info[JS(load_factor)] = rippledInfo[JS(load_factor)];
info[JS(validation_quorum)] = rippledInfo[JS(validation_quorum)];
info["rippled_version"] = rippledInfo[JS(build_version)];
}
catch (std::exception const&)
{
}
}
info["validated_ledger"] = boost::json::object{};
boost::json::object& validated = info["validated_ledger"].as_object();
info[JS(validated_ledger)] = boost::json::object{};
boost::json::object& validated = info[JS(validated_ledger)].as_object();
validated["age"] = age;
validated["hash"] = ripple::strHex(lgrInfo->hash);
validated["seq"] = lgrInfo->seq;
validated["base_fee_xrp"] = fees->base.decimalXRP();
validated["reserve_base_xrp"] = fees->reserve.decimalXRP();
validated["reserve_inc_xrp"] = fees->increment.decimalXRP();
validated[JS(age)] = age;
validated[JS(hash)] = ripple::strHex(lgrInfo->hash);
validated[JS(seq)] = lgrInfo->seq;
validated[JS(base_fee_xrp)] = fees->base.decimalXRP();
validated[JS(reserve_base_xrp)] = fees->reserve.decimalXRP();
validated[JS(reserve_inc_xrp)] = fees->increment.decimalXRP();
response["cache"] = boost::json::object{};
auto& cache = response["cache"].as_object();
@@ -83,9 +88,6 @@ doServerInfo(Context const& context)
response["etl"] = context.etl->getInfo();
response["note"] =
"This is a clio server. If you want to talk to rippled, include "
"\"ledger_index\":\"current\" in your request";
return response;
}
} // namespace RPC
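server_info now reports clio_version (from Build::getClioVersionString), relabels the upstream server's build_version as rippled_version, and falls back to load_factor = 1 when the forwarded call fails or returns an error. A minimal sketch of that merge step, with a generic optional result standing in for the balancer's forwarded server_info response:

#include <boost/json.hpp>
#include <exception>
#include <optional>

void
mergeRippledInfo(
    boost::json::object& info,
    std::optional<boost::json::object> const& forwarded)
{
    if (!forwarded || forwarded->contains("error"))
        return;  // keep clio's own defaults
    try
    {
        auto const& rippledInfo =
            forwarded->at("result").as_object().at("info").as_object();
        info["load_factor"] = rippledInfo.at("load_factor");
        info["validation_quorum"] = rippledInfo.at("validation_quorum");
        info["rippled_version"] = rippledInfo.at("build_version");
    }
    catch (std::exception const&)
    {
        // missing or malformed fields: leave whatever is already set
    }
}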

View File

@@ -17,7 +17,7 @@ static std::unordered_set<std::string> validCommonStreams{
Status
validateStreams(boost::json::object const& request)
{
boost::json::array const& streams = request.at("streams").as_array();
boost::json::array const& streams = request.at(JS(streams)).as_array();
for (auto const& stream : streams)
{
@@ -40,7 +40,7 @@ subscribeToStreams(
std::shared_ptr<WsBase> session,
SubscriptionManager& manager)
{
boost::json::array const& streams = request.at("streams").as_array();
boost::json::array const& streams = request.at(JS(streams)).as_array();
boost::json::object response;
for (auto const& stream : streams)
@@ -69,7 +69,7 @@ unsubscribeToStreams(
std::shared_ptr<WsBase> session,
SubscriptionManager& manager)
{
boost::json::array const& streams = request.at("streams").as_array();
boost::json::array const& streams = request.at(JS(streams)).as_array();
for (auto const& stream : streams)
{
@@ -114,7 +114,7 @@ subscribeToAccounts(
std::shared_ptr<WsBase> session,
SubscriptionManager& manager)
{
boost::json::array const& accounts = request.at("accounts").as_array();
boost::json::array const& accounts = request.at(JS(accounts)).as_array();
for (auto const& account : accounts)
{
@@ -138,7 +138,7 @@ unsubscribeToAccounts(
std::shared_ptr<WsBase> session,
SubscriptionManager& manager)
{
boost::json::array const& accounts = request.at("accounts").as_array();
boost::json::array const& accounts = request.at(JS(accounts)).as_array();
for (auto const& account : accounts)
{
@@ -163,7 +163,7 @@ subscribeToAccountsProposed(
SubscriptionManager& manager)
{
boost::json::array const& accounts =
request.at("accounts_proposed").as_array();
request.at(JS(accounts_proposed)).as_array();
for (auto const& account : accounts)
{
@@ -188,7 +188,7 @@ unsubscribeToAccountsProposed(
SubscriptionManager& manager)
{
boost::json::array const& accounts =
request.at("accounts_proposed").as_array();
request.at(JS(accounts_proposed)).as_array();
for (auto const& account : accounts)
{
@@ -212,68 +212,57 @@ validateAndGetBooks(
boost::json::object const& request,
std::shared_ptr<Backend::BackendInterface const> const& backend)
{
if (!request.at("books").is_array())
if (!request.at(JS(books)).is_array())
return Status{Error::rpcINVALID_PARAMS, "booksNotArray"};
boost::json::array const& books = request.at("books").as_array();
boost::json::array const& books = request.at(JS(books)).as_array();
std::vector<ripple::Book> booksToSub;
std::optional<Backend::LedgerRange> rng;
boost::json::array snapshot;
for (auto const& book : books)
{
auto parsed = parseBook(book.as_object());
if (auto status = std::get_if<Status>(&parsed))
auto parsedBook = parseBook(book.as_object());
if (auto status = std::get_if<Status>(&parsedBook))
return *status;
else
auto b = std::get<ripple::Book>(parsedBook);
booksToSub.push_back(b);
bool both = book.as_object().contains(JS(both));
if (both)
booksToSub.push_back(ripple::reversed(b));
if (book.as_object().contains(JS(snapshot)))
{
auto b = std::get<ripple::Book>(parsed);
booksToSub.push_back(b);
bool both = book.as_object().contains("both");
if (!rng)
rng = backend->fetchLedgerRange();
ripple::AccountID takerID = beast::zero;
if (book.as_object().contains(JS(taker)))
if (auto const status = getTaker(book.as_object(), takerID);
status)
return status;
auto getOrderBook = [&snapshot, &backend, &rng, &takerID](
auto book,
boost::asio::yield_context& yield) {
auto bookBase = getBookBase(book);
auto [offers, retMarker] = backend->fetchBookOffers(
bookBase, rng->maxSequence, 200, {}, yield);
auto orderBook = postProcessOrderBook(
offers, book, takerID, *backend, rng->maxSequence, yield);
std::copy(
orderBook.begin(),
orderBook.end(),
std::back_inserter(snapshot));
};
getOrderBook(b, yield);
if (both)
booksToSub.push_back(ripple::reversed(b));
if (book.as_object().contains("snapshot"))
{
if (!rng)
rng = backend->fetchLedgerRange();
ripple::AccountID takerID = beast::zero;
if (book.as_object().contains("taker"))
{
auto parsed = parseTaker(request.at("taker"));
if (auto status = std::get_if<Status>(&parsed))
return *status;
else
{
takerID = std::get<ripple::AccountID>(parsed);
}
}
auto getOrderBook = [&snapshot, &backend, &rng, &takerID](
auto book,
boost::asio::yield_context& yield) {
auto bookBase = getBookBase(book);
auto [offers, retCursor] = backend->fetchBookOffers(
bookBase, rng->maxSequence, 200, {}, yield);
auto orderBook = postProcessOrderBook(
offers,
book,
takerID,
*backend,
rng->maxSequence,
yield);
std::copy(
orderBook.begin(),
orderBook.end(),
std::back_inserter(snapshot));
};
getOrderBook(b, yield);
if (both)
getOrderBook(ripple::reversed(b), yield);
}
getOrderBook(ripple::reversed(b), yield);
}
}
return std::make_pair(booksToSub, snapshot);
}
void
subscribeToBooks(
std::vector<ripple::Book> const& books,
@@ -285,14 +274,33 @@ subscribeToBooks(
manager.subBook(book, session);
}
}
void
unsubscribeToBooks(
std::vector<ripple::Book> const& books,
std::shared_ptr<WsBase> session,
SubscriptionManager& manager)
{
for (auto const& book : books)
{
manager.unsubBook(book, session);
}
}
Result
doSubscribe(Context const& context)
{
auto request = context.params;
if (request.contains("streams"))
if (!request.contains(JS(streams)) && !request.contains(JS(accounts)) &&
!request.contains(JS(accounts_proposed)) &&
!request.contains(JS(books)))
return Status{
Error::rpcINVALID_PARAMS, "does not contain valid subscription"};
if (request.contains(JS(streams)))
{
if (!request.at("streams").is_array())
if (!request.at(JS(streams)).is_array())
return Status{Error::rpcINVALID_PARAMS, "streamsNotArray"};
auto status = validateStreams(request);
@@ -301,33 +309,34 @@ doSubscribe(Context const& context)
return status;
}
if (request.contains("accounts"))
if (request.contains(JS(accounts)))
{
if (!request.at("accounts").is_array())
if (!request.at(JS(accounts)).is_array())
return Status{Error::rpcINVALID_PARAMS, "accountsNotArray"};
boost::json::array accounts = request.at("accounts").as_array();
boost::json::array accounts = request.at(JS(accounts)).as_array();
auto status = validateAccounts(accounts);
if (status)
return status;
}
if (request.contains("accounts_proposed"))
if (request.contains(JS(accounts_proposed)))
{
if (!request.at("accounts_proposed").is_array())
if (!request.at(JS(accounts_proposed)).is_array())
return Status{Error::rpcINVALID_PARAMS, "accountsProposedNotArray"};
boost::json::array accounts =
request.at("accounts_proposed").as_array();
request.at(JS(accounts_proposed)).as_array();
auto status = validateAccounts(accounts);
if (status)
return status;
}
std::vector<ripple::Book> books;
boost::json::array snapshot;
if (request.contains("books"))
if (request.contains(JS(books)))
{
auto parsed =
validateAndGetBooks(context.yield, request, context.backend);
@@ -341,22 +350,22 @@ doSubscribe(Context const& context)
}
boost::json::object response;
if (request.contains("streams"))
if (request.contains(JS(streams)))
response = subscribeToStreams(
context.yield, request, context.session, *context.subscriptions);
if (request.contains("accounts"))
if (request.contains(JS(accounts)))
subscribeToAccounts(request, context.session, *context.subscriptions);
if (request.contains("accounts_proposed"))
if (request.contains(JS(accounts_proposed)))
subscribeToAccountsProposed(
request, context.session, *context.subscriptions);
if (request.contains("books"))
if (request.contains(JS(books)))
subscribeToBooks(books, context.session, *context.subscriptions);
if (snapshot.size())
response["offers"] = snapshot;
response[JS(offers)] = snapshot;
return response;
}
@@ -365,9 +374,15 @@ doUnsubscribe(Context const& context)
{
auto request = context.params;
if (request.contains("streams"))
if (!request.contains(JS(streams)) && !request.contains(JS(accounts)) &&
!request.contains(JS(accounts_proposed)) &&
!request.contains(JS(books)))
return Status{
Error::rpcINVALID_PARAMS, "does not contain valid subscription"};
if (request.contains(JS(streams)))
{
if (!request.at("streams").is_array())
if (!request.at(JS(streams)).is_array())
return Status{Error::rpcINVALID_PARAMS, "streamsNotArray"};
auto status = validateStreams(request);
@@ -376,41 +391,60 @@ doUnsubscribe(Context const& context)
return status;
}
if (request.contains("accounts"))
if (request.contains(JS(accounts)))
{
if (!request.at("accounts").is_array())
if (!request.at(JS(accounts)).is_array())
return Status{Error::rpcINVALID_PARAMS, "accountsNotArray"};
boost::json::array accounts = request.at("accounts").as_array();
boost::json::array accounts = request.at(JS(accounts)).as_array();
auto status = validateAccounts(accounts);
if (status)
return status;
}
if (request.contains("accounts_proposed"))
if (request.contains(JS(accounts_proposed)))
{
if (!request.at("accounts_proposed").is_array())
if (!request.at(JS(accounts_proposed)).is_array())
return Status{Error::rpcINVALID_PARAMS, "accountsProposedNotArray"};
boost::json::array accounts =
request.at("accounts_proposed").as_array();
request.at(JS(accounts_proposed)).as_array();
auto status = validateAccounts(accounts);
if (status)
return status;
}
if (request.contains("streams"))
std::vector<ripple::Book> books;
if (request.contains(JS(books)))
{
auto parsed =
validateAndGetBooks(context.yield, request, context.backend);
if (auto status = std::get_if<Status>(&parsed))
return *status;
auto [bks, snap] =
std::get<std::pair<std::vector<ripple::Book>, boost::json::array>>(
parsed);
books = std::move(bks);
}
if (request.contains(JS(streams)))
unsubscribeToStreams(request, context.session, *context.subscriptions);
if (request.contains("accounts"))
if (request.contains(JS(accounts)))
unsubscribeToAccounts(request, context.session, *context.subscriptions);
if (request.contains("accounts_proposed"))
if (request.contains(JS(accounts_proposed)))
unsubscribeToAccountsProposed(
request, context.session, *context.subscriptions);
if (request.contains("books"))
unsubscribeToBooks(books, context.session, *context.subscriptions);
boost::json::object response = {{"status", "success"}};
return response;
}
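doSubscribe and doUnsubscribe now reject a request that names none of the four supported fields, and unsubscribe learns to resolve books so it can drop them symmetrically. For reference, an illustrative request touching streams, accounts and books; the addresses are placeholders, the book legs follow the standard rippled subscribe format, and a book entry carrying "snapshot" is what populates the "offers" array returned above:

#include <boost/json.hpp>

boost::json::object
exampleSubscribeRequest()
{
    return boost::json::parse(R"({
        "streams": ["ledger", "transactions"],
        "accounts": ["rAliceXXXXXXXXXXXXXXXXXXXXXXXXXXXX"],
        "books": [{
            "taker_pays": {"currency": "XRP"},
            "taker_gets": {"currency": "USD",
                           "issuer": "rIssuerXXXXXXXXXXXXXXXXXXXXXXXXXXX"},
            "snapshot": true,
            "both": true
        }]
    })").as_object();
}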

View File

@@ -13,7 +13,7 @@ doTransactionEntry(Context const& context)
auto lgrInfo = std::get<ripple::LedgerInfo>(v);
ripple::uint256 hash;
if (!hash.parseHex(getRequiredString(context.params, "tx_hash")))
if (!hash.parseHex(getRequiredString(context.params, JS(tx_hash))))
return Status{Error::rpcINVALID_PARAMS, "malformedTransaction"};
auto dbResponse = context.backend->fetchTransaction(hash, context.yield);
@@ -33,10 +33,10 @@ doTransactionEntry(Context const& context)
"Transaction not found."};
auto [txn, meta] = toExpandedJson(*dbResponse);
response["tx_json"] = std::move(txn);
response["metadata"] = std::move(meta);
response["ledger_index"] = lgrInfo.seq;
response["ledger_hash"] = ripple::strHex(lgrInfo.hash);
response[JS(tx_json)] = std::move(txn);
response[JS(metadata)] = std::move(meta);
response[JS(ledger_index)] = lgrInfo.seq;
response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash);
return response;
}

View File

@@ -14,23 +14,23 @@ doTx(Context const& context)
auto request = context.params;
boost::json::object response = {};
if (!request.contains("transaction"))
if (!request.contains(JS(transaction)))
return Status{Error::rpcINVALID_PARAMS, "specifyTransaction"};
if (!request.at("transaction").is_string())
if (!request.at(JS(transaction)).is_string())
return Status{Error::rpcINVALID_PARAMS, "transactionNotString"};
ripple::uint256 hash;
if (!hash.parseHex(request.at("transaction").as_string().c_str()))
if (!hash.parseHex(request.at(JS(transaction)).as_string().c_str()))
return Status{Error::rpcINVALID_PARAMS, "malformedTransaction"};
bool binary = false;
if (request.contains("binary"))
if (request.contains(JS(binary)))
{
if (!request.at("binary").is_bool())
if (!request.at(JS(binary)).is_bool())
return Status{Error::rpcINVALID_PARAMS, "binaryFlagNotBool"};
binary = request.at("binary").as_bool();
binary = request.at(JS(binary)).as_bool();
}
auto range = context.backend->fetchLedgerRange();
@@ -45,16 +45,16 @@ doTx(Context const& context)
{
auto [txn, meta] = toExpandedJson(*dbResponse);
response = txn;
response["meta"] = meta;
response[JS(meta)] = meta;
}
else
{
response["tx"] = ripple::strHex(dbResponse->transaction);
response["meta"] = ripple::strHex(dbResponse->metadata);
response["hash"] = std::move(request.at("transaction").as_string());
response[JS(tx)] = ripple::strHex(dbResponse->transaction);
response[JS(meta)] = ripple::strHex(dbResponse->metadata);
response[JS(hash)] = std::move(request.at(JS(transaction)).as_string());
}
response["date"] = dbResponse->date;
response["ledger_index"] = dbResponse->ledgerSequence;
response[JS(date)] = dbResponse->date;
response[JS(ledger_index)] = dbResponse->ledgerSequence;
return response;
}

View File

@@ -0,0 +1,40 @@
#ifndef CLIO_SUBSCRIPTION_MESSAGE_H
#define CLIO_SUBSCRIPTION_MESSAGE_H
#include <string>
// This class should only be constructed once, then it can
// be read from in parallel by many websocket senders
class Message
{
std::string message_;
public:
Message() = delete;
Message(std::string&& message) : message_(std::move(message))
{
}
Message(Message const&) = delete;
Message(Message&&) = delete;
Message&
operator=(Message const&) = delete;
Message&
operator=(Message&&) = delete;
~Message() = default;
char*
data()
{
return message_.data();
}
std::size_t
size()
{
return message_.size();
}
};
#endif // CLIO_SUBSCRIPTION_MESSAGE_H
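The new Message wrapper exists so that a published update is serialized exactly once and then shared, via shared_ptr, across every subscriber's send queue; copy and move are deleted so the buffer is only ever shared, never duplicated per subscriber. A tiny self-contained sketch of the pattern with stand-in types (the real sender is WsBase):

#include <memory>
#include <string>
#include <utility>
#include <vector>

struct SharedMessage  // stand-in for Message
{
    std::string body;
};

struct QueueingSession  // stand-in for a WsBase session
{
    std::vector<std::shared_ptr<SharedMessage>> outbox;
    void
    send(std::shared_ptr<SharedMessage> msg)
    {
        outbox.push_back(std::move(msg));
    }
};

void
publish(std::string serialized, std::vector<QueueingSession>& sessions)
{
    auto msg = std::make_shared<SharedMessage>(SharedMessage{std::move(serialized)});
    for (auto& session : sessions)
        session.send(msg);  // one heap buffer, shared by every queue
}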

View File

@@ -5,25 +5,24 @@
template <class T>
inline void
sendToSubscribers(
std::string const& message,
std::shared_ptr<Message> const& message,
T& subscribers,
boost::asio::io_context::strand& strand)
std::atomic_uint64_t& counter)
{
boost::asio::post(strand, [&subscribers, message]() {
for (auto it = subscribers.begin(); it != subscribers.end();)
for (auto it = subscribers.begin(); it != subscribers.end();)
{
auto& session = *it;
if (session->dead())
{
auto& session = *it;
if (session->dead())
{
it = subscribers.erase(it);
}
else
{
session->send(message);
++it;
}
it = subscribers.erase(it);
--counter;
}
});
else
{
session->send(message);
++it;
}
}
}
template <class T>
@@ -31,11 +30,13 @@ inline void
addSession(
std::shared_ptr<WsBase> session,
T& subscribers,
boost::asio::io_context::strand& strand)
std::atomic_uint64_t& counter)
{
boost::asio::post(strand, [&subscribers, s = std::move(session)]() {
subscribers.emplace(s);
});
if (!subscribers.contains(session))
{
subscribers.insert(session);
++counter;
}
}
template <class T>
@@ -43,29 +44,37 @@ inline void
removeSession(
std::shared_ptr<WsBase> session,
T& subscribers,
boost::asio::io_context::strand& strand)
std::atomic_uint64_t& counter)
{
boost::asio::post(strand, [&subscribers, s = std::move(session)]() {
subscribers.erase(s);
});
if (subscribers.contains(session))
{
subscribers.erase(session);
--counter;
}
}
void
Subscription::subscribe(std::shared_ptr<WsBase> const& session)
{
addSession(session, subscribers_, strand_);
boost::asio::post(strand_, [this, session]() {
addSession(session, subscribers_, subCount_);
});
}
void
Subscription::unsubscribe(std::shared_ptr<WsBase> const& session)
{
removeSession(session, subscribers_, strand_);
boost::asio::post(strand_, [this, session]() {
removeSession(session, subscribers_, subCount_);
});
}
void
Subscription::publish(std::string const& message)
Subscription::publish(std::shared_ptr<Message>& message)
{
sendToSubscribers(message, subscribers_, strand_);
boost::asio::post(strand_, [this, message]() {
sendToSubscribers(message, subscribers_, subCount_);
});
}
template <class Key>
@@ -74,7 +83,9 @@ SubscriptionMap<Key>::subscribe(
std::shared_ptr<WsBase> const& session,
Key const& account)
{
addSession(session, subscribers_[account], strand_);
boost::asio::post(strand_, [this, session, account]() {
addSession(session, subscribers_[account], subCount_);
});
}
template <class Key>
@@ -83,14 +94,36 @@ SubscriptionMap<Key>::unsubscribe(
std::shared_ptr<WsBase> const& session,
Key const& account)
{
removeSession(session, subscribers_[account], strand_);
boost::asio::post(strand_, [this, account, session]() {
if (!subscribers_.contains(account))
return;
if (!subscribers_[account].contains(session))
return;
--subCount_;
subscribers_[account].erase(session);
if (subscribers_[account].size() == 0)
{
subscribers_.erase(account);
}
});
}
template <class Key>
void
SubscriptionMap<Key>::publish(std::string const& message, Key const& account)
SubscriptionMap<Key>::publish(
std::shared_ptr<Message>& message,
Key const& account)
{
sendToSubscribers(message, subscribers_[account], strand_);
boost::asio::post(strand_, [this, account, message]() {
if (!subscribers_.contains(account))
return;
sendToSubscribers(message, subscribers_[account], subCount_);
});
}
boost::json::object
@@ -120,7 +153,7 @@ getLedgerPubMessage(
boost::json::object
SubscriptionManager::subLedger(
boost::asio::yield_context& yield,
std::shared_ptr<WsBase>& session)
std::shared_ptr<WsBase> session)
{
ledgerSubscribers_.subscribe(session);
@@ -144,19 +177,19 @@ SubscriptionManager::subLedger(
}
void
SubscriptionManager::unsubLedger(std::shared_ptr<WsBase>& session)
SubscriptionManager::unsubLedger(std::shared_ptr<WsBase> session)
{
ledgerSubscribers_.unsubscribe(session);
}
void
SubscriptionManager::subTransactions(std::shared_ptr<WsBase>& session)
SubscriptionManager::subTransactions(std::shared_ptr<WsBase> session)
{
txSubscribers_.subscribe(session);
}
void
SubscriptionManager::unsubTransactions(std::shared_ptr<WsBase>& session)
SubscriptionManager::unsubTransactions(std::shared_ptr<WsBase> session)
{
txSubscribers_.unsubscribe(session);
}
@@ -167,6 +200,11 @@ SubscriptionManager::subAccount(
std::shared_ptr<WsBase>& session)
{
accountSubscribers_.subscribe(session, account);
std::unique_lock lk(cleanupMtx_);
cleanupFuncs_[session].emplace_back([this, account](session_ptr session) {
unsubAccount(account, session);
});
}
void
@@ -180,15 +218,19 @@ SubscriptionManager::unsubAccount(
void
SubscriptionManager::subBook(
ripple::Book const& book,
std::shared_ptr<WsBase>& session)
std::shared_ptr<WsBase> session)
{
bookSubscribers_.subscribe(session, book);
std::unique_lock lk(cleanupMtx_);
cleanupFuncs_[session].emplace_back(
[this, book](session_ptr session) { unsubBook(book, session); });
}
void
SubscriptionManager::unsubBook(
ripple::Book const& book,
std::shared_ptr<WsBase>& session)
std::shared_ptr<WsBase> session)
{
bookSubscribers_.unsubscribe(session, book);
}
@@ -200,8 +242,10 @@ SubscriptionManager::pubLedger(
std::string const& ledgerRange,
std::uint32_t txnCount)
{
ledgerSubscribers_.publish(boost::json::serialize(
auto message = std::make_shared<Message>(boost::json::serialize(
getLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount)));
ledgerSubscribers_.publish(message);
}
void
@@ -250,10 +294,9 @@ SubscriptionManager::pubTransaction(
}
}
std::string pubMsg{boost::json::serialize(pubObj)};
auto pubMsg = std::make_shared<Message>(boost::json::serialize(pubObj));
txSubscribers_.publish(pubMsg);
auto journal = ripple::debugLog();
auto accounts = meta->getAffectedAccounts();
for (auto const& account : accounts)
@@ -305,7 +348,7 @@ void
SubscriptionManager::forwardProposedTransaction(
boost::json::object const& response)
{
std::string pubMsg{boost::json::serialize(response)};
auto pubMsg = std::make_shared<Message>(boost::json::serialize(response));
txProposedSubscribers_.publish(pubMsg);
auto transaction = response.at("transaction").as_object();
@@ -318,45 +361,45 @@ SubscriptionManager::forwardProposedTransaction(
void
SubscriptionManager::forwardManifest(boost::json::object const& response)
{
std::string pubMsg{boost::json::serialize(response)};
auto pubMsg = std::make_shared<Message>(boost::json::serialize(response));
manifestSubscribers_.publish(pubMsg);
}
void
SubscriptionManager::forwardValidation(boost::json::object const& response)
{
std::string pubMsg{boost::json::serialize(response)};
validationsSubscribers_.publish(std::move(pubMsg));
auto pubMsg = std::make_shared<Message>(boost::json::serialize(response));
validationsSubscribers_.publish(pubMsg);
}
void
SubscriptionManager::subProposedAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session)
std::shared_ptr<WsBase> session)
{
accountProposedSubscribers_.subscribe(session, account);
}
void
SubscriptionManager::subManifest(std::shared_ptr<WsBase>& session)
SubscriptionManager::subManifest(std::shared_ptr<WsBase> session)
{
manifestSubscribers_.subscribe(session);
}
void
SubscriptionManager::unsubManifest(std::shared_ptr<WsBase>& session)
SubscriptionManager::unsubManifest(std::shared_ptr<WsBase> session)
{
manifestSubscribers_.unsubscribe(session);
}
void
SubscriptionManager::subValidation(std::shared_ptr<WsBase>& session)
SubscriptionManager::subValidation(std::shared_ptr<WsBase> session)
{
validationsSubscribers_.subscribe(session);
}
void
SubscriptionManager::unsubValidation(std::shared_ptr<WsBase>& session)
SubscriptionManager::unsubValidation(std::shared_ptr<WsBase> session)
{
validationsSubscribers_.unsubscribe(session);
}
@@ -364,19 +407,34 @@ SubscriptionManager::unsubValidation(std::shared_ptr<WsBase>& session)
void
SubscriptionManager::unsubProposedAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session)
std::shared_ptr<WsBase> session)
{
accountProposedSubscribers_.unsubscribe(session, account);
}
void
SubscriptionManager::subProposedTransactions(std::shared_ptr<WsBase>& session)
SubscriptionManager::subProposedTransactions(std::shared_ptr<WsBase> session)
{
txProposedSubscribers_.subscribe(session);
}
void
SubscriptionManager::unsubProposedTransactions(std::shared_ptr<WsBase>& session)
SubscriptionManager::unsubProposedTransactions(std::shared_ptr<WsBase> session)
{
txProposedSubscribers_.unsubscribe(session);
}
void
SubscriptionManager::cleanup(std::shared_ptr<WsBase> session)
{
std::unique_lock lk(cleanupMtx_);
if (!cleanupFuncs_.contains(session))
return;
for (auto f : cleanupFuncs_[session])
{
f(session);
}
cleanupFuncs_.erase(session);
}
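Subscription and SubscriptionMap keep the single-strand discipline, but every mutation is now posted onto the strand from the public methods themselves, and an atomic subCount_ shadows the container size so report() can read it from any thread without touching the strand. A reduced sketch of that shape; Topic and the int-pointer subscriber are illustrative stand-ins, not Clio types:

#include <boost/asio.hpp>
#include <atomic>
#include <cstdint>
#include <memory>
#include <set>

class Topic
{
    boost::asio::io_context::strand strand_;
    std::set<std::shared_ptr<int>> subscribers_;  // stand-in for WsBase sessions
    std::atomic_uint64_t count_ = 0;

public:
    explicit Topic(boost::asio::io_context& ioc) : strand_(ioc)
    {
    }

    void
    subscribe(std::shared_ptr<int> const& session)
    {
        // The set is only ever touched on the strand; only the counter
        // is shared with readers on other threads.
        boost::asio::post(strand_, [this, session]() {
            if (subscribers_.insert(session).second)
                ++count_;
        });
    }

    std::uint64_t
    count() const
    {
        return count_.load();
    }
};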

View File

@@ -3,6 +3,7 @@
#include <backend/BackendInterface.h>
#include <memory>
#include <subscriptions/Message.h>
class WsBase;
@@ -10,6 +11,7 @@ class Subscription
{
boost::asio::io_context::strand strand_;
std::unordered_set<std::shared_ptr<WsBase>> subscribers_ = {};
std::atomic_uint64_t subCount_ = 0;
public:
Subscription() = delete;
@@ -29,16 +31,24 @@ public:
unsubscribe(std::shared_ptr<WsBase> const& session);
void
publish(std::string const& message);
publish(std::shared_ptr<Message>& message);
std::uint64_t
count()
{
return subCount_.load();
}
};
template <class Key>
class SubscriptionMap
{
using subscribers = std::unordered_set<std::shared_ptr<WsBase>>;
using ptr = std::shared_ptr<WsBase>;
using subscribers = std::set<ptr>;
boost::asio::io_context::strand strand_;
std::unordered_map<Key, subscribers> subscribers_ = {};
std::atomic_uint64_t subCount_ = 0;
public:
SubscriptionMap() = delete;
@@ -58,11 +68,19 @@ public:
unsubscribe(std::shared_ptr<WsBase> const& session, Key const& key);
void
publish(std::string const& message, Key const& key);
publish(std::shared_ptr<Message>& message, Key const& key);
std::uint64_t
count()
{
return subCount_.load();
}
};
class SubscriptionManager
{
using session_ptr = std::shared_ptr<WsBase>;
std::vector<std::thread> workers_;
boost::asio::io_context ioc_;
std::optional<boost::asio::io_context::work> work_;
@@ -132,9 +150,7 @@ public:
}
boost::json::object
subLedger(
boost::asio::yield_context& yield,
std::shared_ptr<WsBase>& session);
subLedger(boost::asio::yield_context& yield, session_ptr session);
void
pubLedger(
@@ -144,13 +160,13 @@ public:
std::uint32_t txnCount);
void
unsubLedger(std::shared_ptr<WsBase>& session);
unsubLedger(session_ptr session);
void
subTransactions(std::shared_ptr<WsBase>& session);
subTransactions(session_ptr session);
void
unsubTransactions(std::shared_ptr<WsBase>& session);
unsubTransactions(session_ptr session);
void
pubTransaction(
@@ -158,32 +174,28 @@ public:
ripple::LedgerInfo const& lgrInfo);
void
subAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session);
subAccount(ripple::AccountID const& account, session_ptr& session);
void
unsubAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session);
unsubAccount(ripple::AccountID const& account, session_ptr& session);
void
subBook(ripple::Book const& book, std::shared_ptr<WsBase>& session);
subBook(ripple::Book const& book, session_ptr session);
void
unsubBook(ripple::Book const& book, std::shared_ptr<WsBase>& session);
unsubBook(ripple::Book const& book, session_ptr session);
void
subManifest(std::shared_ptr<WsBase>& session);
subManifest(session_ptr session);
void
unsubManifest(std::shared_ptr<WsBase>& session);
unsubManifest(session_ptr session);
void
subValidation(std::shared_ptr<WsBase>& session);
subValidation(session_ptr session);
void
unsubValidation(std::shared_ptr<WsBase>& session);
unsubValidation(session_ptr session);
void
forwardProposedTransaction(boost::json::object const& response);
@@ -195,26 +207,51 @@ public:
forwardValidation(boost::json::object const& response);
void
subProposedAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session);
subProposedAccount(ripple::AccountID const& account, session_ptr session);
void
unsubProposedAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session);
unsubProposedAccount(ripple::AccountID const& account, session_ptr session);
void
subProposedTransactions(std::shared_ptr<WsBase>& session);
subProposedTransactions(session_ptr session);
void
unsubProposedTransactions(std::shared_ptr<WsBase>& session);
unsubProposedTransactions(session_ptr session);
void
cleanup(session_ptr session);
boost::json::object
report()
{
boost::json::object counts = {};
counts["ledger"] = ledgerSubscribers_.count();
counts["transactions"] = txSubscribers_.count();
counts["transactions_proposed"] = txProposedSubscribers_.count();
counts["manifests"] = manifestSubscribers_.count();
counts["validations"] = validationsSubscribers_.count();
counts["account"] = accountSubscribers_.count();
counts["accounts_proposed"] = accountProposedSubscribers_.count();
counts["books"] = bookSubscribers_.count();
return counts;
}
private:
void
sendAll(
std::string const& pubMsg,
std::unordered_set<std::shared_ptr<WsBase>>& subs);
sendAll(std::string const& pubMsg, std::unordered_set<session_ptr>& subs);
/**
* This is how we chose to cleanup subscriptions that have been closed.
* Each time we add a subscriber, we add the opposite lambda that
* unsubscribes that subscriber when cleanup is called with the session that
* closed.
*/
using CleanupFunction = std::function<void(session_ptr)>;
std::mutex cleanupMtx_;
std::unordered_map<session_ptr, std::vector<CleanupFunction>>
cleanupFuncs_ = {};
};
#endif // SUBSCRIPTION_MANAGER_H

View File

@@ -92,6 +92,12 @@ public:
});
}
bool
isWhiteListed(std::string const& ip)
{
return whitelist_.contains(ip);
}
bool
isOk(std::string const& ip)
{

View File

@@ -20,6 +20,7 @@
#include <rpc/Counters.h>
#include <rpc/RPC.h>
#include <rpc/WorkQueue.h>
#include <vector>
#include <webserver/DOSGuard.h>
@@ -92,6 +93,7 @@ class HttpBase
std::shared_ptr<ReportingETL const> etl_;
DOSGuard& dosGuard_;
RPC::Counters& counters_;
WorkQueue& workQueue_;
send_lambda lambda_;
protected:
@@ -146,6 +148,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer buffer)
: ioc_(ioc)
, backend_(backend)
@@ -154,6 +157,7 @@ public:
, etl_(etl)
, dosGuard_(dosGuard)
, counters_(counters)
, workQueue_(queue)
, lambda_(*this)
, buffer_(std::move(buffer))
{
@@ -208,7 +212,8 @@ public:
balancer_,
etl_,
dosGuard_,
counters_);
counters_,
workQueue_);
}
auto ip = derived().ip();
@@ -220,21 +225,32 @@ public:
// Requests are handed using coroutines. Here we spawn a coroutine
// which will asynchronously handle a request.
boost::asio::spawn(
derived().stream().get_executor(),
[this, ip, session](boost::asio::yield_context yield) {
handle_request(
yield,
std::move(req_),
lambda_,
backend_,
balancer_,
etl_,
dosGuard_,
counters_,
*ip,
session);
});
if (!workQueue_.postCoro(
[this, ip, session](boost::asio::yield_context yield) {
handle_request(
yield,
std::move(req_),
lambda_,
backend_,
subscriptions_,
balancer_,
etl_,
dosGuard_,
counters_,
*ip,
session);
},
dosGuard_.isWhiteListed(*ip)))
{
http::response<http::string_body> res{
http::status::ok, req_.version()};
res.set(http::field::server, "clio-server-v0.0.0");
res.set(http::field::content_type, "application/json");
res.keep_alive(req_.keep_alive());
res.body() = "Server overloaded";
res.prepare_payload();
lambda_(std::move(res));
}
}
void
@@ -275,6 +291,7 @@ handle_request(
request<Body, boost::beast::http::basic_fields<Allocator>>&& req,
Send&& send,
std::shared_ptr<BackendInterface const> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
@@ -313,7 +330,7 @@ handle_request(
try
{
BOOST_LOG_TRIVIAL(info) << "Received request: " << req.body();
BOOST_LOG_TRIVIAL(debug) << "Received request: " << req.body();
boost::json::object request;
std::string responseStr = "";
@@ -349,7 +366,15 @@ handle_request(
RPC::make_error(RPC::Error::rpcNOT_READY))));
std::optional<RPC::Context> context = RPC::make_HttpContext(
yc, request, backend, nullptr, balancer, etl, *range, counters, ip);
yc,
request,
backend,
subscriptions,
balancer,
etl,
*range,
counters,
ip);
if (!context)
return send(httpResponse(
@@ -377,7 +402,6 @@ handle_request(
result = error;
responseStr = boost::json::serialize(response);
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " Encountered error: " << responseStr;
}
@@ -391,13 +415,25 @@ handle_request(
if (!result.contains("error"))
result["status"] = "success";
responseStr = boost::json::serialize(response);
}
boost::json::array warnings;
warnings.emplace_back(
"This is a clio server. clio only serves validated data. If you "
"want to talk to rippled, include 'ledger_index':'current' in your "
"request");
auto lastPublishAge = context->etl->lastPublishAgeSeconds();
if (lastPublishAge >= 60)
warnings.emplace_back("This server may be out of date");
result["warnings"] = warnings;
responseStr = boost::json::serialize(response);
if (!dosGuard.add(ip, responseStr.size()))
result["warning"] = "Too many requests";
{
warnings.emplace_back("Too many requests");
response["warnings"] = warnings;
// reserialize when we need to include this warning
responseStr = boost::json::serialize(response);
}
return send(
httpResponse(http::status::ok, "application/json", responseStr));
}
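Request handling now goes through a WorkQueue: each request is posted as a coroutine, and when postCoro declines (presumably because the queue has hit its configured maximum and the client is not whitelisted in the DOSGuard) the session immediately answers with a "Server overloaded" body instead of queueing. The WorkQueue itself is not part of this diff; a hedged sketch of the contract the call site implies, with a synchronous stand-in for the worker pool:

#include <atomic>
#include <cstdint>
#include <functional>

// Hypothetical bounded queue illustrating the postCoro() contract used
// above; the real WorkQueue (rpc/WorkQueue.h) hands jobs to its own
// worker threads rather than running them inline.
class BoundedQueue
{
    std::atomic_uint64_t queued_ = 0;
    std::uint64_t const maxSize_;  // 0 means "no limit"

public:
    explicit BoundedQueue(std::uint64_t maxSize) : maxSize_(maxSize)
    {
    }

    // Returns false when the job is rejected; whitelisted clients are
    // always accepted.
    bool
    post(std::function<void()> job, bool isWhiteListed)
    {
        if (!isWhiteListed && maxSize_ != 0 && queued_.load() >= maxSize_)
            return false;
        ++queued_;
        job();  // stand-in for dispatching to a worker thread
        --queued_;
        return true;
    }
};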

View File

@@ -25,6 +25,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer buffer)
: HttpBase<HttpSession>(
ioc,
@@ -34,6 +35,7 @@ public:
etl,
dosGuard,
counters,
queue,
std::move(buffer))
, stream_(std::move(socket))
{

View File

@@ -30,6 +30,7 @@ class Detector
std::shared_ptr<ReportingETL const> etl_;
DOSGuard& dosGuard_;
RPC::Counters& counters_;
WorkQueue& queue_;
boost::beast::flat_buffer buffer_;
public:
@@ -42,7 +43,8 @@ public:
std::shared_ptr<ETLLoadBalancer> balancer,
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters)
RPC::Counters& counters,
WorkQueue& queue)
: ioc_(ioc)
, stream_(std::move(socket))
, ctx_(ctx)
@@ -52,6 +54,7 @@ public:
, etl_(etl)
, dosGuard_(dosGuard)
, counters_(counters)
, queue_(queue)
{
}
@@ -101,6 +104,7 @@ public:
etl_,
dosGuard_,
counters_,
queue_,
std::move(buffer_))
->run();
return;
@@ -116,6 +120,7 @@ public:
etl_,
dosGuard_,
counters_,
queue_,
std::move(buffer_))
->run();
}
@@ -132,7 +137,8 @@ make_websocket_session(
std::shared_ptr<ETLLoadBalancer> balancer,
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters)
RPC::Counters& counters,
WorkQueue& queue)
{
std::make_shared<WsUpgrader>(
ioc,
@@ -143,6 +149,7 @@ make_websocket_session(
etl,
dosGuard,
counters,
queue,
std::move(buffer),
std::move(req))
->run();
@@ -159,7 +166,8 @@ make_websocket_session(
std::shared_ptr<ETLLoadBalancer> balancer,
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters)
RPC::Counters& counters,
WorkQueue& queue)
{
std::make_shared<SslWsUpgrader>(
ioc,
@@ -170,6 +178,7 @@ make_websocket_session(
etl,
dosGuard,
counters,
queue,
std::move(buffer),
std::move(req))
->run();
@@ -190,11 +199,14 @@ class Listener
std::shared_ptr<ETLLoadBalancer> balancer_;
std::shared_ptr<ReportingETL const> etl_;
DOSGuard& dosGuard_;
WorkQueue queue_;
RPC::Counters counters_;
public:
Listener(
boost::asio::io_context& ioc,
uint32_t numWorkerThreads,
uint32_t maxQueueSize,
std::optional<std::reference_wrapper<ssl::context>> ctx,
tcp::endpoint endpoint,
std::shared_ptr<BackendInterface const> backend,
@@ -210,6 +222,7 @@ public:
, balancer_(balancer)
, etl_(etl)
, dosGuard_(dosGuard)
, queue_(numWorkerThreads, maxQueueSize)
{
boost::beast::error_code ec;
@@ -271,7 +284,8 @@ private:
balancer_,
etl_,
dosGuard_,
counters_)
counters_,
queue_)
->run();
}
@@ -306,8 +320,19 @@ make_HttpServer(
auto const port =
static_cast<unsigned short>(serverConfig.at("port").as_int64());
uint32_t numThreads = std::thread::hardware_concurrency();
if (serverConfig.contains("workers"))
numThreads = serverConfig.at("workers").as_int64();
uint32_t maxQueueSize = 0; // no max
if (serverConfig.contains("max_queue_size"))
maxQueueSize = serverConfig.at("max_queue_size").as_int64();
BOOST_LOG_TRIVIAL(info) << __func__ << " Number of workers = " << numThreads
<< ". Max queue size = " << maxQueueSize;
auto server = std::make_shared<HttpServer>(
ioc,
numThreads,
maxQueueSize,
sslCtx,
boost::asio::ip::tcp::endpoint{address, port},
backend,
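make_HttpServer reads two new keys from the server section of the config: "workers" (defaulting to std::thread::hardware_concurrency()) sizes the WorkQueue's thread pool, and "max_queue_size" (default 0, meaning no cap) bounds how many requests may wait before new ones are rejected. An illustrative fragment with made-up values; only the key names come from the diff:

#include <boost/json.hpp>

boost::json::object
exampleServerConfig()
{
    return boost::json::parse(R"({
        "port": 51233,
        "workers": 4,
        "max_queue_size": 500
    })").as_object();
}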

View File

@@ -38,6 +38,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer&& buffer)
: WsSession(
ioc,
@@ -47,6 +48,7 @@ public:
etl,
dosGuard,
counters,
queue,
std::move(buffer))
, ws_(std::move(socket))
{
@@ -91,6 +93,7 @@ class WsUpgrader : public std::enable_shared_from_this<WsUpgrader>
std::shared_ptr<ReportingETL const> etl_;
DOSGuard& dosGuard_;
RPC::Counters& counters_;
WorkQueue& queue_;
http::request<http::string_body> req_;
public:
@@ -103,6 +106,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer&& b)
: ioc_(ioc)
, http_(std::move(socket))
@@ -113,6 +117,7 @@ public:
, etl_(etl)
, dosGuard_(dosGuard)
, counters_(counters)
, queue_(queue)
{
}
WsUpgrader(
@@ -124,6 +129,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer&& b,
http::request<http::string_body> req)
: ioc_(ioc)
@@ -135,6 +141,7 @@ public:
, etl_(etl)
, dosGuard_(dosGuard)
, counters_(counters)
, queue_(queue)
, req_(std::move(req))
{
}
@@ -190,6 +197,7 @@ private:
etl_,
dosGuard_,
counters_,
queue_,
std::move(buffer_))
->run(std::move(req_));
}


@@ -26,6 +26,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer buffer)
: HttpBase<SslHttpSession>(
ioc,
@@ -35,6 +36,7 @@ public:
etl,
dosGuard,
counters,
queue,
std::move(buffer))
, stream_(std::move(socket), ctx)
{


@@ -36,6 +36,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer&& b)
: WsSession(
ioc,
@@ -45,6 +46,7 @@ public:
etl,
dosGuard,
counters,
queue,
std::move(b))
, ws_(std::move(stream))
{
@@ -88,6 +90,7 @@ class SslWsUpgrader : public std::enable_shared_from_this<SslWsUpgrader>
std::shared_ptr<ReportingETL const> etl_;
DOSGuard& dosGuard_;
RPC::Counters& counters_;
WorkQueue& queue_;
http::request<http::string_body> req_;
public:
@@ -101,6 +104,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer&& b)
: ioc_(ioc)
, https_(std::move(socket), ctx)
@@ -111,6 +115,7 @@ public:
, etl_(etl)
, dosGuard_(dosGuard)
, counters_(counters)
, queue_(queue)
{
}
SslWsUpgrader(
@@ -122,6 +127,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer&& b,
http::request<http::string_body> req)
: ioc_(ioc)
@@ -133,6 +139,7 @@ public:
, etl_(etl)
, dosGuard_(dosGuard)
, counters_(counters)
, queue_(queue)
, req_(std::move(req))
{
}
@@ -203,6 +210,7 @@ private:
etl_,
dosGuard_,
counters_,
queue_,
std::move(buffer_))
->run(std::move(req_));
}


@@ -9,8 +9,11 @@
#include <backend/BackendInterface.h>
#include <etl/ETLSource.h>
#include <etl/ReportingETL.h>
#include <rpc/Counters.h>
#include <rpc/RPC.h>
#include <rpc/WorkQueue.h>
#include <subscriptions/Message.h>
#include <subscriptions/SubscriptionManager.h>
#include <webserver/DOSGuard.h>
@@ -49,7 +52,7 @@ protected:
public:
// Send, that enables SubscriptionManager to publish to clients
virtual void
send(std::string const& msg) = 0;
send(std::shared_ptr<Message> msg) = 0;
virtual ~WsBase()
{
@@ -84,10 +87,11 @@ class WsSession : public WsBase,
std::shared_ptr<ReportingETL const> etl_;
DOSGuard& dosGuard_;
RPC::Counters& counters_;
WorkQueue& queue_;
std::mutex mtx_;
bool sending_ = false;
std::queue<std::string> messages_;
std::queue<std::shared_ptr<Message>> messages_;
void
wsFail(boost::beast::error_code ec, char const* what)
@@ -98,6 +102,9 @@ class WsSession : public WsBase,
BOOST_LOG_TRIVIAL(info)
<< "wsFail: " << what << ": " << ec.message();
boost::beast::get_lowest_layer(derived().ws()).socket().close(ec);
if (auto manager = subscriptions_.lock(); manager)
manager->cleanup(derived().shared_from_this());
}
}
@@ -110,6 +117,7 @@ public:
std::shared_ptr<ReportingETL const> etl,
DOSGuard& dosGuard,
RPC::Counters& counters,
WorkQueue& queue,
boost::beast::flat_buffer&& buffer)
: buffer_(std::move(buffer))
, ioc_(ioc)
@@ -119,6 +127,7 @@ public:
, etl_(etl)
, dosGuard_(dosGuard)
, counters_(counters)
, queue_(queue)
{
}
virtual ~WsSession()
@@ -138,7 +147,7 @@ public:
{
sending_ = true;
derived().ws().async_write(
net::buffer(messages_.front()),
net::buffer(messages_.front()->data(), messages_.front()->size()),
boost::beast::bind_front_handler(
&WsSession::on_write, derived().shared_from_this()));
}
@@ -168,18 +177,25 @@ public:
}
void
send(std::string const& msg) override
send(std::shared_ptr<Message> msg) override
{
net::dispatch(
derived().ws().get_executor(),
[this,
self = derived().shared_from_this(),
msg = std::string(msg)]() {
msg = std::move(msg)]() {
messages_.push(std::move(msg));
maybe_send_next();
});
}
void
send(std::string&& msg)
{
auto sharedMsg = std::make_shared<Message>(std::move(msg));
send(sharedMsg);
}
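
WsBase::send now takes std::shared_ptr<Message> instead of std::string const&, and the per-session outbox is a queue of those shared pointers. That lets SubscriptionManager serialize a publish once and enqueue the same buffer on every subscribed session, while the std::string&& overload above wraps one-off RPC responses in a Message on the same path. Message itself is not shown in this changeset; a minimal sketch consistent with how it is used here (construction from std::string&&, plus data()/size() for net::buffer in async_write), which may differ from the real subscriptions/Message.h:

#include <cstddef>
#include <string>

class Message
{
    std::string payload_;

public:
    explicit Message(std::string&& payload) : payload_(std::move(payload))
    {
    }

    // Pointer/size pair handed to net::buffer(...) when writing.
    char const*
    data() const
    {
        return payload_.data();
    }

    std::size_t
    size() const
    {
        return payload_.size();
    }
};

Because each session only holds a shared_ptr, the serialized payload stays alive until the last pending async_write that references it has completed.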
void
run(http::request<http::string_body> req)
{
@@ -236,20 +252,41 @@ public:
return;
boost::json::object response = {};
auto sendError = [this](auto error) {
send(boost::json::serialize(RPC::make_error(error)));
auto sendError = [this](auto error, boost::json::value id) {
auto e = RPC::make_error(error);
if (!id.is_null())
e["id"] = id;
send(boost::json::serialize(e));
};
boost::json::value raw = [](std::string const&& msg) {
try
{
return boost::json::parse(msg);
}
catch (std::exception&)
{
return boost::json::value{nullptr};
}
}(std::move(msg));
if (!raw.is_object())
return sendError(RPC::Error::rpcINVALID_PARAMS, nullptr);
boost::json::object request = raw.as_object();
auto id = request.contains("id") ? request.at("id") : nullptr;
try
{
boost::json::value raw = boost::json::parse(msg);
boost::json::object request = raw.as_object();
BOOST_LOG_TRIVIAL(debug) << " received request : " << request;
try
{
auto range = backend_->fetchLedgerRange();
if (!range)
return sendError(RPC::Error::rpcNOT_READY);
return sendError(RPC::Error::rpcNOT_READY, id);
std::optional<RPC::Context> context = RPC::make_WsContext(
yc,
@@ -264,9 +301,7 @@ public:
*ip);
if (!context)
return sendError(RPC::Error::rpcBAD_SYNTAX);
auto id = request.contains("id") ? request.at("id") : nullptr;
return sendError(RPC::Error::rpcBAD_SYNTAX, id);
response = getDefaultWsResponse(id);
@@ -299,7 +334,7 @@ public:
catch (Backend::DatabaseTimeout const& t)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout";
return sendError(RPC::Error::rpcNOT_READY);
return sendError(RPC::Error::rpcNOT_READY, id);
}
}
catch (std::exception const& e)
@@ -307,11 +342,28 @@ public:
BOOST_LOG_TRIVIAL(error)
<< __func__ << " caught exception : " << e.what();
return sendError(RPC::Error::rpcINTERNAL);
return sendError(RPC::Error::rpcINTERNAL, id);
}
boost::json::array warnings;
warnings.emplace_back(
"This is a clio server. clio only serves validated data. If you "
"want to talk to rippled, include 'ledger_index':'current' in your "
"request");
auto lastPublishAge = etl_->lastPublishAgeSeconds();
if (lastPublishAge >= 60)
warnings.emplace_back("This server may be out of date");
response["warnings"] = warnings;
std::string responseStr = boost::json::serialize(response);
dosGuard_.add(*ip, responseStr.size());
if (!dosGuard_.add(*ip, responseStr.size()))
{
warnings.emplace_back("Too many requests");
response["warnings"] = warnings;
// reserialize if we need to include this warning
responseStr = boost::json::serialize(response);
}
send(std::move(responseStr));
}
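
Two behavioural changes meet in handle_request: error responses now echo the request's "id" back whenever one was supplied (malformed or non-object payloads fall back to rpcINVALID_PARAMS with no id), and every response gains a "warnings" array identifying the server as clio, with an extra entry when the last ledger publish is 60 seconds old or more and another when the DOSGuard byte limit is exceeded. A stand-alone sketch of the resulting response shape, built with the same boost::json calls; the result object is a placeholder, not part of the diff:

#include <boost/json.hpp>

#include <iostream>

int
main()
{
    boost::json::object response;
    response["id"] = 1;  // echoed from the request when present
    response["result"] = boost::json::object{{"validated", true}};  // placeholder

    boost::json::array warnings;
    warnings.emplace_back(
        "This is a clio server. clio only serves validated data. If you "
        "want to talk to rippled, include 'ledger_index':'current' in your "
        "request");
    // Added only when etl_->lastPublishAgeSeconds() >= 60:
    warnings.emplace_back("This server may be out of date");
    response["warnings"] = warnings;

    std::cout << boost::json::serialize(response) << "\n";
}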
@@ -332,25 +384,29 @@ public:
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " received request from ip = " << *ip;
if (!dosGuard_.isOk(*ip))
{
auto sendError = [&](auto&& msg) {
boost::json::object response;
response["error"] = "Too many requests. Slow down";
response["error"] = std::move(msg);
std::string responseStr = boost::json::serialize(response);
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << responseStr;
dosGuard_.add(*ip, responseStr.size());
send(std::move(responseStr));
};
if (!dosGuard_.isOk(*ip))
{
sendError("Too many requests. Slow down");
}
else
{
boost::asio::spawn(
derived().ws().get_executor(),
[m = std::move(msg), shared_this = shared_from_this()](
boost::asio::yield_context yield) {
shared_this->handle_request(std::move(m), yield);
});
if (!queue_.postCoro(
[m = std::move(msg), shared_this = shared_from_this()](
boost::asio::yield_context yield) {
shared_this->handle_request(std::move(m), yield);
},
dosGuard_.isWhiteListed(*ip)))
sendError("Server overloaded");
}
do_read();
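
Finally, instead of spawning a coroutine per message directly on the WebSocket's executor, the session posts the handler through the shared WorkQueue. postCoro is expected to return false when the queue already holds max_queue_size jobs and the client is not whitelisted by the DOSGuard, in which case the session replies "Server overloaded" and drops the request. The WorkQueue implementation is not part of this hunk; the sketch below is one plausible shape for such a bounded coroutine queue, assuming an internal io_context driven by its own worker threads (the real rpc/WorkQueue.h may differ):

#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>

#include <atomic>
#include <cstdint>
#include <thread>
#include <utility>
#include <vector>

class WorkQueue
{
    std::uint32_t maxSize_;
    std::atomic<std::uint32_t> curSize_{0};
    boost::asio::io_context ioc_;
    boost::asio::executor_work_guard<boost::asio::io_context::executor_type> work_;
    std::vector<std::thread> threads_;

public:
    WorkQueue(std::uint32_t numWorkers, std::uint32_t maxSize)
        : maxSize_(maxSize), work_(boost::asio::make_work_guard(ioc_))
    {
        // Worker threads run the queue's own io_context, separate from socket IO.
        for (std::uint32_t i = 0; i < numWorkers; ++i)
            threads_.emplace_back([this] { ioc_.run(); });
    }

    ~WorkQueue()
    {
        work_.reset();
        ioc_.stop();
        for (auto& t : threads_)
            t.join();
    }

    // Reject the job when the queue is full and the caller is not whitelisted;
    // maxSize == 0 means "no limit", matching make_HttpServer above.
    template <typename F>
    bool
    postCoro(F&& f, bool isWhiteListed)
    {
        if (maxSize_ != 0 && curSize_.load() >= maxSize_ && !isWhiteListed)
            return false;

        ++curSize_;
        boost::asio::spawn(
            ioc_,
            [this, f = std::forward<F>(f)](boost::asio::yield_context yield) {
                f(yield);
                --curSize_;
            });
        return true;
    }
};

In the session code above, dosGuard_.isWhiteListed(*ip) supplies the second argument, so whitelisted peers are never turned away by the size check.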