Compare commits

..

10 Commits
0.1.0 ... 0.1.1

Author SHA1 Message Date
Nathan Nichols
65cbc5c232 set version to 0.1.1 (#143) 2022-04-22 13:57:09 -04:00
Nathan Nichols
56c36a07a2 return null when successor not in object table (#142)
* return null when successor not in object table
2022-04-22 11:40:41 -05:00
Nathan Nichols
c57b2d89f0 assorted server improvements (#140)
* assorted server improvements

save error on failure

execute sends on websocket executor
2022-04-20 10:35:39 -05:00
CJ Cobb
09c245786d fix offer dir iteration bug (#141) 2022-04-20 09:58:01 -05:00
Michael Legleux
da5e21b648 Use actions from XRPLF/clio-build repo 2022-04-12 18:05:14 -07:00
Michael Legleux
b2c1731bcd Fix header and function call (#138)
* RippledState.h --> TrustLine.h

* fix getAffectedAccounts() calls
2022-04-11 13:43:21 -04:00
Nathan Nichols
d6ec0f2aae round age to zero when negative (#137) 2022-03-30 13:02:59 -05:00
Nathan Nichols
9b1de77110 use 64 bit counters (#136)
* use 64 bit counters
2022-03-30 13:01:56 -05:00
Nathan Nichols
e526083456 Hotfix for account_info, ledger_data and ip() (#134)
* remove option to create account from seed

* catch errors in derived().ip()

* checks keys.size() before accessing
2022-03-25 16:57:32 -04:00
rabbit
ac6c4c25d6 Update Build Instructions (#133)
* protobuf v2 is required.

* Fixed incorrect protobuf version & add step to build instructions.

* Update build instructions so Clio isn't cloned into the boost directory.

* Updated build instructions

* Consolidate build instructions and remove line numbers.

* Change case: clio -> Clio.

* Edit for consistency and brevity.
2022-03-25 11:27:11 -07:00
22 changed files with 315 additions and 276 deletions

43
.github/workflows/build.yml vendored Normal file
View File

@@ -0,0 +1,43 @@
name: Build Clio
on:
push:
branches: [master, develop]
pull_request:
branches: [master, develop]
jobs:
lint:
name: Lint
runs-on: ubuntu-20.04
steps:
- name: Get source
uses: actions/checkout@v3
- name: Run clang-format
uses: XRPLF/clio-build/lint@main
build_clio:
name: Build
runs-on: ubuntu-20.04
needs: lint
steps:
- name: Get clio repo
uses: actions/checkout@v3
with:
path: clio_src
- name: Get gha repo
uses: actions/checkout@v3
with:
repository: XRPLF/clio-build
ref: main
path: gha # must be the same as defined in XRPLF/clio-build
- name: Build
uses: XRPLF/clio-build/build@main
# - name: Artifact clio_tests
# uses: actions/upload-artifact@v2
# with:
# name: clio_output
# path: clio_src/build/clio_tests

View File

@@ -1,60 +0,0 @@
name: clang-format
on: [push, pull_request]
jobs:
check:
runs-on: ubuntu-18.04
env:
CLANG_VERSION: 11
steps:
- uses: actions/checkout@v2
- name: Install clang-format
run: |
sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null <<EOF
deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main
deb-src http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main
EOF
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add
sudo apt-get update
sudo apt-get install clang-format-${CLANG_VERSION}
- name: Format src
run: find src -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-${CLANG_VERSION} -i
- name: Format unittests
run: find unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-${CLANG_VERSION} -i
- name: Check for differences
id: assert
run: |
set -o pipefail
git diff --exit-code | tee "clang-format.patch"
- name: Upload patch
if: failure() && steps.assert.outcome == 'failure'
uses: actions/upload-artifact@v2
continue-on-error: true
with:
name: clang-format.patch
if-no-files-found: ignore
path: clang-format.patch
- name: What happened?
if: failure() && steps.assert.outcome == 'failure'
env:
PREAMBLE: |
If you are reading this, you are looking at a failed Github Actions
job. That means you pushed one or more files that did not conform
to the formatting specified in .clang-format. That may be because
you neglected to run 'git clang-format' or 'clang-format' before
committing, or that your version of clang-format has an
incompatibility with the one on this
machine, which is:
SUGGESTION: |
To fix it, you can do one of two things:
1. Download and apply the patch generated as an artifact of this
job to your repo, commit, and push.
2. Run 'git-clang-format --extensions c,cpp,h,cxx,ipp develop'
in your repo, commit, and push.
run: |
echo "${PREAMBLE}"
clang-format-${CLANG_VERSION} --version
echo "${SUGGESTION}"
exit 1

View File

@@ -1,15 +0,0 @@
name: Trigger clio-ci
on: [pull_request, push]
jobs:
dispatch_build_event:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Send build_packages repository_dispatch event to clio-ci
env:
GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }}
run: |
curl -H "Authorization: token $GITHUB_TOKEN" \
-H 'Accept: application/vnd.github.v3+json' \
"https://api.github.com/repos/legleux/clio-ci/dispatches" \
-d '{"event_type": "build", "client_payload": {"SHA": "${{ github.sha }}"}}'

View File

@@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.16.3)
project(clio VERSION 0.1.0)
project(clio VERSION 0.1.1)
option(BUILD_TESTS "Build tests" TRUE)

133
README.md
View File

@@ -1,21 +1,21 @@
**Status:** This software is in beta mode. We encourage anyone to try it out and
report any issues they discover. Version 1.0 coming soon.
# clio
clio is an XRP Ledger API server. clio is optimized for RPC calls, over websocket or JSON-RPC. Validated
# Clio
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over websocket or JSON-RPC. Validated
historical ledger and transaction data is stored in a more space efficient format,
using up to 4 times less space than rippled. clio can be configured to store data in Apache Cassandra or ScyllaDB,
allowing for scalable read throughput. Multiple clio nodes can share
access to the same dataset, allowing for a highly available cluster of clio nodes,
using up to 4 times less space than rippled. Clio can be configured to store data in Apache Cassandra or ScyllaDB,
allowing for scalable read throughput. Multiple Clio nodes can share
access to the same dataset, allowing for a highly available cluster of Clio nodes,
without the need for redundant data storage or computation.
clio offers the full rippled API, with the caveat that clio by default only returns validated data.
Clio offers the full rippled API, with the caveat that Clio by default only returns validated data.
This means that `ledger_index` defaults to `validated` instead of `current` for all requests.
Other non-validated data is also not returned, such as information about queued transactions.
For requests that require access to the p2p network, such as `fee` or `submit`, clio automatically forwards the request to a rippled node, and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and clio will forward the request to rippled.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node, and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
clio does not connect to the peer to peer network. Instead, clio extracts data from a specified rippled node. Running clio requires access to a rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as clio.
Clio does not connect to the peer to peer network. Instead, Clio extracts data from a specified rippled node. Running Clio requires access to a rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.
## Requirements
@@ -24,120 +24,119 @@ from which data can be extracted. The rippled node does not need to be running o
2. Access to one or more rippled nodes. Can be local or remote.
## Building
clio is built with cmake. clio requires c++20, and boost 1.75.0 or later. protobuf v2 is required and must be manually built on systems that ship with v3, including Debian 11 and Ubuntu 21.10.
Use these instructions to build a clio executable from source. These instructions were tested on Ubuntu 20.04 LTS.
Clio is built with CMake. Clio requires C++20, and Boost 1.75.0 or later.
Use these instructions to build a Clio executable from source. These instructions were tested on Ubuntu 20.04 LTS.
```
1. sudo apt-get update
2. sudo apt-get -y upgrade
3. sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake
4. Boost:
wget https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz
tar xvzf boost_1_75_0.tar.gz
cd boost_1_75_0
# Install dependencies
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake
# Compile Boost
wget -O $HOME/boost_1_75_0.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz
tar xvzf $HOME/boost_1_75_0.tar.gz
cd $HOME/boost_1_75_0
./bootstrap.sh
./b2 -j$((`nproc`+1))
# Add the following 'export' command
# to your profile file (~/.profile):
# -------------------------------
export BOOST_ROOT=/home/my_user/boost_1_75_0
source ~/.profile
5. git clone https://github.com/XRPLF/clio.git
6. mkdir build
7. cd build
8. cmake ..
9. cmake --build . -- -j$((`nproc`+1))
./b2 -j$(nproc)
echo "export BOOST_ROOT=$HOME/boost_1_75_0" >> $HOME/.profile && source $HOME/.profile
# Clone the Clio Git repository & build Clio
cd $HOME
git clone https://github.com/XRPLF/clio.git
cd $HOME/clio
cmake -B build && cmake --build build --parallel $(nproc)
```
## Running
`./clio_server config.json`
clio needs access to a rippled server. The config files of rippled and clio need
Clio needs access to a rippled server. The config files of rippled and Clio need
to match in a certain sense.
clio needs to know:
Clio needs to know:
- the ip of rippled
- the port on which rippled is accepting unencrypted websocket connections
- the port on which rippled is handling gRPC requests
rippled needs to open:
- a port to accept unencrypted websocket connections
- a port to handle gRPC requests, with the ip(s) of clio specified in the `secure_gateway` entry
- a port to handle gRPC requests, with the ip(s) of Clio specified in the `secure_gateway` entry
The example configs of rippled and clio are setup such that minimal changes are
The example configs of rippled and Clio are set up such that minimal changes are
required. When running locally, the only change needed is to uncomment the `port_grpc`
section of the rippled config. When running clio and rippled on separate machines,
section of the rippled config. When running Clio and rippled on separate machines,
in addition to uncommenting the `port_grpc` section, a few other steps must be taken:
1. change the `ip` of the first entry of `etl_sources` to the ip where your rippled
server is running
2. open a public, unencrypted websocket port on your rippled server
3. change the ip specified in `secure_gateway` of `port_grpc` section of the rippled config
to the ip of your clio server. This entry can take the form of a comma separated list if
you are running multiple clio nodes.
to the ip of your Clio server. This entry can take the form of a comma separated list if
you are running multiple Clio nodes.
Once your config files are ready, start rippled and clio. It doesn't matter which you
Once your config files are ready, start rippled and Clio. It doesn't matter which you
start first, and it's fine to stop one or the other and restart at any given time.
clio will wait for rippled to sync before extracting any ledgers. If there is already
data in clio's database, clio will begin extraction with the ledger whose sequence
is one greater than the greatest sequence currently in the database. clio will wait
Clio will wait for rippled to sync before extracting any ledgers. If there is already
data in Clio's database, Clio will begin extraction with the ledger whose sequence
is one greater than the greatest sequence currently in the database. Clio will wait
for this ledger to be available. Be aware that the behavior of rippled is to sync to
the most recent ledger on the network, and then backfill. If clio is extracting ledgers
the most recent ledger on the network, and then backfill. If Clio is extracting ledgers
from rippled, and then rippled is stopped for a significant amount of time and then restarted, rippled
will take time to backfill to the next ledger that clio wants. The time it takes is proportional
will take time to backfill to the next ledger that Clio wants. The time it takes is proportional
to the amount of time rippled was offline for. Also be aware that the amount rippled backfills
is dependent on the online_delete and ledger_history config values; if these values
are small, and rippled is stopped for a significant amount of time, rippled may never backfill
to the ledger that clio wants. To avoid this situation, it is advised to keep history
to the ledger that Clio wants. To avoid this situation, it is advised to keep history
proportional to the amount of time that you expect rippled to be offline. For example, if you
expect rippled to be offline for a few days from time to time, you should keep at least
a few days of history. If you expect rippled to never be offline, then you can keep a very small
amount of history.
clio can use multiple rippled servers as a data source. Simply add more entries to
the `etl_sources` section. clio will load balance requests across the servers specified
in this list. As long as one rippled server is up and synced, clio will continue
Clio can use multiple rippled servers as a data source. Simply add more entries to
the `etl_sources` section. Clio will load balance requests across the servers specified
in this list. As long as one rippled server is up and synced, Clio will continue
extracting ledgers.
In contrast to rippled, clio will answer RPC requests for the data already in the
database as soon as the server starts. clio doesn't wait to sync to the network, or
In contrast to rippled, Clio will answer RPC requests for the data already in the
database as soon as the server starts. Clio doesn't wait to sync to the network, or
for rippled to sync.
When starting clio with a fresh database, clio needs to download a ledger in full.
When starting Clio with a fresh database, Clio needs to download a ledger in full.
This can take some time, and depends on database throughput. With a moderately fast
database, this should take less than 10 minutes. If you did not properly set `secure_gateway`
in the `port_grpc` section of rippled, this step will fail. Once the first ledger
is fully downloaded, clio only needs to extract the changed data for each ledger,
so extraction is much faster and clio can keep up with rippled in real time. Even under
intense load, clio should not lag behind the network, as clio is not processing the data,
and is simply writing to a database. The throughput of clio is dependent on the throughput
is fully downloaded, Clio only needs to extract the changed data for each ledger,
so extraction is much faster and Clio can keep up with rippled in real time. Even under
intense load, Clio should not lag behind the network, as Clio is not processing the data,
and is simply writing to a database. The throughput of Clio is dependent on the throughput
of your database, but a standard Cassandra or Scylla deployment can handle
the write load of the XRP Ledger without any trouble. Generally the performance considerations
come on the read side, and depends on the number of RPC requests your clio nodes
come on the read side, and depend on the number of RPC requests your Clio nodes
are serving. Be aware that very heavy read traffic can impact write throughput. Again, this
is on the database side, so if you are seeing this, upgrade your database.
It is possible to run multiple clio nodes that share access to the same database.
The clio nodes don't need to know about each other. You can simply spin up more clio
It is possible to run multiple Clio nodes that share access to the same database.
The Clio nodes don't need to know about each other. You can simply spin up more Clio
nodes pointing to the same database as you wish, and shut them down as you wish.
On startup, each clio node queries the database for the latest ledger. If this latest
ledger does not change for some time, the clio node begins extracting ledgers
and writing to the database. If the clio node detects a ledger that it is trying to
write has already been written, the clio node will backoff and stop writing. If later
the clio node sees no ledger written for some time, it will start writing again.
This algorithm ensures that at any given time, one and only one clio node is writing
On startup, each Clio node queries the database for the latest ledger. If this latest
ledger does not change for some time, the Clio node begins extracting ledgers
and writing to the database. If the Clio node detects a ledger that it is trying to
write has already been written, the Clio node will back off and stop writing. If later
the Clio node sees no ledger written for some time, it will start writing again.
This algorithm ensures that at any given time, one and only one Clio node is writing
to the database.
It is possible to force clio to only read data, and to never become a writer.
It is possible to force Clio to only read data, and to never become a writer.
To do this, set `read_only: true` in the config. One common setup is to have a
small number of writer nodes that are inaccessible to clients, with several
read only nodes handling client requests. The number of read only nodes can be scaled
up or down in response to request volume.
When using multiple rippled servers as data sources and multiple clio nodes,
each clio node should use the same set of rippled servers as sources. The order doesn't matter.
When using multiple rippled servers as data sources and multiple Clio nodes,
each Clio node should use the same set of rippled servers as sources. The order doesn't matter.
The only reason not to do this is if you are running servers in different regions, and
you want the clio nodes to extract from servers in their region. However, if you
you want the Clio nodes to extract from servers in their region. However, if you
are doing this, be aware that database traffic will be flowing across regions,
which can cause high latencies. A possible alternative to this is to just deploy
a database in each region, and the clio nodes in each region use their region's database.
a database in each region, and the Clio nodes in each region use their region's database.
This is effectively two systems.

View File

@@ -140,7 +140,9 @@ BackendInterface::fetchSuccessorObject(
if (succ)
{
auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
assert(obj);
if (!obj)
return {{*succ, {}}};
return {{*succ, *obj}};
}
return {};
@@ -176,16 +178,16 @@ BackendInterface::fetchBookOffers(
auto mid2 = std::chrono::system_clock::now();
numSucc++;
succMillis += getMillis(mid2 - mid1);
if (!offerDir || offerDir->key > bookEnd)
if (!offerDir || offerDir->key >= bookEnd)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " - offerDir.has_value() "
<< offerDir.has_value() << " breaking";
break;
}
uTipIndex = offerDir->key;
while (keys.size() < limit)
{
++numPages;
uTipIndex = offerDir->key;
ripple::STLedgerEntry sle{
ripple::SerialIter{
offerDir->blob.data(), offerDir->blob.size()},
@@ -283,7 +285,7 @@ BackendInterface::fetchLedgerPage(
assert(false);
}
}
if (!reachedEnd)
if (keys.size() && !reachedEnd)
page.cursor = keys.back();
return page;

View File

@@ -22,7 +22,7 @@ struct AccountTransactionsData
ripple::TxMeta& meta,
ripple::uint256 const& txHash,
beast::Journal& j)
: accounts(meta.getAffectedAccounts(j))
: accounts(meta.getAffectedAccounts())
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, txHash(txHash)

View File

@@ -16,11 +16,11 @@ private:
{
MethodInfo() = default;
std::atomic_uint started{0};
std::atomic_uint finished{0};
std::atomic_uint errored{0};
std::atomic_uint forwarded{0};
std::atomic_uint duration{0};
std::atomic_uint64_t started{0};
std::atomic_uint64_t finished{0};
std::atomic_uint64_t errored{0};
std::atomic_uint64_t forwarded{0};
std::atomic_uint64_t duration{0};
};
void

View File

@@ -48,16 +48,8 @@ doAccountInfo(Context const& context)
// Get info on account.
auto accountID = accountFromStringStrict(strIdent);
if (!accountID)
{
if (!request.contains("strict") || !request.at("strict").as_bool())
{
accountID = accountFromSeed(strIdent);
if (!accountID)
return Status{Error::rpcBAD_SEED};
}
else
return Status{Error::rpcACT_MALFORMED};
}
return Status{Error::rpcACT_MALFORMED};
assert(accountID.has_value());
auto key = ripple::keylet::account(accountID.value());

View File

@@ -1,5 +1,5 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/RippleState.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>

View File

@@ -1,5 +1,5 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/RippleState.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>

View File

@@ -1,5 +1,5 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/RippleState.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>

View File

@@ -54,6 +54,10 @@ doServerInfo(Context const& context)
lgrInfo->closeTime.time_since_epoch().count() - 946684800;
auto& validatedLgr =
(response["validated_ledger"] = boost::json::object{}).as_object();
if (age < 0)
age = 0;
validatedLgr["age"] = age;
validatedLgr["hash"] = ripple::strHex(lgrInfo->hash);
validatedLgr["seq"] = lgrInfo->seq;

View File

@@ -254,7 +254,7 @@ SubscriptionManager::pubTransaction(
txSubscribers_.publish(pubMsg);
auto journal = ripple::debugLog();
auto accounts = meta->getAffectedAccounts(journal);
auto accounts = meta->getAffectedAccounts();
for (auto const& account : accounts)
accountSubscribers_.publish(pubMsg, account);

View File

@@ -34,32 +34,6 @@ static std::string defaultResponse =
" Test</h1><p>This page shows xrpl reporting http(s) "
"connectivity is working.</p></body></html>";
inline void
httpFail(boost::beast::error_code ec, char const* what)
{
// ssl::error::stream_truncated, also known as an SSL "short read",
// indicates the peer closed the connection without performing the
// required closing handshake (for example, Google does this to
// improve performance). Generally this can be a security issue,
// but if your communication protocol is self-terminated (as
// it is with both HTTP and WebSocket) then you may simply
// ignore the lack of close_notify.
//
// https://github.com/boostorg/beast/issues/38
//
// https://security.stackexchange.com/questions/91435/how-to-handle-a-malicious-ssl-tls-shutdown
//
// When a short read would cut off the end of an HTTP message,
// Beast returns the error boost::beast::http::error::partial_message.
// Therefore, if we see a short read here, it has occurred
// after the message has been completed, so it is safe to ignore it.
if (ec == net::ssl::error::stream_truncated)
return;
std::cerr << what << ": " << ec.message() << "\n";
}
// From Boost Beast examples http_server_flex.cpp
template <class Derived>
class HttpBase
@@ -84,6 +58,9 @@ class HttpBase
void
operator()(http::message<isRequest, Body, Fields>&& msg) const
{
if (self_.dead())
return;
// The lifetime of the message has to extend
// for the duration of the async operation so
// we use a shared_ptr to manage it.
@@ -105,6 +82,7 @@ class HttpBase
}
};
boost::system::error_code ec_;
boost::asio::io_context& ioc_;
http::request<http::string_body> req_;
std::shared_ptr<void> res_;
@@ -119,6 +97,46 @@ class HttpBase
protected:
boost::beast::flat_buffer buffer_;
bool
dead()
{
return ec_ != boost::system::error_code{};
}
inline void
httpFail(boost::beast::error_code ec, char const* what)
{
// ssl::error::stream_truncated, also known as an SSL "short read",
// indicates the peer closed the connection without performing the
// required closing handshake (for example, Google does this to
// improve performance). Generally this can be a security issue,
// but if your communication protocol is self-terminated (as
// it is with both HTTP and WebSocket) then you may simply
// ignore the lack of close_notify.
//
// https://github.com/boostorg/beast/issues/38
//
// https://security.stackexchange.com/questions/91435/how-to-handle-a-malicious-ssl-tls-shutdown
//
// When a short read would cut off the end of an HTTP message,
// Beast returns the error boost::beast::http::error::partial_message.
// Therefore, if we see a short read here, it has occurred
// after the message has been completed, so it is safe to ignore it.
if (ec == net::ssl::error::stream_truncated)
return;
if (!ec_ && ec != boost::asio::error::operation_aborted)
{
ec_ = ec;
BOOST_LOG_TRIVIAL(info)
<< "httpFail: " << what << ": " << ec.message();
boost::beast::get_lowest_layer(derived().stream())
.socket()
.close(ec);
}
}
public:
HttpBase(
boost::asio::io_context& ioc,
@@ -144,6 +162,8 @@ public:
void
do_read()
{
if (dead())
return;
// Make the request empty before reading,
// otherwise the operation behavior is undefined.
req_ = {};
@@ -192,6 +212,10 @@ public:
}
auto ip = derived().ip();
if (!ip)
return;
auto session = derived().shared_from_this();
// Requests are handed using coroutines. Here we spawn a coroutine
@@ -208,7 +232,7 @@ public:
etl_,
dosGuard_,
counters_,
ip,
*ip,
session);
});
}

View File

@@ -50,10 +50,17 @@ public:
return std::move(stream_);
}
std::string
std::optional<std::string>
ip()
{
return stream_.socket().remote_endpoint().address().to_string();
try
{
return stream_.socket().remote_endpoint().address().to_string();
}
catch (std::exception const&)
{
return {};
}
}
// Start the asynchronous operation

View File

@@ -55,6 +55,16 @@ public:
{
}
inline void
fail(boost::system::error_code ec, char const* message)
{
if (ec == net::ssl::error::stream_truncated)
return;
BOOST_LOG_TRIVIAL(info)
<< "Detector failed: " << message << ec.message() << std::endl;
}
// Launch the detector
void
run()
@@ -74,12 +84,12 @@ public:
on_detect(boost::beast::error_code ec, bool result)
{
if (ec)
return httpFail(ec, "detect");
return fail(ec, "detect");
if (result)
{
if (!ctx_)
return httpFail(ec, "ssl not supported by this server");
return fail(ec, "ssl not supported by this server");
// Launch SSL session
std::make_shared<SslSession>(
ioc_,
@@ -206,34 +216,22 @@ public:
// Open the acceptor
acceptor_.open(endpoint.protocol(), ec);
if (ec)
{
httpFail(ec, "open");
return;
}
// Allow address reuse
acceptor_.set_option(net::socket_base::reuse_address(true), ec);
if (ec)
{
httpFail(ec, "set_option");
return;
}
// Bind to the server address
acceptor_.bind(endpoint, ec);
if (ec)
{
httpFail(ec, "bind");
return;
}
// Start listening for connections
acceptor_.listen(net::socket_base::max_listen_connections, ec);
if (ec)
{
httpFail(ec, "listen");
return;
}
}
// Start accepting incoming connections
@@ -257,11 +255,7 @@ private:
void
on_accept(boost::beast::error_code ec, tcp::socket socket)
{
if (ec)
{
httpFail(ec, "listener_accept");
}
else
if (!ec)
{
auto ctxRef = ctx_
? std::optional<

View File

@@ -58,15 +58,22 @@ public:
return ws_;
}
std::string
std::optional<std::string>
ip()
{
return ws()
.next_layer()
.socket()
.remote_endpoint()
.address()
.to_string();
try
{
return ws()
.next_layer()
.socket()
.remote_endpoint()
.address()
.to_string();
}
catch (std::exception const&)
{
return {};
}
}
~PlainWsSession() = default;

View File

@@ -51,14 +51,21 @@ public:
return std::move(stream_);
}
std::string
std::optional<std::string>
ip()
{
return stream_.next_layer()
.socket()
.remote_endpoint()
.address()
.to_string();
try
{
return stream_.next_layer()
.socket()
.remote_endpoint()
.address()
.to_string();
}
catch (std::exception const&)
{
return {};
}
}
// Start the asynchronous operation

View File

@@ -55,16 +55,24 @@ public:
{
return ws_;
}
std::string
std::optional<std::string>
ip()
{
return ws()
.next_layer()
.next_layer()
.socket()
.remote_endpoint()
.address()
.to_string();
try
{
return ws()
.next_layer()
.next_layer()
.socket()
.remote_endpoint()
.address()
.to_string();
}
catch (std::exception const&)
{
return {};
}
}
};

View File

@@ -43,7 +43,8 @@ getDefaultWsResponse(boost::json::value const& id)
class WsBase
{
std::atomic_bool dead_ = false;
protected:
boost::system::error_code ec_;
public:
// Send, that enables SubscriptionManager to publish to clients
@@ -54,17 +55,10 @@ public:
{
}
void
wsFail(boost::beast::error_code ec, char const* what)
{
logError(ec, what);
dead_ = true;
}
bool
dead()
{
return dead_;
return ec_ != boost::system::error_code{};
}
};
@@ -91,8 +85,22 @@ class WsSession : public WsBase,
DOSGuard& dosGuard_;
RPC::Counters& counters_;
std::mutex mtx_;
bool sending_ = false;
std::queue<std::string> messages_;
void
wsFail(boost::beast::error_code ec, char const* what)
{
if (!ec_ && ec != boost::asio::error::operation_aborted)
{
ec_ = ec;
BOOST_LOG_TRIVIAL(info)
<< "wsFail: " << what << ": " << ec.message();
boost::beast::get_lowest_layer(derived().ws()).socket().close(ec);
}
}
public:
explicit WsSession(
boost::asio::io_context& ioc,
@@ -126,43 +134,50 @@ public:
}
void
sendNext()
do_write()
{
std::lock_guard<std::mutex> lck(mtx_);
sending_ = true;
derived().ws().async_write(
boost::asio::buffer(messages_.front()),
[shared = shared_from_this()](auto ec, size_t size) {
if (ec)
return shared->wsFail(ec, "publishToStream");
size_t left = 0;
{
std::lock_guard<std::mutex> lck(shared->mtx_);
shared->messages_.pop();
left = shared->messages_.size();
}
if (left > 0)
shared->sendNext();
});
net::buffer(messages_.front()),
boost::beast::bind_front_handler(
&WsSession::on_write, derived().shared_from_this()));
}
void
enqueueMessage(std::string&& msg)
on_write(boost::system::error_code ec, std::size_t)
{
size_t left = 0;
if (ec)
{
std::lock_guard<std::mutex> lck(mtx_);
messages_.push(std::move(msg));
left = messages_.size();
wsFail(ec, "Failed to write");
}
// if the queue was previously empty, start the send chain
if (left == 1)
sendNext();
else
{
messages_.pop();
sending_ = false;
maybe_send_next();
}
}
void
maybe_send_next()
{
if (ec_ || sending_ || messages_.empty())
return;
do_write();
}
void
send(std::string const& msg) override
{
enqueueMessage(std::string(msg));
net::dispatch(
derived().ws().get_executor(),
[this,
self = derived().shared_from_this(),
msg = std::string(msg)]() {
messages_.push(std::move(msg));
maybe_send_next();
});
}
void
@@ -200,6 +215,9 @@ public:
void
do_read()
{
if (dead())
return;
std::lock_guard<std::mutex> lck{mtx_};
// Clear the buffer
buffer_.consume(buffer_.size());
@@ -213,6 +231,10 @@ public:
void
handle_request(std::string const&& msg, boost::asio::yield_context& yc)
{
auto ip = derived().ip();
if (!ip)
return;
boost::json::object response = {};
auto sendError = [this](auto error) {
send(boost::json::serialize(RPC::make_error(error)));
@@ -221,6 +243,7 @@ public:
{
boost::json::value raw = boost::json::parse(msg);
boost::json::object request = raw.as_object();
BOOST_LOG_TRIVIAL(debug) << " received request : " << request;
try
{
@@ -238,7 +261,7 @@ public:
shared_from_this(),
*range,
counters_,
derived().ip());
*ip);
if (!context)
return sendError(RPC::Error::rpcBAD_SYNTAX);
@@ -287,7 +310,7 @@ public:
}
std::string responseStr = boost::json::serialize(response);
dosGuard_.add(derived().ip(), responseStr.size());
dosGuard_.add(*ip, responseStr.size());
send(std::move(responseStr));
}
@@ -302,9 +325,13 @@ public:
std::string msg{
static_cast<char const*>(buffer_.data().data()), buffer_.size()};
auto ip = derived().ip();
if (!ip)
return;
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " received request from ip = " << ip;
if (!dosGuard_.isOk(ip))
<< __func__ << " received request from ip = " << *ip;
if (!dosGuard_.isOk(*ip))
{
boost::json::object response;
response["error"] = "Too many requests. Slow down";
@@ -312,13 +339,13 @@ public:
BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << responseStr;
dosGuard_.add(ip, responseStr.size());
dosGuard_.add(*ip, responseStr.size());
send(std::move(responseStr));
}
else
{
boost::asio::spawn(
ioc_,
derived().ws().get_executor(),
[m = std::move(msg), shared_this = shared_from_this()](
boost::asio::yield_context yield) {
shared_this->handle_request(std::move(m), yield);

View File

@@ -317,7 +317,7 @@ TEST(BackendTest, Basic)
EXPECT_TRUE(hash256.parseHex(hashHex));
ripple::TxMeta txMeta{hash256, lgrInfoNext.seq, metaBlob};
auto journal = ripple::debugLog();
auto accountsSet = txMeta.getAffectedAccounts(journal);
auto accountsSet = txMeta.getAffectedAccounts();
for (auto& a : accountsSet)
{
affectedAccounts.push_back(a);