Cleanup, documentation, rename some things, cmake changes

This commit is contained in:
CJ Cobb
2021-06-23 14:43:29 +00:00
parent 8af7825d7f
commit 056e170a56
55 changed files with 288 additions and 227 deletions

View File

@@ -0,0 +1,52 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
#include <boost/algorithm/string.hpp>
#include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h>
#include <backend/PostgresBackend.h>
namespace Backend {
std::shared_ptr<BackendInterface>
make_Backend(boost::json::object const& config)
{
    // Factory: constructs, opens and sanity-checks the database backend
    // selected by config["database"]["type"] ("cassandra" or "postgres").
    BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface";

    boost::json::object dbConfig = config.at("database").as_object();

    bool const readOnly =
        config.contains("read_only") ? config.at("read_only").as_bool() : false;

    auto const type = dbConfig.at("type").as_string();

    std::shared_ptr<BackendInterface> backend;
    if (boost::iequals(type, "postgres"))
    {
        backend =
            std::make_shared<PostgresBackend>(dbConfig.at(type).as_object());
    }
    else if (boost::iequals(type, "cassandra"))
    {
        // Cassandra expires old data via a TTL derived from online_delete.
        if (config.contains("online_delete"))
            dbConfig.at(type).as_object()["ttl"] =
                config.at("online_delete").as_int64() * 4;
        backend =
            std::make_shared<CassandraBackend>(dbConfig.at(type).as_object());
    }
    if (!backend)
        throw std::runtime_error("Invalid database type");

    backend->open(readOnly);
    backend->checkFlagLedgers();
    BOOST_LOG_TRIVIAL(info)
        << __func__ << ": Constructed BackendInterface Successfully";
    return backend;
}
} // namespace Backend
#endif // RIPPLE_REPORTING_BACKEND_FACTORY

View File

@@ -0,0 +1,242 @@
#include <backend/BackendIndexer.h>
#include <backend/BackendInterface.h>
namespace Backend {
BackendIndexer::BackendIndexer(boost::json::object const& config)
    : strand_(ioc_)
{
    // The flag-ledger interval is 2^keyShift_; allow tuning via config.
    if (config.contains("indexer_key_shift"))
        keyShift_ = config.at("indexer_key_shift").as_int64();
    // The work guard keeps ioc_.run() from returning while the queue is
    // empty; the dedicated thread services all indexer jobs.
    work_.emplace(ioc_);
    ioThread_ = std::thread{[this]() { ioc_.run(); }};
}
BackendIndexer::~BackendIndexer()
{
    // Drop the work guard so ioc_.run() returns once queued jobs drain,
    // then wait for the indexer thread to exit. Order matters.
    work_.reset();
    ioThread_.join();
}
void
BackendIndexer::addKey(ripple::uint256&& key)
{
    // Accumulate a key for the in-progress ledger; the set is flushed to
    // the database by finish().
    // NOTE(review): no locking here — appears to assume a single writer
    // thread; confirm against the ETL call sites.
    keys.insert(std::move(key));
}
void
BackendIndexer::doKeysRepair(
    BackendInterface const& backend,
    std::optional<uint32_t> sequence)
{
    // Ensure the flag (key-index) ledger covering `sequence` is fully
    // written, recursively repairing earlier missing flag ledgers first.
    // A null `sequence` means "repair up to the newest ledger".
    // Retries forever on database timeouts.
    auto rng = backend.fetchLedgerRangeNoThrow();
    if (!rng)
        return;
    if (!sequence)
        sequence = rng->maxSequence;
    // Clamp to the oldest ledger actually present.
    if (sequence < rng->minSequence)
        sequence = rng->minSequence;
    BOOST_LOG_TRIVIAL(info)
        << __func__ << " sequence = " << std::to_string(*sequence);
    while (true)
    {
        try
        {
            if (backend.isLedgerIndexed(*sequence))
            {
                BOOST_LOG_TRIVIAL(info)
                    << __func__ << " - " << std::to_string(*sequence)
                    << " flag ledger already written. returning";
                return;
            }
            else
            {
                BOOST_LOG_TRIVIAL(info)
                    << __func__ << " - " << std::to_string(*sequence)
                    << " flag ledger not written. recursing..";
                // Round down to the previous flag boundary and make sure
                // everything below is intact before repairing this level.
                uint32_t lower = (*sequence - 1) >> keyShift_ << keyShift_;
                doKeysRepair(backend, lower);
                BOOST_LOG_TRIVIAL(info)
                    << __func__ << " - "
                    << " sequence = " << std::to_string(*sequence)
                    << " lower = " << std::to_string(lower)
                    << " finished recursing. submitting repair ";
                writeKeyFlagLedger(lower, backend);
                return;
            }
        }
        catch (DatabaseTimeout const& e)
        {
            // Transient timeout: back off briefly and retry this level.
            BOOST_LOG_TRIVIAL(warning)
                << __func__ << " Database timeout fetching keys";
            std::this_thread::sleep_for(std::chrono::seconds(2));
        }
    }
    // fix: removed unused local `cursor` and the unreachable trailing log
    // statement (the loop above only exits via return).
}
void
BackendIndexer::doKeysRepairAsync(
    BackendInterface const& backend,
    std::optional<uint32_t> sequence)
{
    // Queue the repair on the indexer strand so repairs never run
    // concurrently with other index writes. `backend` is captured by
    // reference — the caller must keep it alive until the job completes
    // (backends are long-lived in practice; confirm at call sites).
    boost::asio::post(strand_, [this, sequence, &backend]() {
        doKeysRepair(backend, sequence);
    });
}
void
BackendIndexer::writeKeyFlagLedger(
uint32_t ledgerSequence,
BackendInterface const& backend)
{
auto nextFlag = getKeyIndexOfSeq(ledgerSequence + 1);
uint32_t lower = ledgerSequence >> keyShift_ << keyShift_;
BOOST_LOG_TRIVIAL(info)
<< "writeKeyFlagLedger - "
<< "next flag = " << std::to_string(nextFlag.keyIndex)
<< "lower = " << std::to_string(lower)
<< "ledgerSequence = " << std::to_string(ledgerSequence) << " starting";
ripple::uint256 zero = {};
std::optional<ripple::uint256> cursor;
size_t numKeys = 0;
auto begin = std::chrono::system_clock::now();
while (true)
{
try
{
{
BOOST_LOG_TRIVIAL(info)
<< "writeKeyFlagLedger - checking for complete...";
if (backend.isLedgerIndexed(nextFlag.keyIndex))
{
BOOST_LOG_TRIVIAL(warning)
<< "writeKeyFlagLedger - "
<< "flag ledger already written. flag = "
<< std::to_string(nextFlag.keyIndex)
<< " , ledger sequence = "
<< std::to_string(ledgerSequence);
return;
}
BOOST_LOG_TRIVIAL(info)
<< "writeKeyFlagLedger - is not complete";
}
indexing_ = nextFlag.keyIndex;
auto start = std::chrono::system_clock::now();
auto [objects, curCursor, warning] =
backend.fetchLedgerPage(cursor, lower, 2048);
auto mid = std::chrono::system_clock::now();
// no cursor means this is the first page
if (!cursor)
{
if (warning)
{
BOOST_LOG_TRIVIAL(error)
<< "writeKeyFlagLedger - "
<< " prev flag ledger not written "
<< std::to_string(nextFlag.keyIndex) << " : "
<< std::to_string(ledgerSequence);
assert(false);
throw std::runtime_error("Missing prev flag");
}
}
cursor = curCursor;
std::unordered_set<ripple::uint256> keys;
for (auto& obj : objects)
{
keys.insert(obj.key);
}
backend.writeKeys(keys, nextFlag, true);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug)
<< "writeKeyFlagLedger - " << std::to_string(nextFlag.keyIndex)
<< " fetched a page "
<< " cursor = "
<< (cursor.has_value() ? ripple::strHex(*cursor)
: std::string{})
<< " num keys = " << std::to_string(numKeys) << " fetch time = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
mid - start)
.count()
<< " write time = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
end - mid)
.count();
if (!cursor)
break;
}
catch (DatabaseTimeout const& e)
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " Database timeout fetching keys";
std::this_thread::sleep_for(std::chrono::seconds(2));
}
}
backend.writeKeys({zero}, nextFlag, true);
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(info)
<< "writeKeyFlagLedger - " << std::to_string(nextFlag.keyIndex)
<< " finished. "
<< " num keys = " << std::to_string(numKeys) << " total time = "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
.count();
indexing_ = 0;
}
void
BackendIndexer::writeKeyFlagLedgerAsync(
    uint32_t ledgerSequence,
    BackendInterface const& backend)
{
    BOOST_LOG_TRIVIAL(info)
        << __func__
        << " starting. sequence = " << std::to_string(ledgerSequence);
    // Serialize flag-ledger writes on the indexer strand. `backend` is
    // captured by reference and must outlive the queued job.
    boost::asio::post(strand_, [this, ledgerSequence, &backend]() {
        writeKeyFlagLedger(ledgerSequence, backend);
    });
    // NOTE(review): "finished" below means the job was enqueued, not that
    // the flag ledger has actually been written yet.
    BOOST_LOG_TRIVIAL(info)
        << __func__
        << " finished. sequence = " << std::to_string(ledgerSequence);
}
void
BackendIndexer::finish(uint32_t ledgerSequence, BackendInterface const& backend)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__
<< " starting. sequence = " << std::to_string(ledgerSequence);
bool isFirst = false;
auto keyIndex = getKeyIndexOfSeq(ledgerSequence);
if (isFirst_)
{
auto rng = backend.fetchLedgerRangeNoThrow();
if (rng && rng->minSequence != ledgerSequence)
isFirst_ = false;
else
{
keyIndex = KeyIndex{ledgerSequence};
}
}
backend.writeKeys(keys, keyIndex);
if (isFirst_)
{
// write completion record
ripple::uint256 zero = {};
backend.writeKeys({zero}, keyIndex);
// write next flag sychronously
keyIndex = getKeyIndexOfSeq(ledgerSequence + 1);
backend.writeKeys(keys, keyIndex);
backend.writeKeys({zero}, keyIndex);
}
isFirst_ = false;
keys = {};
BOOST_LOG_TRIVIAL(debug)
<< __func__
<< " finished. sequence = " << std::to_string(ledgerSequence);
}
} // namespace Backend

View File

@@ -0,0 +1,104 @@
#ifndef CLIO_BACKEND_INDEXER_H_INCLUDED
#define CLIO_BACKEND_INDEXER_H_INCLUDED
#include <ripple/basics/base_uint.h>
#include <boost/asio.hpp>
#include <boost/json.hpp>
#include <mutex>
#include <optional>
#include <thread>
namespace std {
// Hash a 256-bit key by folding its raw bytes, so ripple::uint256 can be
// used directly in std::unordered_set / std::unordered_map.
template <>
struct hash<ripple::uint256>
{
    std::size_t
    operator()(const ripple::uint256& k) const noexcept
    {
        return boost::hash_range(k.begin(), k.end());
    }
};
} // namespace std
namespace Backend {
// The below two structs exist to prevent developers from accidentally mixing up
// the two indexes.
// Strongly-typed wrapper for the sequence of a book (offer-directory) index
// ledger; `explicit` so it can't be confused with a raw uint32_t or KeyIndex.
struct BookIndex
{
    uint32_t bookIndex;
    explicit BookIndex(uint32_t v) : bookIndex(v){};
};
// Strongly-typed wrapper for the sequence of a key (flag) index ledger;
// `explicit` so it can't be confused with a raw uint32_t or BookIndex.
struct KeyIndex
{
    uint32_t keyIndex;
    explicit KeyIndex(uint32_t v) : keyIndex(v){};
};
class BackendInterface;
// Maintains per-flag-ledger key indexes: every 2^keyShift_ ledgers, the full
// set of state-object keys is re-written so paged iteration (ledger_data)
// only ever has to scan back to the nearest flag ledger. All index writes
// are serialized on a dedicated io_context thread via strand_.
class BackendIndexer
{
    boost::asio::io_context ioc_;
    boost::asio::io_context::strand strand_;
    // NOTE(review): mutex_ appears unused in BackendIndexer.cpp — confirm
    // before removing.
    std::mutex mutex_;
    // Keeps ioc_.run() alive while the job queue is empty.
    std::optional<boost::asio::io_context::work> work_;
    std::thread ioThread_;
    // Flag-ledger sequence currently being indexed; 0 when idle.
    std::atomic_uint32_t indexing_ = 0;
    // Flag-ledger interval is 2^keyShift_ (config: "indexer_key_shift").
    uint32_t keyShift_ = 20;
    // Keys accumulated for the in-progress ledger; flushed by finish().
    std::unordered_set<ripple::uint256> keys;
    // True until the first ledger has been finished by this process.
    mutable bool isFirst_ = true;
    void
    doKeysRepair(
        BackendInterface const& backend,
        std::optional<uint32_t> sequence);
    void
    writeKeyFlagLedger(
        uint32_t ledgerSequence,
        BackendInterface const& backend);

public:
    BackendIndexer(boost::json::object const& config);
    ~BackendIndexer();
    void
    addKey(ripple::uint256&& key);
    void
    finish(uint32_t ledgerSequence, BackendInterface const& backend);
    void
    writeKeyFlagLedgerAsync(
        uint32_t ledgerSequence,
        BackendInterface const& backend);
    void
    doKeysRepairAsync(
        BackendInterface const& backend,
        std::optional<uint32_t> sequence);
    uint32_t
    getKeyShift()
    {
        return keyShift_;
    }
    // Flag-ledger sequence currently being indexed, or empty when idle.
    std::optional<uint32_t>
    getCurrentlyIndexing()
    {
        uint32_t cur = indexing_.load();
        if (cur != 0)
            return cur;
        return {};
    }
    // Rounds `seq` up to the flag ledger whose index covers it; a flag
    // ledger is its own index.
    KeyIndex
    getKeyIndexOfSeq(uint32_t seq) const
    {
        if (isKeyFlagLedger(seq))
            return KeyIndex{seq};
        auto incr = (1 << keyShift_);
        KeyIndex index{(seq >> keyShift_ << keyShift_) + incr};
        assert(isKeyFlagLedger(index.keyIndex));
        return index;
    }
    // True when `ledgerSequence` falls exactly on a flag boundary.
    bool
    isKeyFlagLedger(uint32_t ledgerSequence) const
    {
        return (ledgerSequence % (1 << keyShift_)) == 0;
    }
};
} // namespace Backend
#endif

View File

@@ -0,0 +1,335 @@
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h>
namespace Backend {
bool
BackendInterface::finishWrites(uint32_t ledgerSequence) const
{
    // Flush accumulated keys, then commit the ledger. Returns the commit
    // result; false means another process has taken over ETL writes.
    indexer_.finish(ledgerSequence, *this);
    auto commitRes = doFinishWrites();
    if (commitRes)
    {
        // First successful commit after (re)gaining write control: verify
        // historical key indexes are intact.
        if (isFirst_)
            indexer_.doKeysRepairAsync(*this, ledgerSequence);
        // Crossing a flag boundary: kick off the next key-index write.
        if (indexer_.isKeyFlagLedger(ledgerSequence))
            indexer_.writeKeyFlagLedgerAsync(ledgerSequence, *this);
        isFirst_ = false;
    }
    else
    {
        // if commitRes is false, we are relinquishing control of ETL. We
        // reset isFirst_ to true so that way if we later regain control of
        // ETL, we trigger the index repair
        isFirst_ = true;
    }
    return commitRes;
}
bool
BackendInterface::isLedgerIndexed(std::uint32_t ledgerSequence) const
{
    // A ledger counts as indexed when a one-object page fetch at that
    // sequence comes back without a warning.
    // NOTE(review): the warning is produced by the concrete backend's
    // doFetchLedgerPage — confirm it specifically flags a missing/partial
    // key index at this sequence.
    auto keyIndex = getKeyIndexOfSeq(ledgerSequence);
    if (keyIndex)
    {
        auto page = doFetchLedgerPage({}, ledgerSequence, 1);
        return !page.warning.has_value();
    }
    return false;
}
void
BackendInterface::writeLedgerObject(
    std::string&& key,
    uint32_t seq,
    std::string&& blob,
    bool isCreated,
    bool isDeleted,
    std::optional<ripple::uint256>&& book) const
{
    // Register the object's key with the indexer (copied out of `key`
    // before the string is moved), then hand everything off to the
    // concrete backend implementation.
    auto indexKey = ripple::uint256::fromVoid(key.data());
    indexer_.addKey(std::move(indexKey));
    doWriteLedgerObject(
        std::move(key),
        seq,
        std::move(blob),
        isCreated,
        isDeleted,
        std::move(book));
}
std::optional<LedgerRange>
BackendInterface::fetchLedgerRangeNoThrow() const
{
BOOST_LOG_TRIVIAL(warning) << __func__;
while (true)
{
try
{
return fetchLedgerRange();
}
catch (DatabaseTimeout& t)
{
;
}
}
}
std::optional<KeyIndex>
BackendInterface::getKeyIndexOfSeq(uint32_t seq) const
{
    // A flag ledger — or the very first ledger in the stored range — acts
    // as its own key index; anything else rounds up via the indexer.
    // Returns empty when the database holds no ledgers at all.
    if (!indexer_.isKeyFlagLedger(seq))
    {
        auto const rng = fetchLedgerRange();
        if (!rng)
            return {};
        if (rng->minSequence != seq)
            return indexer_.getKeyIndexOfSeq(seq);
    }
    return KeyIndex{seq};
}
BookOffersPage
BackendInterface::fetchBookOffers(
    ripple::uint256 const& book,
    uint32_t ledgerSequence,
    std::uint32_t limit,
    std::optional<ripple::uint256> const& cursor) const
{
    // Walks the order book's directory pages in quality order, collecting
    // offer keys until `limit` is reached, then bulk-fetches the offer
    // objects themselves. Timing counters feed the summary log at the end.
    // TODO try to speed this up. This can take a few seconds. The goal is to
    // get it down to a few hundred milliseconds.
    BookOffersPage page;
    const ripple::uint256 bookEnd = ripple::getQualityNext(book);
    ripple::uint256 uTipIndex = book;
    // fix: removed unused local `bool done = false;`
    std::vector<ripple::uint256> keys;
    auto getMillis = [](auto diff) {
        return std::chrono::duration_cast<std::chrono::milliseconds>(diff)
            .count();
    };
    auto begin = std::chrono::system_clock::now();
    uint32_t numSucc = 0;
    uint32_t numPages = 0;
    long succMillis = 0;
    long pageMillis = 0;
    while (keys.size() < limit)
    {
        auto mid1 = std::chrono::system_clock::now();
        // Find the next directory object at or after uTipIndex.
        auto offerDir = fetchSuccessor(uTipIndex, ledgerSequence);
        auto mid2 = std::chrono::system_clock::now();
        numSucc++;
        succMillis += getMillis(mid2 - mid1);
        if (!offerDir || offerDir->key > bookEnd)
        {
            BOOST_LOG_TRIVIAL(debug) << __func__ << " - offerDir.has_value() "
                                     << offerDir.has_value() << " breaking";
            break;
        }
        // Walk this directory's page chain, accumulating offer keys.
        while (keys.size() < limit)
        {
            ++numPages;
            uTipIndex = offerDir->key;
            ripple::STLedgerEntry sle{
                ripple::SerialIter{
                    offerDir->blob.data(), offerDir->blob.size()},
                offerDir->key};
            auto indexes = sle.getFieldV256(ripple::sfIndexes);
            keys.insert(keys.end(), indexes.begin(), indexes.end());
            // TODO we probably don't have to wait here. We can probably fetch
            // these objects in another thread, and move on to another page of
            // the book directory, or another directory. We also could just
            // accumulate all of the keys before fetching the offers
            auto next = sle.getFieldU64(ripple::sfIndexNext);
            if (!next)
            {
                BOOST_LOG_TRIVIAL(debug)
                    << __func__ << " next is empty. breaking";
                break;
            }
            auto nextKey = ripple::keylet::page(uTipIndex, next);
            auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence);
            assert(nextDir);
            offerDir->blob = *nextDir;
            offerDir->key = nextKey.key;
        }
        auto mid3 = std::chrono::system_clock::now();
        pageMillis += getMillis(mid3 - mid2);
    }
    auto mid = std::chrono::system_clock::now();
    // Bulk-fetch all collected offer objects in one round trip.
    auto objs = fetchLedgerObjects(keys, ledgerSequence);
    for (size_t i = 0; i < keys.size(); ++i)
    {
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " key = " << ripple::strHex(keys[i])
            << " blob = " << ripple::strHex(objs[i]);
        assert(objs[i].size());
        page.offers.push_back({keys[i], objs[i]});
    }
    auto end = std::chrono::system_clock::now();
    // fix: corrected "milliseonds" typo in the summary log message.
    BOOST_LOG_TRIVIAL(info)
        << __func__ << " "
        << "Fetching " << std::to_string(keys.size()) << " keys took "
        << std::to_string(getMillis(mid - begin))
        << " milliseconds. Fetching next dir took "
        << std::to_string(succMillis) << " milliseconds. Fetched next dir "
        << std::to_string(numSucc) << " times"
        << " Fetching next page of dir took " << std::to_string(pageMillis)
        << ". num pages = " << std::to_string(numPages)
        << " milliseconds. Fetching all objects took "
        << std::to_string(getMillis(end - mid))
        << " milliseconds. total time = "
        << std::to_string(getMillis(end - begin)) << " milliseconds";
    return page;
}
std::optional<LedgerObject>
BackendInterface::fetchSuccessor(ripple::uint256 key, uint32_t ledgerSequence)
    const
{
    // The successor of `key` is the first object at or after key+1, so ask
    // fetchLedgerPage for a single-object page starting there.
    auto const start = std::chrono::system_clock::now();
    auto page = fetchLedgerPage({++key}, ledgerSequence, 1, 512);
    auto const end = std::chrono::system_clock::now();
    auto const ms =
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
            .count();
    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " took " << std::to_string(ms) << " milliseconds";
    if (page.objects.empty())
        return {};
    return page.objects[0];
}
LedgerPage
BackendInterface::fetchLedgerPage(
    std::optional<ripple::uint256> const& cursor,
    std::uint32_t ledgerSequence,
    std::uint32_t limit,
    std::uint32_t limitHint) const
{
    // Returns up to `limit` ledger objects starting at `cursor`. When the
    // requested ledger's key index is incomplete, falls back to the key set
    // of the previous flag ledger, re-resolves each key at
    // `ledgerSequence`, and marks the page with a warning.
    assert(limit != 0);
    bool incomplete = !isLedgerIndexed(ledgerSequence);
    // really low limits almost always miss
    uint32_t adjustedLimit = std::max(limitHint, std::max(limit, (uint32_t)4));
    LedgerPage page;
    page.cursor = cursor;
    do
    {
        // Grow the fetch size geometrically (capped at 8192) to cut down
        // on round trips when early attempts come back short.
        adjustedLimit = adjustedLimit >= 8192 ? 8192 : adjustedLimit * 2;
        auto start = std::chrono::system_clock::now();
        auto partial =
            doFetchLedgerPage(page.cursor, ledgerSequence, adjustedLimit);
        auto end = std::chrono::system_clock::now();
        // fix: page.cursor may be empty on the first iteration;
        // unconditionally dereferencing it here was undefined behavior.
        BOOST_LOG_TRIVIAL(debug)
            << __func__ << " " << std::to_string(ledgerSequence) << " "
            << std::to_string(adjustedLimit) << " "
            << (page.cursor ? ripple::strHex(*page.cursor) : std::string{})
            << " - time = "
            << std::to_string(
                   std::chrono::duration_cast<std::chrono::milliseconds>(
                       end - start)
                       .count());
        page.objects.insert(
            page.objects.end(), partial.objects.begin(), partial.objects.end());
        page.cursor = partial.cursor;
    } while (page.objects.size() < limit && page.cursor);
    if (incomplete)
    {
        auto rng = fetchLedgerRange();
        if (!rng)
            return page;
        if (rng->minSequence == ledgerSequence)
        {
            BOOST_LOG_TRIVIAL(fatal)
                << __func__
                << " Database is populated but first flag ledger is "
                   "incomplete. This should never happen";
            assert(false);
            throw std::runtime_error("Missing base flag ledger");
        }
        // Round down to the previous flag-ledger boundary (clamped to the
        // oldest stored ledger) and fetch the key set from there.
        uint32_t lowerSequence = (ledgerSequence - 1) >> indexer_.getKeyShift()
            << indexer_.getKeyShift();
        if (lowerSequence < rng->minSequence)
            lowerSequence = rng->minSequence;
        BOOST_LOG_TRIVIAL(debug)
            << __func__
            << " recursing. ledgerSequence = " << std::to_string(ledgerSequence)
            << " , lowerSequence = " << std::to_string(lowerSequence);
        auto lowerPage = fetchLedgerPage(cursor, lowerSequence, limit);
        std::vector<ripple::uint256> keys;
        std::transform(
            std::move_iterator(lowerPage.objects.begin()),
            std::move_iterator(lowerPage.objects.end()),
            std::back_inserter(keys),
            [](auto&& elt) { return std::move(elt.key); });
        // Re-resolve each key at the requested sequence; objects deleted
        // since the flag ledger come back empty and are skipped.
        auto objs = fetchLedgerObjects(keys, ledgerSequence);
        for (size_t i = 0; i < keys.size(); ++i)
        {
            auto& obj = objs[i];
            auto& key = keys[i];
            if (obj.size())
                page.objects.push_back({std::move(key), std::move(obj)});
        }
        // fix: compare by const reference; the comparator previously copied
        // both LedgerObjects (key + blob vector) on every comparison.
        std::sort(
            page.objects.begin(),
            page.objects.end(),
            [](auto const& a, auto const& b) { return a.key < b.key; });
        page.warning = "Data may be incomplete";
    }
    if (page.objects.size() >= limit)
    {
        page.objects.resize(limit);
        page.cursor = page.objects.back().key;
    }
    return page;
}
void
BackendInterface::checkFlagLedgers() const
{
    // Startup sanity check: walk every flag ledger in the stored range and
    // log which are complete. An incomplete flag ledger followed by a
    // complete one is fatal, since pages are reconstructed from the
    // previous flag ledger's key set.
    auto rng = fetchLedgerRangeNoThrow();
    if (rng)
    {
        bool prevComplete = true;
        uint32_t cur = rng->minSequence;
        size_t numIncomplete = 0;
        while (cur <= rng->maxSequence + 1)
        {
            // Jump to the flag ledger covering `cur`.
            auto keyIndex = getKeyIndexOfSeq(cur);
            assert(keyIndex.has_value());
            cur = keyIndex->keyIndex;
            if (!isLedgerIndexed(cur))
            {
                BOOST_LOG_TRIVIAL(warning)
                    << __func__ << " - flag ledger "
                    << std::to_string(keyIndex->keyIndex) << " is incomplete";
                ++numIncomplete;
                prevComplete = false;
            }
            else
            {
                if (!prevComplete)
                {
                    BOOST_LOG_TRIVIAL(fatal)
                        << __func__ << " - flag ledger "
                        << std::to_string(keyIndex->keyIndex)
                        << " is incomplete but the next is complete. This "
                           "should never happen";
                    assert(false);
                    throw std::runtime_error("missing prev flag ledger");
                }
                prevComplete = true;
                BOOST_LOG_TRIVIAL(info)
                    << __func__ << " - flag ledger "
                    << std::to_string(keyIndex->keyIndex) << " is complete";
            }
            // Step past this flag ledger; the next getKeyIndexOfSeq call
            // rounds up to the following one.
            cur = cur + 1;
        }
        if (numIncomplete > 1)
        {
            BOOST_LOG_TRIVIAL(warning)
                << __func__ << " " << std::to_string(numIncomplete)
                << " incomplete flag ledgers. "
                   "This can happen, but is unlikely. Check indexer_key_shift "
                   "in config";
        }
        else
        {
            BOOST_LOG_TRIVIAL(info)
                << __func__ << " number of incomplete flag ledgers = "
                << std::to_string(numIncomplete);
        }
    }
}
} // namespace Backend

View File

@@ -0,0 +1,256 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#include <ripple/ledger/ReadView.h>
#include <boost/asio.hpp>
#include <backend/BackendIndexer.h>
#include <backend/DBHelpers.h>
class ReportingETL;
class AsyncCallData;
class BackendTest_Basic_Test;
namespace Backend {
// *** return types
using Blob = std::vector<unsigned char>;
// A single state object: its 256-bit key plus its serialized data.
struct LedgerObject
{
    ripple::uint256 key;
    Blob blob;
};
// One page of a keyspace scan. `cursor` resumes the scan when set;
// `warning` is set when the page was reconstructed from an incomplete index.
struct LedgerPage
{
    std::vector<LedgerObject> objects;
    std::optional<ripple::uint256> cursor;
    std::optional<std::string> warning;
};
// One page of offers from an order book scan; same cursor/warning
// conventions as LedgerPage.
struct BookOffersPage
{
    std::vector<LedgerObject> offers;
    std::optional<ripple::uint256> cursor;
    std::optional<std::string> warning;
};
// A transaction's serialized blob, its metadata blob, and the sequence of
// the ledger that included it.
struct TransactionAndMetadata
{
    Blob transaction;
    Blob metadata;
    uint32_t ledgerSequence;
    bool
    operator==(const TransactionAndMetadata&) const = default;
};
// Pagination cursor for account_tx: resume from this (ledger, transaction
// index) position.
struct AccountTransactionsCursor
{
    uint32_t ledgerSequence;
    uint32_t transactionIndex;
};
// Inclusive range of ledger sequences currently stored in the database.
struct LedgerRange
{
    uint32_t minSequence;
    uint32_t maxSequence;
};
// Thrown by read methods when the database does not respond in time. RPC
// handlers generally let this propagate; the server turns it into an error
// response for the client.
class DatabaseTimeout : public std::exception
{
public:
    // fix: was `throw()` — dynamic exception specifications are removed in
    // C++20 (this file already uses C++20 defaulted comparisons); also made
    // public (implicitly private in a `class`) so callers can invoke it
    // without going through std::exception.
    const char*
    what() const noexcept override
    {
        return "Database read timed out. Please retry the request";
    }
};
/// Abstract interface to the ledger database (Cassandra or Postgres).
/// Read methods are public; write methods are protected and reachable only
/// by the ETL machinery via friendship.
class BackendInterface
{
protected:
    // Maintains per-flag-ledger key indexes; mutable because reads may
    // trigger repair work.
    mutable BackendIndexer indexer_;
    // True until the first successful commit; used to trigger index repair
    // when this process (re)gains ETL write control.
    mutable bool isFirst_ = true;

public:
    BackendInterface(boost::json::object const& config) : indexer_(config)
    {
    }
    virtual ~BackendInterface()
    {
    }
    BackendIndexer&
    getIndexer() const
    {
        return indexer_;
    }
    // *** public read methods ***
    // All of these reads methods can throw DatabaseTimeout. When writing code
    // in an RPC handler, this exception does not need to be caught: when an RPC
    // results in a timeout, an error is returned to the client
public:
    // *** ledger methods
    virtual std::optional<ripple::LedgerInfo>
    fetchLedgerBySequence(uint32_t sequence) const = 0;
    virtual std::optional<uint32_t>
    fetchLatestLedgerSequence() const = 0;
    virtual std::optional<LedgerRange>
    fetchLedgerRange() const = 0;
    // Doesn't throw DatabaseTimeout. Should be used with care.
    std::optional<LedgerRange>
    fetchLedgerRangeNoThrow() const;
    // *** transaction methods
    virtual std::optional<TransactionAndMetadata>
    fetchTransaction(ripple::uint256 const& hash) const = 0;
    virtual std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes) const = 0;
    virtual std::pair<
        std::vector<TransactionAndMetadata>,
        std::optional<AccountTransactionsCursor>>
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t limit,
        std::optional<AccountTransactionsCursor> const& cursor = {}) const = 0;
    virtual std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(uint32_t ledgerSequence) const = 0;
    virtual std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(uint32_t ledgerSequence) const = 0;
    // *** state data methods
    virtual std::optional<Blob>
    fetchLedgerObject(ripple::uint256 const& key, uint32_t sequence) const = 0;
    virtual std::vector<Blob>
    fetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        uint32_t sequence) const = 0;
    // Fetches a page of ledger objects, ordered by key/index.
    // Used by ledger_data
    LedgerPage
    fetchLedgerPage(
        std::optional<ripple::uint256> const& cursor,
        std::uint32_t ledgerSequence,
        std::uint32_t limit,
        std::uint32_t limitHint = 0) const;
    // Fetches the successor to key/index. key need not actually be a valid
    // key/index.
    std::optional<LedgerObject>
    fetchSuccessor(ripple::uint256 key, uint32_t ledgerSequence) const;
    BookOffersPage
    fetchBookOffers(
        ripple::uint256 const& book,
        uint32_t ledgerSequence,
        std::uint32_t limit,
        std::optional<ripple::uint256> const& cursor = {}) const;
    // Methods related to the indexer
    // True when the flag ledger covering `ledgerSequence` has been fully
    // written (its completion record is present).
    bool
    isLedgerIndexed(std::uint32_t ledgerSequence) const;
    // Key index covering `seq`, or empty when the database has no ledgers.
    std::optional<KeyIndex>
    getKeyIndexOfSeq(uint32_t seq) const;
    // *** protected write methods
protected:
    friend class ::ReportingETL;
    friend class BackendIndexer;
    friend class ::AsyncCallData;
    friend std::shared_ptr<BackendInterface>
    make_Backend(boost::json::object const& config);
    friend class ::BackendTest_Basic_Test;
    virtual void
    writeLedger(
        ripple::LedgerInfo const& ledgerInfo,
        std::string&& ledgerHeader,
        bool isFirst = false) const = 0;
    // Registers the key with the indexer, then forwards to the concrete
    // backend's doWriteLedgerObject.
    void
    writeLedgerObject(
        std::string&& key,
        uint32_t seq,
        std::string&& blob,
        bool isCreated,
        bool isDeleted,
        std::optional<ripple::uint256>&& book) const;
    virtual void
    writeTransaction(
        std::string&& hash,
        uint32_t seq,
        std::string&& transaction,
        std::string&& metadata) const = 0;
    virtual void
    writeAccountTransactions(
        std::vector<AccountTransactionsData>&& data) const = 0;
    // TODO: this function, or something similar, could be called internally by
    // writeLedgerObject
    virtual bool
    writeKeys(
        std::unordered_set<ripple::uint256> const& keys,
        KeyIndex const& index,
        bool isAsync = false) const = 0;
    // Tell the database we are about to begin writing data for a particular
    // ledger.
    virtual void
    startWrites() const = 0;
    // Tell the database we have finished writing all data for a particular
    // ledger
    bool
    finishWrites(uint32_t ledgerSequence) const;
    virtual bool
    doOnlineDelete(uint32_t numLedgersToKeep) const = 0;
    // Open the database. Set up all of the necessary objects and
    // datastructures. After this call completes, the database is ready for
    // use.
    virtual void
    open(bool readOnly) = 0;
    // Close the database, releasing any resources
    virtual void
    close() = 0;
    // *** private helper methods
private:
    virtual LedgerPage
    doFetchLedgerPage(
        std::optional<ripple::uint256> const& cursor,
        std::uint32_t ledgerSequence,
        std::uint32_t limit) const = 0;
    virtual void
    doWriteLedgerObject(
        std::string&& key,
        uint32_t seq,
        std::string&& blob,
        bool isCreated,
        bool isDeleted,
        std::optional<ripple::uint256>&& book) const = 0;
    virtual bool
    doFinishWrites() const = 0;
    // Startup sanity check over all flag ledgers; see BackendInterface.cpp.
    void
    checkFlagLedgers() const;
};
} // namespace Backend
using BackendInterface = Backend::BackendInterface;
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

170
src/backend/DBHelpers.cpp Normal file
View File

@@ -0,0 +1,170 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <boost/format.hpp>
#include <memory>
#include <backend/DBHelpers.h>
/// Inserts one row into the Postgres `ledgers` table for this ledger header.
/// Returns the query result converted to bool — per the caller's comment,
/// the insert fails when the ledger already exists (another writer got
/// there first).
static bool
writeToLedgersDB(ripple::LedgerInfo const& info, PgQuery& pgQuery)
{
    BOOST_LOG_TRIVIAL(debug) << __func__;
    auto cmd = boost::format(
        R"(INSERT INTO ledgers
           VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");
    // Column order must match the ledgers table schema.
    auto ledgerInsert = boost::str(
        cmd % info.seq % ripple::strHex(info.hash) %
        ripple::strHex(info.parentHash) % info.drops.drops() %
        info.closeTime.time_since_epoch().count() %
        info.parentCloseTime.time_since_epoch().count() %
        info.closeTimeResolution.count() % info.closeFlags %
        ripple::strHex(info.accountHash) % ripple::strHex(info.txHash));
    BOOST_LOG_TRIVIAL(trace) << __func__ << " : "
                             << " : "
                             << "query string = " << ledgerInsert;
    auto res = pgQuery(ledgerInsert.data());
    return res;
}
/*
bool
writeBooks(std::vector<BookDirectoryData> const& bookDirData, PgQuery& pg)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Writing " << bookDirData.size() << "books to Postgres";
try
{
std::stringstream booksCopyBuffer;
for (auto const& data : bookDirData)
{
std::string directoryIndex = ripple::strHex(data.directoryIndex);
std::string bookIndex = ripple::strHex(data.bookIndex);
auto ledgerSeq = data.ledgerSequence;
booksCopyBuffer << "\\\\x" << directoryIndex << '\t'
<< std::to_string(ledgerSeq) << '\t' << "\\\\x"
<< bookIndex << '\n';
}
pg.bulkInsert("books", booksCopyBuffer.str());
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "Successfully inserted books";
return true;
}
catch (std::exception& e)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << "Caught exception inserting books : " << e.what();
assert(false);
return false;
}
}
*/
/*
bool
writeToPostgres(
ripple::LedgerInfo const& info,
std::vector<AccountTransactionsData> const& accountTxData,
std::shared_ptr<PgPool> const& pgPool)
{
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Beginning write to Postgres";
try
{
// Create a PgQuery object to run multiple commands over the
// same connection in a single transaction block.
PgQuery pg(pgPool);
auto res = pg("BEGIN");
if (!res || res.status() != PGRES_COMMAND_OK)
{
std::stringstream msg;
msg << "bulkWriteToTable : Postgres insert error: " << res.msg();
throw std::runtime_error(msg.str());
}
// Writing to the ledgers db fails if the ledger already
// exists in the db. In this situation, the ETL process has
// detected there is another writer, and falls back to only
// publishing
if (!writeToLedgersDB(info, pg))
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " : "
<< "Failed to write to ledgers database.";
return false;
}
std::stringstream transactionsCopyBuffer;
std::stringstream accountTransactionsCopyBuffer;
for (auto const& data : accountTxData)
{
std::string txHash = ripple::strHex(data.txHash);
std::string nodestoreHash = ripple::strHex(data.nodestoreHash);
auto idx = data.transactionIndex;
auto ledgerSeq = data.ledgerSequence;
transactionsCopyBuffer << std::to_string(ledgerSeq) << '\t'
<< std::to_string(idx) << '\t' << "\\\\x"
<< txHash << '\t' << "\\\\x" << nodestoreHash
<< '\n';
for (auto const& a : data.accounts)
{
std::string acct = ripple::strHex(a);
accountTransactionsCopyBuffer
<< "\\\\x" << acct << '\t' << std::to_string(ledgerSeq)
<< '\t' << std::to_string(idx) << '\n';
}
}
pg.bulkInsert("transactions", transactionsCopyBuffer.str());
pg.bulkInsert(
"account_transactions", accountTransactionsCopyBuffer.str());
res = pg("COMMIT");
if (!res || res.status() != PGRES_COMMAND_OK)
{
std::stringstream msg;
msg << "bulkWriteToTable : Postgres insert error: " << res.msg();
assert(false);
throw std::runtime_error(msg.str());
}
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "Successfully wrote to Postgres";
return true;
}
catch (std::exception& e)
{
BOOST_LOG_TRIVIAL(error)
<< __func__
<< "Caught exception writing to Postgres : " << e.what();
assert(false);
return false;
}
}
*/

104
src/backend/DBHelpers.h Normal file
View File

@@ -0,0 +1,104 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_REPORTING_DBHELPERS_H_INCLUDED
#define RIPPLE_APP_REPORTING_DBHELPERS_H_INCLUDED
#include <ripple/basics/Log.h>
#include <ripple/protocol/TxMeta.h>
#include <boost/container/flat_set.hpp>
#include <backend/Pg.h>
/// Struct used to keep track of what to write to transactions and
/// account_transactions tables in Postgres
struct AccountTransactionsData
{
    // Every account touched by the transaction, per its metadata.
    boost::container::flat_set<ripple::AccountID> accounts;
    uint32_t ledgerSequence;
    // Position of the transaction within its ledger.
    uint32_t transactionIndex;
    ripple::uint256 txHash;
    AccountTransactionsData(
        ripple::TxMeta& meta,
        ripple::uint256 const& txHash,
        beast::Journal& j)
        : accounts(meta.getAffectedAccounts(j))
        , ledgerSequence(meta.getLgrSeq())
        , transactionIndex(meta.getIndex())
        , txHash(txHash)
    {
    }
    AccountTransactionsData() = default;
};
/// True if the raw serialized ledger object in `object` is an offer: the
/// entry-type value read from bytes 1 and 2 equals 0x006f.
template <class T>
inline bool
isOffer(T const& object)
{
    short const entryType = (object[1] << 8) | object[2];
    return entryType == 0x006f;
}
/// Same check as isOffer, but for a hex-encoded object: decodes the first
/// four hex characters (two bytes) and tests the entry type against 0x006f.
/// Returns false when the input is not valid hex.
template <class T>
inline bool
isOfferHex(T const& object)
{
    auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4);
    if (blob)
    {
        short offer_bytes = ((*blob)[1] << 8) | (*blob)[2];
        return offer_bytes == 0x006f;
    }
    return false;
}
/// Extract the book directory from a serialized offer.
///
/// @param offer serialized offer object.
/// @return the value of the BookDirectory field.
template <class T>
inline ripple::uint256
getBook(T const& offer)
{
    ripple::SerialIter sit{offer.data(), offer.size()};
    ripple::SLE offerSle{sit, {}};
    return offerSle.getFieldH256(ripple::sfBookDirectory);
}
/// Deserialize a ledger header blob, as stored in the database, into a
/// LedgerInfo.
///
/// NOTE: fields must be read in exactly this order — it must match the
/// order used when the header was serialized (presumably rippled's
/// ledger-header serialization; confirm against the writer). The ledger
/// hash is appended after the standard header fields, so it is read last.
///
/// @param data raw serialized header bytes.
/// @return the decoded ledger header.
inline ripple::LedgerInfo
deserializeHeader(ripple::Slice data)
{
    ripple::SerialIter sit(data.data(), data.size());
    ripple::LedgerInfo info;
    info.seq = sit.get32();
    info.drops = sit.get64();
    info.parentHash = sit.get256();
    info.txHash = sit.get256();
    info.accountHash = sit.get256();
    // Close times are stored as raw NetClock ticks.
    info.parentCloseTime =
        ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
    info.closeTime =
        ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
    info.closeTimeResolution = ripple::NetClock::duration{sit.get8()};
    info.closeFlags = sit.get8();
    info.hash = sit.get256();
    return info;
}
#endif

1492
src/backend/Pg.cpp Normal file

File diff suppressed because it is too large Load Diff

552
src/backend/Pg.h Normal file
View File

@@ -0,0 +1,552 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_CORE_PG_H_INCLUDED
#define RIPPLE_CORE_PG_H_INCLUDED
#include <ripple/basics/StringUtilities.h>
#include <ripple/basics/chrono.h>
#include <ripple/ledger/ReadView.h>
#include <boost/icl/closed_interval.hpp>
#include <boost/json.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/log/trivial.hpp>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <libpq-fe.h>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
// These postgres structs must be freed only by the postgres API.
using pg_result_type = std::unique_ptr<PGresult, void (*)(PGresult*)>;
using pg_connection_type = std::unique_ptr<PGconn, void (*)(PGconn*)>;
/** first: command
* second: parameter values
*
* The 2nd member takes an optional string to
* distinguish between NULL parameters and empty strings. An empty
* item corresponds to a NULL parameter.
*
* Postgres reads each parameter as a c-string, regardless of actual type.
* Binary types (bytea) need to be converted to hex and prepended with
* \x ("\\x").
*/
using pg_params =
std::pair<char const*, std::vector<std::optional<std::string>>>;
/** Parameter values for pg API. */
using pg_formatted_params = std::vector<char const*>;
/** Parameters for managing postgres connections. */
struct PgConfig
{
/** Maximum connections allowed to db. */
std::size_t max_connections{std::numeric_limits<std::size_t>::max()};
/** Close idle connections past this duration. */
std::chrono::seconds timeout{600};
/** Index of DB connection parameter names. */
std::vector<char const*> keywordsIdx;
/** DB connection parameter names. */
std::vector<std::string> keywords;
/** Index of DB connection parameter values. */
std::vector<char const*> valuesIdx;
/** DB connection parameter values. */
std::vector<std::string> values;
};
//-----------------------------------------------------------------------------
/** Class that operates on postgres query results.
*
* The functions that return results do not check first whether the
* expected results are actually there. Therefore, the caller first needs
* to check whether or not a valid response was returned using the operator
* bool() overload. If number of tuples or fields are unknown, then check
* those. Each result field should be checked for null before attempting
* to return results. Finally, the caller must know the type of the field
* before calling the corresponding function to return a field. Postgres
* internally stores each result field as null-terminated strings.
*/
class PgResult
{
    // The result object must be freed using the libpq API PQclear() call.
    pg_result_type result_{nullptr, [](PGresult* result) { PQclear(result); }};
    // Populated only by the failure constructor: the libpq status code and
    // the connection's error message captured at failure time.
    std::optional<std::pair<ExecStatusType, std::string>> error_;
public:
    /** Constructor for when the process is stopping.
     *
     */
    PgResult()
    {
    }
    /** Constructor for successful query results.
     *
     * @param result Query result.
     */
    explicit PgResult(pg_result_type&& result) : result_(std::move(result))
    {
    }
    /** Constructor for failed query results.
     *
     * @param result Query result that contains error information.
     * @param conn Postgres connection that contains error information.
     */
    PgResult(PGresult* result, PGconn* conn)
        : error_({PQresultStatus(result), PQerrorMessage(conn)})
    {
    }
    /** Return field as a null-terminated string pointer.
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    char const*
    c_str(int ntuple = 0, int nfield = 0) const
    {
        return PQgetvalue(result_.get(), ntuple, nfield);
    }
    /** Return a bytea field as a blob of raw bytes.
     *
     * The field is expected to be a postgres hex string ("\x..."); the
     * leading two characters are skipped before un-hexing. Returns an
     * empty blob if the contents fail to parse as hex. The same caveats
     * as c_str() apply regarding existence of the row/field.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents as raw bytes.
     */
    std::vector<unsigned char>
    asUnHexedBlob(int ntuple = 0, int nfield = 0) const
    {
        std::string_view view{c_str(ntuple, nfield) + 2};
        auto res = ripple::strUnHex(view.size(), view.cbegin(), view.cend());
        if (res)
            return *res;
        return {};
    }
    /** Return a bytea field as a 256-bit hash.
     *
     * The field is expected to be a postgres hex string ("\x..."); the
     * leading two characters are skipped before parsing.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents as a uint256.
     * @throws std::runtime_error if the field is not valid hex.
     */
    ripple::uint256
    asUInt256(int ntuple = 0, int nfield = 0) const
    {
        ripple::uint256 val;
        if (!val.parseHex(c_str(ntuple, nfield) + 2))
            throw std::runtime_error("Pg - failed to parse hex into uint256");
        return val;
    }
    /** Return field as equivalent to Postgres' INT type (32 bit signed).
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null, or that the type is that requested.
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    std::int32_t
    asInt(int ntuple = 0, int nfield = 0) const
    {
        return boost::lexical_cast<std::int32_t>(
            PQgetvalue(result_.get(), ntuple, nfield));
    }
    /** Return field as equivalent to Postgres' BIGINT type (64 bit signed).
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null, or that the type is that requested.
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    std::int64_t
    asBigInt(int ntuple = 0, int nfield = 0) const
    {
        return boost::lexical_cast<std::int64_t>(
            PQgetvalue(result_.get(), ntuple, nfield));
    }
    /** Returns whether the field is NULL or not.
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Whether field is NULL.
     */
    bool
    isNull(int ntuple = 0, int nfield = 0) const
    {
        return PQgetisnull(result_.get(), ntuple, nfield);
    }
    /** Check whether a valid response occurred.
     *
     * @return Whether or not the query returned a valid response.
     */
    operator bool() const
    {
        return result_ != nullptr;
    }
    /** Message describing the query results suitable for diagnostics.
     *
     * If error, then the postgres error type and message are returned.
     * Otherwise, "ok"
     *
     * @return Query result message.
     */
    std::string
    msg() const;
    /** Get number of rows in result.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Number of result rows.
     */
    int
    ntuples() const
    {
        return PQntuples(result_.get());
    }
    /** Get number of fields in result.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Number of result fields.
     */
    int
    nfields() const
    {
        return PQnfields(result_.get());
    }
    /** Return result status of the command.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return libpq execution status of the last command.
     */
    ExecStatusType
    status() const
    {
        return PQresultStatus(result_.get());
    }
};
/** Class that contains and operates upon a single postgres connection. */
class Pg
{
    friend class PgPool;
    friend class PgQuery;
    // Connection parameters shared with the owning pool.
    PgConfig const& config_;
    // Reference to the owning pool's stop flag.
    bool& stop_;
    // Reference to the owning pool's mutex.
    std::mutex& mutex_;
    // The connection object must be freed using the libpq API PQfinish() call.
    pg_connection_type conn_{nullptr, [](PGconn* conn) { PQfinish(conn); }};
    /** Clear results from the connection.
     *
     * Results from previous commands must be cleared before new commands
     * can be processed. This function should be called on connections
     * that weren't processed completely before being reused, such as
     * when being checked-in.
     *
     * @return whether or not connection still exists.
     */
    bool
    clear();
    /** Connect to postgres.
     *
     * Idempotently connects to postgres by first checking whether an
     * existing connection is already present. If connection is not present
     * or in an errored state, reconnects to the database.
     */
    void
    connect();
    /** Disconnect from postgres. */
    void
    disconnect()
    {
        conn_.reset();
    }
    /** Execute postgres query.
     *
     * If parameters are included, then the command should contain only a
     * single SQL statement. If no parameters, then multiple SQL statements
     * delimited by semi-colons can be processed. The response is from
     * the last command executed.
     *
     * @param command postgres API command string.
     * @param nParams postgres API number of parameters.
     * @param values postgres API array of parameter.
     * @return Query result object.
     */
    PgResult
    query(char const* command, std::size_t nParams, char const* const* values);
    /** Execute postgres query with no parameters.
     *
     * @param command Query string.
     * @return Query result object;
     */
    PgResult
    query(char const* command)
    {
        return query(command, 0, nullptr);
    }
    /** Execute postgres query with parameters.
     *
     * @param dbParams Database command and parameter values.
     * @return Query result object.
     */
    PgResult
    query(pg_params const& dbParams);
    /** Insert multiple records into a table using Postgres' bulk COPY.
     *
     * Throws upon error.
     *
     * @param table Name of table for import.
     * @param records Records in the COPY IN format.
     */
    void
    bulkInsert(char const* table, std::string const& records);
public:
    /** Constructor for Pg class.
     *
     * @param config Config parameters.
     * @param stop Reference to connection pool's stop flag.
     * @param mutex Reference to connection pool's mutex.
     */
    Pg(PgConfig const& config, bool& stop, std::mutex& mutex)
        : config_(config), stop_(stop), mutex_(mutex)
    {
    }
};
//-----------------------------------------------------------------------------
/** Database connection pool.
*
* Allow re-use of postgres connections. Postgres connections are created
* as needed until configurable limit is reached. After use, each connection
* is placed in a container ordered by time of use. Each request for
* a connection grabs the most recently used connection from the container.
* If none are available, a new connection is used (up to configured limit).
* Idle connections are destroyed periodically after configurable
* timeout duration.
*
* This should be stored as a shared pointer so PgQuery objects can safely
* outlive it.
*/
class PgPool
{
    friend class PgQuery;
    using clock_type = std::chrono::steady_clock;
    // Connection parameters derived from the JSON config.
    PgConfig config_;
    // Presumably guards pool state (connections_, stop_, idle_); shared by
    // reference with each Pg — confirm against checkout()/checkin().
    std::mutex mutex_;
    std::condition_variable cond_;
    // Number of connections created and not yet destroyed.
    std::size_t connections_{};
    // Set when the pool is shutting down (see onStop()).
    bool stop_{false};
    /** Idle database connections ordered by timestamp to allow timing out. */
    std::multimap<std::chrono::time_point<clock_type>, std::unique_ptr<Pg>>
        idle_;
    /** Get a postgres connection object.
     *
     * Return the most recent idle connection in the pool, if available.
     * Otherwise, return a new connection unless we're at the threshold.
     * If so, then wait until a connection becomes available.
     *
     * @return Postgres object.
     */
    std::unique_ptr<Pg>
    checkout();
    /** Return a postgres object to the pool for reuse.
     *
     * If connection is healthy, place in pool for reuse. After calling this,
     * the container no longer have a connection unless checkout() is called.
     *
     * @param pg Pg object.
     */
    void
    checkin(std::unique_ptr<Pg>& pg);
public:
    /** Connection pool constructor.
     *
     * @param config JSON configuration with postgres connection settings.
     */
    PgPool(boost::json::object const& config);
    /** Stops the pool on destruction. */
    ~PgPool()
    {
        onStop();
    }
    /** Initiate idle connection timer.
     *
     * The PgPool object needs to be fully constructed to support asynchronous
     * operations.
     */
    void
    setup();
    /** Prepare for process shutdown. (Stoppable) */
    void
    onStop();
    /** Disconnect idle postgres connections. */
    void
    idleSweeper();
};
//-----------------------------------------------------------------------------
/** Class to query postgres.
*
* This class should be used by functions outside of this
* compilation unit for querying postgres. It automatically acquires and
* relinquishes a database connection to handle each query.
*/
class PgQuery
{
private:
std::shared_ptr<PgPool> pool_;
std::unique_ptr<Pg> pg_;
public:
PgQuery() = delete;
PgQuery(std::shared_ptr<PgPool> const& pool)
: pool_(pool), pg_(pool->checkout())
{
}
~PgQuery()
{
pool_->checkin(pg_);
}
// TODO. add sendQuery and getResult, for sending the query and getting the
// result asynchronously. This could be useful for sending a bunch of
// requests concurrently
/** Execute postgres query with parameters.
*
* @param dbParams Database command with parameters.
* @return Result of query, including errors.
*/
PgResult
operator()(pg_params const& dbParams)
{
if (!pg_) // It means we're stopping. Return empty result.
return PgResult();
return pg_->query(dbParams);
}
/** Execute postgres query with only command statement.
*
* @param command Command statement.
* @return Result of query, including errors.
*/
PgResult
operator()(char const* command)
{
return operator()(pg_params{command, {}});
}
/** Insert multiple records into a table using Postgres' bulk COPY.
*
* Throws upon error.
*
* @param table Name of table for import.
* @param records Records in the COPY IN format.
*/
void
bulkInsert(char const* table, std::string const& records)
{
pg_->bulkInsert(table, records);
}
};
//-----------------------------------------------------------------------------
/** Create Postgres connection pool manager.
 *
 * @param pgConfig Configuration for Postgres.
 * @return Postgres connection pool manager
 */
std::shared_ptr<PgPool>
make_PgPool(boost::json::object const& pgConfig);
/** Initialize the Postgres schema.
*
* This function ensures that the database is running the latest version
* of the schema.
*
* @param pool Postgres connection pool manager.
*/
void
initSchema(std::shared_ptr<PgPool> const& pool);
// Load the ledger info for the specified ledger from the database
// @param whichLedger specifies the ledger to load via ledger sequence, ledger
// hash or std::monostate (which loads the most recent)
// @return the LedgerInfo if found, otherwise an empty optional
std::optional<ripple::LedgerInfo>
getLedger(
std::variant<std::monostate, ripple::uint256, uint32_t> const& whichLedger,
std::shared_ptr<PgPool>& pgPool);
#endif // RIPPLE_CORE_PG_H_INCLUDED

View File

@@ -0,0 +1,791 @@
#include <boost/asio.hpp>
#include <boost/format.hpp>
#include <backend/PostgresBackend.h>
namespace Backend {
// Construct the postgres backend: create the connection pool and reserve
// a dedicated connection for the write path.
PostgresBackend::PostgresBackend(boost::json::object const& config)
    : BackendInterface(config)
    , pgPool_(make_PgPool(config))
    , writeConnection_(pgPool_)
{
    // Allow the config to override the default buffered-write flush
    // threshold.
    if (auto const* interval = config.if_contains("write_interval"))
        writeInterval_ = interval->as_int64();
}
// Insert one row into the ledgers table. Hash columns are written as
// postgres hex literals ('\x...').
// NOTE(review): ledgerHeader and isFirst are unused here — the raw header
// blob is not persisted by this backend; confirm that is intentional.
void
PostgresBackend::writeLedger(
    ripple::LedgerInfo const& ledgerInfo,
    std::string&& ledgerHeader,
    bool isFirst) const
{
    auto cmd = boost::format(
        R"(INSERT INTO ledgers
           VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");
    auto ledgerInsert = boost::str(
        cmd % ledgerInfo.seq % ripple::strHex(ledgerInfo.hash) %
        ripple::strHex(ledgerInfo.parentHash) % ledgerInfo.drops.drops() %
        ledgerInfo.closeTime.time_since_epoch().count() %
        ledgerInfo.parentCloseTime.time_since_epoch().count() %
        ledgerInfo.closeTimeResolution.count() % ledgerInfo.closeFlags %
        ripple::strHex(ledgerInfo.accountHash) %
        ripple::strHex(ledgerInfo.txHash));
    // A failed insert aborts the whole batch; doFinishWrites() checks
    // abortWrite_ before flushing the buffered COPY data.
    auto res = writeConnection_(ledgerInsert.data());
    abortWrite_ = !res;
}
// Buffer account_transactions rows in COPY format; the actual bulk insert
// happens in doFinishWrites() on the dedicated write connection.
void
PostgresBackend::writeAccountTransactions(
    std::vector<AccountTransactionsData>&& data) const
{
    if (abortWrite_)
        return;
    // Fix: removed an unused `PgQuery pg(pgPool_)` local that checked a
    // connection out of the pool (potentially blocking a real user of the
    // pool) without ever issuing a query.
    for (auto const& record : data)
    {
        // One row per affected account.
        for (auto const& a : record.accounts)
        {
            std::string acct = ripple::strHex(a);
            accountTxBuffer_ << "\\\\x" << acct << '\t'
                             << std::to_string(record.ledgerSequence) << '\t'
                             << std::to_string(record.transactionIndex) << '\t'
                             << "\\\\x" << ripple::strHex(record.txHash)
                             << '\n';
        }
    }
}
// Buffer one objects-table row in COPY format, flushing the buffer to the
// database whenever it accumulates writeInterval_ rows.
// NOTE: isCreated, isDeleted and book are accepted for interface
// compatibility but not used by the postgres backend.
void
PostgresBackend::doWriteLedgerObject(
    std::string&& key,
    uint32_t seq,
    std::string&& blob,
    bool isCreated,
    bool isDeleted,
    std::optional<ripple::uint256>&& book) const
{
    if (abortWrite_)
        return;
    auto& buffer = objectsBuffer_;
    buffer << "\\\\x" << ripple::strHex(key) << '\t';
    buffer << std::to_string(seq) << '\t';
    buffer << "\\\\x" << ripple::strHex(blob) << '\n';
    ++numRowsInObjectsBuffer_;
    // If the buffer gets too large, the insert fails. Not sure why. So we
    // flush periodically (every writeInterval_ rows).
    if (numRowsInObjectsBuffer_ % writeInterval_ == 0)
    {
        BOOST_LOG_TRIVIAL(info)
            << __func__ << " Flushing large buffer. num objects = "
            << numRowsInObjectsBuffer_;
        writeConnection_.bulkInsert("objects", objectsBuffer_.str());
        BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
        objectsBuffer_.str("");
    }
}
// Buffer one transactions-table row in COPY format; flushed to the
// database by doFinishWrites().
void
PostgresBackend::writeTransaction(
    std::string&& hash,
    uint32_t seq,
    std::string&& transaction,
    std::string&& metadata) const
{
    if (abortWrite_)
        return;
    auto& buffer = transactionsBuffer_;
    buffer << "\\\\x" << ripple::strHex(hash) << '\t';
    buffer << std::to_string(seq) << '\t';
    buffer << "\\\\x" << ripple::strHex(transaction) << '\t';
    buffer << "\\\\x" << ripple::strHex(metadata) << '\n';
}
/** Validate a query result and return its row count.
 *
 * @param res Query result to validate.
 * @param numFieldsExpected Number of columns the caller expects.
 * @return Number of rows in the result (0 if null or empty).
 * @throws DatabaseTimeout if the error message indicates a statement
 *         timeout; std::runtime_error for any other failure or a field
 *         count mismatch.
 */
uint32_t
checkResult(PgResult const& res, uint32_t numFieldsExpected)
{
    if (!res)
    {
        auto msg = res.msg();
        BOOST_LOG_TRIVIAL(debug) << msg;
        // Fix: std::string::find returns a position, not a bool. The old
        // `if (msg.find(...))` treated npos (not found) as true and a
        // match at position 0 as false — the exact inverse of the intent.
        if (msg.find("statement timeout") != std::string::npos)
            throw DatabaseTimeout();
        assert(false);
        throw std::runtime_error(msg);
    }
    if (res.status() != PGRES_TUPLES_OK)
    {
        std::stringstream msg;
        msg << " : Postgres response should have been "
               "PGRES_TUPLES_OK but instead was "
            << res.status() << " - msg = " << res.msg();
        assert(false);
        throw std::runtime_error(msg.str());
    }
    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " Postgres result msg  : " << res.msg();
    if (res.isNull() || res.ntuples() == 0)
        return 0;
    if (res.nfields() != numFieldsExpected)
    {
        std::stringstream msg;
        msg << "Wrong number of fields in Postgres "
               "response. Expected "
            << numFieldsExpected << ", but got " << res.nfields();
        // Fix: the assert used to sit after the throw, making it
        // unreachable.
        assert(false);
        throw std::runtime_error(msg.str());
    }
    return res.ntuples();
}
// Build a LedgerInfo from one row of the ledgers table. Column positions
// must match the table schema (seq, hash, prev_hash, total_coins,
// close_time, prev_close_time, close_time_res, close_flags, account_hash,
// tx_hash).
ripple::LedgerInfo
parseLedgerInfo(PgResult const& res)
{
    using time_point = ripple::NetClock::time_point;
    using duration = ripple::NetClock::duration;
    ripple::LedgerInfo info;
    info.seq = res.asBigInt(0, 0);
    info.hash = res.asUInt256(0, 1);
    info.parentHash = res.asUInt256(0, 2);
    info.drops = res.asBigInt(0, 3);
    info.closeTime = time_point{duration{res.asBigInt(0, 4)}};
    info.parentCloseTime = time_point{duration{res.asBigInt(0, 5)}};
    info.closeTimeResolution = duration{res.asBigInt(0, 6)};
    info.closeFlags = res.asBigInt(0, 7);
    info.accountHash = res.asUInt256(0, 8);
    info.txHash = res.asUInt256(0, 9);
    // Anything read back from the database is a validated ledger.
    info.validated = true;
    return info;
}
// Return the sequence of the most recently written ledger, if any.
std::optional<uint32_t>
PostgresBackend::fetchLatestLedgerSequence() const
{
    PgQuery pgQuery(pgPool_);
    pgQuery("SET statement_timeout TO 10000");
    auto res = pgQuery(
        "SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1");
    if (!checkResult(res, 1))
        return {};
    return res.asBigInt(0, 0);
}
// Load the header of the ledger with the given sequence, if present.
std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerBySequence(uint32_t sequence) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery("SET statement_timeout TO 10000");
    std::string const sql =
        "SELECT * FROM ledgers WHERE ledger_seq = " + std::to_string(sequence);
    auto res = pgQuery(sql.c_str());
    if (!checkResult(res, 10))
        return {};
    return parseLedgerInfo(res);
}
/** Fetch the contiguous range of ledgers stored in the database.
 *
 * The complete_ledgers() SQL function returns "empty", a single
 * sequence, or "min-max".
 *
 * @return The ledger range, or empty optional if none is available.
 */
std::optional<LedgerRange>
PostgresBackend::fetchLedgerRange() const
{
    auto range = PgQuery(pgPool_)("SELECT complete_ledgers()");
    if (!range)
        return {};
    std::string res{range.c_str()};
    BOOST_LOG_TRIVIAL(debug) << "range is = " << res;
    try
    {
        size_t minVal = 0;
        size_t maxVal = 0;
        if (res == "empty" || res == "error" || res.empty())
            return {};
        else if (size_t delim = res.find('-'); delim != std::string::npos)
        {
            // "min-max" form.
            minVal = std::stol(res.substr(0, delim));
            maxVal = std::stol(res.substr(delim + 1));
        }
        else
        {
            // Single-sequence form.
            minVal = maxVal = std::stol(res);
        }
        return LedgerRange{minVal, maxVal};
    }
    catch (std::exception const&)
    {
        // Fix: the old message referenced getCompleteLedgers(), which is
        // not the name of the SQL function invoked above.
        BOOST_LOG_TRIVIAL(error)
            << __func__ << " : "
            << "Error parsing result of complete_ledgers()";
    }
    return {};
}
// Fetch the most recent version of a single ledger object at or before
// the given sequence. An empty stored blob means the object was deleted.
std::optional<Blob>
PostgresBackend::fetchLedgerObject(
    ripple::uint256 const& key,
    uint32_t sequence) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery("SET statement_timeout TO 10000");
    std::stringstream sql;
    sql << "SELECT object FROM objects WHERE key = "
        << "\'\\x" << ripple::strHex(key) << "\'"
        << " AND ledger_seq <= " << std::to_string(sequence)
        << " ORDER BY ledger_seq DESC LIMIT 1";
    auto res = pgQuery(sql.str().data());
    if (!checkResult(res, 1))
        return {};
    auto blob = res.asUnHexedBlob(0, 0);
    if (blob.empty())
        return {};
    return blob;
}
// Look up a single transaction by hash; returns the raw transaction, its
// metadata, and the sequence of the ledger that contains it.
std::optional<TransactionAndMetadata>
PostgresBackend::fetchTransaction(ripple::uint256 const& hash) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery("SET statement_timeout TO 10000");
    std::stringstream sql;
    sql << "SELECT transaction,metadata,ledger_seq FROM transactions "
           "WHERE hash = "
        << "\'\\x" << ripple::strHex(hash) << "\'";
    auto res = pgQuery(sql.str().data());
    if (!checkResult(res, 3))
        return {};
    return {
        {res.asUnHexedBlob(0, 0),
         res.asUnHexedBlob(0, 1),
         res.asBigInt(0, 2)}};
}
std::vector<TransactionAndMetadata>
PostgresBackend::fetchAllTransactionsInLedger(uint32_t ledgerSequence) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
std::stringstream sql;
sql << "SELECT transaction, metadata, ledger_seq FROM transactions WHERE "
<< "ledger_seq = " << std::to_string(ledgerSequence);
auto res = pgQuery(sql.str().data());
if (size_t numRows = checkResult(res, 3))
{
std::vector<TransactionAndMetadata> txns;
for (size_t i = 0; i < numRows; ++i)
{
txns.push_back(
{res.asUnHexedBlob(i, 0),
res.asUnHexedBlob(i, 1),
res.asBigInt(i, 2)});
}
return txns;
}
return {};
}
std::vector<ripple::uint256>
PostgresBackend::fetchAllTransactionHashesInLedger(
uint32_t ledgerSequence) const
{
PgQuery pgQuery(pgPool_);
pgQuery("SET statement_timeout TO 10000");
std::stringstream sql;
sql << "SELECT hash FROM transactions WHERE "
<< "ledger_seq = " << std::to_string(ledgerSequence);
auto res = pgQuery(sql.str().data());
if (size_t numRows = checkResult(res, 1))
{
std::vector<ripple::uint256> hashes;
for (size_t i = 0; i < numRows; ++i)
{
hashes.push_back(res.asUInt256(i, 0));
}
return hashes;
}
return {};
}
// Return one page of ledger objects as of ledgerSequence, starting at
// cursor (or the beginning), limited to `limit` keys. Keys come from the
// keys table for the covering flag-ledger index; each key is then
// resolved to its object blob.
LedgerPage
PostgresBackend::doFetchLedgerPage(
    std::optional<ripple::uint256> const& cursor,
    std::uint32_t ledgerSequence,
    std::uint32_t limit) const
{
    // Find the keys-table index (flag ledger) covering this sequence.
    auto index = getKeyIndexOfSeq(ledgerSequence);
    if (!index)
        return {};
    PgQuery pgQuery(pgPool_);
    pgQuery("SET statement_timeout TO 10000");
    std::stringstream sql;
    sql << "SELECT key FROM keys WHERE ledger_seq = "
        << std::to_string(index->keyIndex);
    if (cursor)
        sql << " AND key >= \'\\x" << ripple::strHex(*cursor) << "\'";
    sql << " ORDER BY key ASC LIMIT " << std::to_string(limit);
    BOOST_LOG_TRIVIAL(debug) << __func__ << sql.str();
    auto res = pgQuery(sql.str().data());
    BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched keys";
    std::optional<ripple::uint256> returnCursor;
    if (size_t numRows = checkResult(res, 1))
    {
        std::vector<ripple::uint256> keys;
        for (size_t i = 0; i < numRows; ++i)
        {
            keys.push_back({res.asUInt256(i, 0)});
        }
        // Full page: the next page resumes at the key just past the last
        // one returned.
        if (numRows >= limit)
        {
            returnCursor = keys.back();
            ++(*returnCursor);
        }
        // Resolve each key to its object blob; empty blobs (deleted
        // objects) are dropped from the results.
        auto objs = fetchLedgerObjects(keys, ledgerSequence);
        std::vector<LedgerObject> results;
        for (size_t i = 0; i < objs.size(); ++i)
        {
            if (objs[i].size())
            {
                results.push_back({keys[i], objs[i]});
            }
        }
        // A first page that does not begin at key zero suggests the keys
        // table is not fully populated for this index yet.
        if (!cursor && !keys[0].isZero())
            return {results, returnCursor, "Data may be incomplete"};
        return {results, returnCursor};
    }
    if (!cursor)
        return {{}, {}, "Data may be incomplete"};
    return {};
}
/** Fetch many transactions (with metadata) by hash, in parallel.
 *
 * Each lookup is posted to the shared thread pool; the caller blocks on
 * a condition variable until the last task decrements numRemaining to 0.
 * The result vector is index-aligned with the input hashes; hashes with
 * no matching row leave a default-constructed entry.
 *
 * @param hashes Transaction hashes to look up.
 * @return One TransactionAndMetadata per input hash.
 */
std::vector<TransactionAndMetadata>
PostgresBackend::fetchTransactions(
    std::vector<ripple::uint256> const& hashes) const
{
    // Fix: removed a dead `else` branch (a UNION ALL implementation) that
    // was gated on `constexpr bool doAsync = true`, plus an initial
    // duration computation whose value was immediately discarded.
    std::vector<TransactionAndMetadata> results;
    results.resize(hashes.size());
    std::condition_variable cv;
    std::mutex mtx;
    std::atomic_uint numRemaining = hashes.size();
    auto start = std::chrono::system_clock::now();
    for (size_t i = 0; i < hashes.size(); ++i)
    {
        auto const& hash = hashes[i];
        boost::asio::post(
            pool_, [this, &hash, &results, &numRemaining, &cv, &mtx, i]() {
                BOOST_LOG_TRIVIAL(debug)
                    << __func__ << " getting txn = " << i;
                PgQuery pgQuery(pgPool_);
                std::stringstream sql;
                sql << "SELECT transaction,metadata,ledger_seq FROM "
                       "transactions "
                       "WHERE HASH = \'\\x"
                    << ripple::strHex(hash) << "\'";
                auto res = pgQuery(sql.str().data());
                if (checkResult(res, 3))
                {
                    results[i] = {
                        res.asUnHexedBlob(0, 0),
                        res.asUnHexedBlob(0, 1),
                        res.asBigInt(0, 2)};
                }
                // Last task wakes the waiting caller.
                if (--numRemaining == 0)
                {
                    std::unique_lock lck(mtx);
                    cv.notify_one();
                }
            });
    }
    std::unique_lock lck(mtx);
    cv.wait(lck, [&numRemaining]() { return numRemaining == 0; });
    auto end = std::chrono::system_clock::now();
    auto duration = ((end - start).count()) / 1000000000.0;
    BOOST_LOG_TRIVIAL(info)
        << __func__ << " fetched " << std::to_string(hashes.size())
        << " transactions with threadpool. took "
        << std::to_string(duration);
    return results;
}
// Fetch many ledger objects (latest version at or before `sequence`), in
// parallel. Each lookup is posted to the shared thread pool; the caller
// blocks until the last task decrements numRemaining to 0. The result
// vector is index-aligned with `keys`; missing objects leave an empty
// blob.
std::vector<Blob>
PostgresBackend::fetchLedgerObjects(
    std::vector<ripple::uint256> const& keys,
    uint32_t sequence) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery("SET statement_timeout TO 10000");
    std::vector<Blob> results;
    results.resize(keys.size());
    std::condition_variable cv;
    std::mutex mtx;
    std::atomic_uint numRemaining = keys.size();
    auto start = std::chrono::system_clock::now();
    for (size_t i = 0; i < keys.size(); ++i)
    {
        auto const& key = keys[i];
        boost::asio::post(
            pool_,
            [this, &key, &results, &numRemaining, &cv, &mtx, i, sequence]() {
                // Each task checks out its own connection from the pool.
                PgQuery pgQuery(pgPool_);
                // Most recent version of the object at or before sequence.
                std::stringstream sql;
                sql << "SELECT object FROM "
                       "objects "
                       "WHERE key = \'\\x"
                    << ripple::strHex(key) << "\'"
                    << " AND ledger_seq <= " << std::to_string(sequence)
                    << " ORDER BY ledger_seq DESC LIMIT 1";
                auto res = pgQuery(sql.str().data());
                if (size_t numRows = checkResult(res, 1))
                {
                    results[i] = res.asUnHexedBlob();
                }
                // Last task wakes the waiting caller.
                if (--numRemaining == 0)
                {
                    std::unique_lock lck(mtx);
                    cv.notify_one();
                }
            });
    }
    std::unique_lock lck(mtx);
    cv.wait(lck, [&numRemaining]() { return numRemaining == 0; });
    auto end = std::chrono::system_clock::now();
    auto duration = ((end - start).count()) / 1000000000.0;
    BOOST_LOG_TRIVIAL(info)
        << __func__ << " fetched " << std::to_string(keys.size())
        << " objects with threadpool. took " << std::to_string(duration);
    return results;
}
/** Fetch transactions affecting an account, newest first.
 *
 * Delegates paging to the account_tx stored procedure, which returns a
 * JSON object containing transaction hashes and an optional cursor; the
 * hashes are then resolved via fetchTransactions().
 *
 * @param account Account whose transactions to fetch.
 * @param limit Maximum number of transactions to return.
 * @param cursor Resume point from a previous call, if any.
 * @return Transactions plus a cursor when more results remain.
 */
std::pair<
    std::vector<TransactionAndMetadata>,
    std::optional<AccountTransactionsCursor>>
PostgresBackend::fetchAccountTransactions(
    ripple::AccountID const& account,
    std::uint32_t limit,
    std::optional<AccountTransactionsCursor> const& cursor) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery("SET statement_timeout TO 10000");
    pg_params dbParams;
    char const*& command = dbParams.first;
    std::vector<std::optional<std::string>>& values = dbParams.second;
    command =
        "SELECT account_tx($1::bytea, $2::bigint, "
        "$3::bigint, $4::bigint)";
    values.resize(4);
    values[0] = "\\x" + strHex(account);
    values[1] = std::to_string(limit);
    // A null cursor parameter tells the procedure to start from the top.
    if (cursor)
    {
        values[2] = std::to_string(cursor->ledgerSequence);
        values[3] = std::to_string(cursor->transactionIndex);
    }
    for (size_t i = 0; i < values.size(); ++i)
    {
        BOOST_LOG_TRIVIAL(debug) << "value " << std::to_string(i) << " = "
                                 << (values[i] ? values[i].value() : "null");
    }
    auto start = std::chrono::system_clock::now();
    auto res = pgQuery(dbParams);
    auto end = std::chrono::system_clock::now();
    auto duration = ((end - start).count()) / 1000000000.0;
    // Fix: validate once and reuse the count. Previously checkResult()
    // was called twice — once inside the log expression (so the extra
    // validation ran or not depending on the log level) and once again
    // with the result discarded.
    auto const numRecords = checkResult(res, 1);
    BOOST_LOG_TRIVIAL(info)
        << __func__ << " : executed stored_procedure in "
        << std::to_string(duration)
        << " num records = " << std::to_string(numRecords);
    char const* resultStr = res.c_str();
    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                             << "postgres result = " << resultStr
                             << " : account = " << strHex(account);
    boost::json::value raw = boost::json::parse(resultStr);
    boost::json::object responseObj = raw.as_object();
    BOOST_LOG_TRIVIAL(debug) << " parsed = " << responseObj;
    if (responseObj.contains("transactions"))
    {
        auto txns = responseObj.at("transactions").as_array();
        std::vector<ripple::uint256> hashes;
        for (auto& hashHex : txns)
        {
            ripple::uint256 hash;
            // Skip the leading "\x" of the hex representation.
            if (hash.parseHex(hashHex.at("hash").as_string().c_str() + 2))
                hashes.push_back(hash);
        }
        if (responseObj.contains("cursor"))
        {
            return {
                fetchTransactions(hashes),
                {{responseObj.at("cursor").at("ledger_sequence").as_int64(),
                  responseObj.at("cursor")
                      .at("transaction_index")
                      .as_int64()}}};
        }
        return {fetchTransactions(hashes), {}};
    }
    return {{}, {}};
}
// Open the backend. Schema creation/migration is only performed for
// writers; a read-only process must not attempt DDL.
void
PostgresBackend::open(bool readOnly)
{
    if (readOnly)
        return;
    initSchema(pgPool_);
}
// Intentionally a no-op: connection lifetimes are managed by the pool,
// which is torn down with the backend itself.
void
PostgresBackend::close()
{
}
// Begin a write batch: reset the buffered-write state and open a postgres
// transaction on the dedicated write connection.
void
PostgresBackend::startWrites() const
{
    numRowsInObjectsBuffer_ = 0;
    abortWrite_ = false;
    auto res = writeConnection_("BEGIN");
    if (res && res.status() == PGRES_COMMAND_OK)
        return;
    std::stringstream msg;
    msg << "Postgres error creating transaction: " << res.msg();
    throw std::runtime_error(msg.str());
}
bool
PostgresBackend::doFinishWrites() const
{
    // Flush the buffered rows (unless this batch was aborted), commit the
    // transaction opened by startWrites(), and reset all per-batch
    // buffers. Returns false if the batch was aborted.
    if (!abortWrite_)
    {
        std::string const txData = transactionsBuffer_.str();
        writeConnection_.bulkInsert("transactions", txData);
        writeConnection_.bulkInsert(
            "account_transactions", accountTxBuffer_.str());
        std::string const objData = objectsBuffer_.str();
        if (!objData.empty())
            writeConnection_.bulkInsert("objects", objData);
        BOOST_LOG_TRIVIAL(debug)
            << __func__ << " objects size = " << objData.size()
            << " txns size = " << txData.size();
    }
    auto const res = writeConnection_("COMMIT");
    if (!res || res.status() != PGRES_COMMAND_OK)
    {
        std::stringstream err;
        err << "Postgres error committing transaction: " << res.msg();
        throw std::runtime_error(err.str());
    }
    // Clear every staging buffer so the next batch starts clean.
    auto resetBuffer = [](std::stringstream& ss) {
        ss.str("");
        ss.clear();
    };
    resetBuffer(transactionsBuffer_);
    resetBuffer(objectsBuffer_);
    resetBuffer(accountTxBuffer_);
    numRowsInObjectsBuffer_ = 0;
    return !abortWrite_;
}
bool
PostgresBackend::writeKeys(
    std::unordered_set<ripple::uint256> const& keys,
    KeyIndex const& index,
    bool isAsync) const
{
    // Inserts the given set of ledger-object keys into the keys table for
    // the flag ledger identified by index. Returns false only when the
    // current write batch has been aborted.
    if (abortWrite_)
        return false;
    // Asynchronous writes use their own connection so they don't
    // interleave with the main write transaction on writeConnection_.
    PgQuery pgQuery(pgPool_);
    PgQuery& conn = isAsync ? pgQuery : writeConnection_;
    // Flush in batches to bound statement size.
    constexpr size_t maxRowsPerStatement = 10000;
    std::stringstream sql;
    size_t numRows = 0;
    for (auto& key : keys)
    {
        numRows++;
        sql << "INSERT INTO keys (ledger_seq, key) VALUES ("
            << std::to_string(index.keyIndex) << ", \'\\x"
            << ripple::strHex(key) << "\') ON CONFLICT DO NOTHING; ";
        if (numRows > maxRowsPerStatement)
        {
            conn(sql.str().c_str());
            sql.str("");
            sql.clear();
            numRows = 0;
        }
    }
    // Flush any remaining rows.
    if (numRows > 0)
        conn(sql.str().c_str());
    return true;
}
bool
PostgresBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
{
    // Deletes all history older than the most recent numLedgersToKeep
    // ledgers. Before deleting, the latest state of every object is
    // re-written at minLedger so that reads at minLedger still succeed
    // once the older rows are gone. Returns false if there is nothing to
    // delete.
    auto rng = fetchLedgerRangeNoThrow();
    if (!rng)
        return false;
    // Guard against unsigned wraparound when fewer than numLedgersToKeep
    // ledgers exist; the subtraction below would otherwise wrap to a huge
    // value and delete nearly everything.
    if (rng->maxSequence < numLedgersToKeep)
        return false;
    uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
    if (minLedger <= rng->minSequence)
        return false;
    PgQuery pgQuery(pgPool_);
    // These statements can run for a long time; disable the session's
    // statement timeout.
    pgQuery("SET statement_timeout TO 0");
    std::optional<ripple::uint256> cursor;
    while (true)
    {
        try
        {
            auto [objects, curCursor, warning] =
                fetchLedgerPage(cursor, minLedger, 256);
            if (warning)
            {
                BOOST_LOG_TRIVIAL(warning) << __func__
                                           << " online delete running but "
                                              "flag ledger is not complete";
                std::this_thread::sleep_for(std::chrono::seconds(10));
                continue;
            }
            BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
            // Re-write each object at minLedger, so lookups at minLedger
            // hit these rows after older rows are purged.
            std::stringstream objectsBuffer;
            for (auto& obj : objects)
            {
                objectsBuffer << "\\\\x" << ripple::strHex(obj.key) << '\t'
                              << std::to_string(minLedger) << '\t' << "\\\\x"
                              << ripple::strHex(obj.blob) << '\n';
            }
            pgQuery.bulkInsert("objects", objectsBuffer.str());
            cursor = curCursor;
            // An empty cursor means the full keyspace has been paged.
            if (!cursor)
                break;
        }
        catch (DatabaseTimeout const& e)
        {
            // Transient; back off briefly and retry the same page.
            BOOST_LOG_TRIVIAL(warning)
                << __func__ << " Database timeout fetching keys";
            std::this_thread::sleep_for(std::chrono::seconds(2));
        }
    }
    BOOST_LOG_TRIVIAL(info) << __func__ << " finished inserting into objects";
    // Purge everything older than minLedger from each history table.
    {
        std::stringstream sql;
        sql << "DELETE FROM ledgers WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data());
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from ledgers table");
    }
    {
        std::stringstream sql;
        sql << "DELETE FROM keys WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data());
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from keys table");
    }
    {
        std::stringstream sql;
        sql << "DELETE FROM books WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data());
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from books table");
    }
    return true;
}
} // namespace Backend

View File

@@ -0,0 +1,117 @@
#ifndef RIPPLE_APP_REPORTING_POSTGRESBACKEND_H_INCLUDED
#define RIPPLE_APP_REPORTING_POSTGRESBACKEND_H_INCLUDED
#include <boost/json.hpp>
#include <backend/BackendInterface.h>
namespace Backend {
/// BackendInterface implementation backed by PostgreSQL. Writes are
/// staged in stringstream buffers and flushed to the database with bulk
/// inserts when the batch is finished.
class PostgresBackend : public BackendInterface
{
private:
    // Rows currently staged in objectsBuffer_ (reset on each flush).
    mutable size_t numRowsInObjectsBuffer_ = 0;
    // Staging buffers of rows pending bulk insert, one per table.
    mutable std::stringstream objectsBuffer_;
    mutable std::stringstream keysBuffer_;
    mutable std::stringstream transactionsBuffer_;
    mutable std::stringstream accountTxBuffer_;
    // Connection pool shared by all queries.
    std::shared_ptr<PgPool> pgPool_;
    // Dedicated connection that holds the open write transaction.
    mutable PgQuery writeConnection_;
    // Set when the current write batch should be discarded.
    mutable bool abortWrite_ = false;
    // Worker threads for asynchronous work (e.g. key writes).
    mutable boost::asio::thread_pool pool_{16};
    uint32_t writeInterval_ = 1000000;

public:
    PostgresBackend(boost::json::object const& config);

    // --- read path ---
    std::optional<uint32_t>
    fetchLatestLedgerSequence() const override;
    std::optional<ripple::LedgerInfo>
    fetchLedgerBySequence(uint32_t sequence) const override;
    std::optional<LedgerRange>
    fetchLedgerRange() const override;
    std::optional<Blob>
    fetchLedgerObject(ripple::uint256 const& key, uint32_t sequence)
        const override;
    // returns a transaction, metadata pair
    std::optional<TransactionAndMetadata>
    fetchTransaction(ripple::uint256 const& hash) const override;
    std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(uint32_t ledgerSequence) const override;
    std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(uint32_t ledgerSequence) const override;
    LedgerPage
    doFetchLedgerPage(
        std::optional<ripple::uint256> const& cursor,
        std::uint32_t ledgerSequence,
        std::uint32_t limit) const override;
    std::vector<TransactionAndMetadata>
    fetchTransactions(
        std::vector<ripple::uint256> const& hashes) const override;
    std::vector<Blob>
    fetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        uint32_t sequence) const override;
    std::pair<
        std::vector<TransactionAndMetadata>,
        std::optional<AccountTransactionsCursor>>
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t limit,
        std::optional<AccountTransactionsCursor> const& cursor) const override;

    // --- write path (rows are buffered until doFinishWrites) ---
    void
    writeLedger(
        ripple::LedgerInfo const& ledgerInfo,
        std::string&& ledgerHeader,
        bool isFirst) const override;
    void
    doWriteLedgerObject(
        std::string&& key,
        uint32_t seq,
        std::string&& blob,
        bool isCreated,
        bool isDeleted,
        std::optional<ripple::uint256>&& book) const override;
    void
    writeTransaction(
        std::string&& hash,
        uint32_t seq,
        std::string&& transaction,
        std::string&& metadata) const override;
    void
    writeAccountTransactions(
        std::vector<AccountTransactionsData>&& data) const override;

    // --- lifecycle / batch control ---
    void
    open(bool readOnly) override;
    void
    close() override;
    void
    startWrites() const override;
    bool
    doFinishWrites() const override;
    bool
    doOnlineDelete(uint32_t numLedgersToKeep) const override;
    bool
    writeKeys(
        std::unordered_set<ripple::uint256> const& keys,
        KeyIndex const& index,
        bool isAsync = false) const override;
};
} // namespace Backend
#endif

108
src/backend/README.md Normal file
View File

@@ -0,0 +1,108 @@
Reporting mode is a special operating mode of rippled, designed to handle RPCs
for validated data. A server running in reporting mode does not connect to the
p2p network, but rather extracts validated data from a node that is connected
to the p2p network. To run rippled in reporting mode, you must also run a
separate rippled node in p2p mode, to use as an ETL source. Multiple reporting
nodes can share access to the same network accessible databases (Postgres and
Cassandra); at any given time, only one reporting node will be performing ETL
and writing to the databases, while the others simply read from the databases.
A server running in reporting mode will forward any requests that require access
to the p2p network to a p2p node.
# Reporting ETL
A single reporting node has one or more ETL sources, specified in the config
file. A reporting node will subscribe to the "ledgers" stream of each of the ETL
sources. This stream sends a message whenever a new ledger is validated. Upon
receiving a message on the stream, reporting will then fetch the data associated
with the newly validated ledger from one of the ETL sources. The fetch is
performed via a gRPC request ("GetLedger"). This request returns the ledger
header, transactions+metadata blobs, and every ledger object
added/modified/deleted as part of this ledger. ETL then writes all of this data
to the databases, and moves on to the next ledger. ETL does not apply
transactions, but rather extracts the already computed results of those
transactions (all of the added/modified/deleted SHAMap leaf nodes of the state
tree). The new SHAMap inner nodes are computed by the ETL writer; this computation mainly
involves manipulating child pointers and recomputing hashes, logic which is
buried inside of SHAMap.
If the database is entirely empty, ETL must download an entire ledger in full
(as opposed to just the diff, as described above). This download is done via the
"GetLedgerData" gRPC request. "GetLedgerData" allows clients to page through an
entire ledger over several RPC calls. ETL will page through an entire ledger,
and write each object to the database.
If the database is not empty, the reporting node will first come up in a "soft"
read-only mode. In read-only mode, the server does not perform ETL and simply
publishes new ledgers as they are written to the database.
If the database is not updated within a certain time period
(currently hard coded at 20 seconds), the reporting node will begin the ETL
process and start writing to the database. Postgres will report an error when
trying to write a record with a key that already exists. ETL uses this error to
determine that another process is writing to the database, and subsequently
falls back to a soft read-only mode. Reporting nodes can also operate in strict
read-only mode, in which case they will never write to the database.
# Database Nuances
The database schema for reporting mode does not allow any history gaps.
Attempting to write a ledger to a non-empty database where the previous ledger
does not exist will return an error.
The databases must be set up prior to running reporting mode. This requires
creating the Postgres database, and setting up the Cassandra keyspace. Reporting
mode will create the objects table in Cassandra if the table does not yet exist.
Creating the Postgres database:
```
$ psql -h [host] -U [user]
postgres=# create database [database];
```
Creating the keyspace:
```
$ cqlsh [host] [port]
> CREATE KEYSPACE rippled WITH REPLICATION =
{'class' : 'SimpleStrategy', 'replication_factor' : 3 };
```
A replication factor of 3 is recommended. However, when running locally, only a
replication factor of 1 is supported.
Online delete is not supported by reporting mode and must be done manually. The
easiest way to do this would be to setup a second Cassandra keyspace and
Postgres database, bring up a single reporting mode instance that uses those
databases, and start ETL at a ledger of your choosing (via --startReporting on
the command line). Once this node is caught up, the other databases can be
deleted.
To delete:
```
$ psql -h [host] -U [user] -d [database]
reporting=$ truncate table ledgers cascade;
```
```
$ cqlsh [host] [port]
> truncate table objects;
```
# Proxy
RPCs that require access to the p2p network and/or the open ledger are forwarded
from the reporting node to one of the ETL sources. The request is not processed
prior to forwarding, and the response is delivered as-is to the client.
Reporting will forward any requests that always require p2p/open ledger access
(fee and submit, for instance). In addition, any request that explicitly
requests data from the open or closed ledger (via setting
"ledger_index":"current" or "ledger_index":"closed"), will be forwarded to a
p2p node.
For the stream "transactions_proposed" (AKA "rt_transactions"), reporting
subscribes to the "transactions_proposed" streams of each ETL source, and then
forwards those messages to any clients subscribed to the same stream on the
reporting node. A reporting node will subscribe to the stream on each ETL
source, but will only forward the messages from one of the streams at any given
time (to avoid sending the same message more than once to the same client).
# API changes
A reporting node defaults to only returning validated data. If a ledger is not
specified, the most recently validated ledger is used. This is in contrast to
the normal rippled behavior, where the open ledger is used by default.
Reporting will reject all subscribe requests for streams "server", "manifests",
"validations", "peer_status" and "consensus".

201
src/etl/ETLHelpers.h Normal file
View File

@@ -0,0 +1,201 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
#define RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
#include <ripple/basics/base_uint.h>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <queue>
#include <sstream>
/// Tracks the sequence of the most recent ledger validated by the
/// network. Two methods block until certain conditions are met. The
/// structure can be "stopped": stopping releases all current waiters,
/// makes future calls non-blocking, and is irreversible for the object's
/// lifetime.
class NetworkValidatedLedgers
{
    // max sequence validated by network
    std::optional<uint32_t> max_;
    mutable std::mutex m_;
    std::condition_variable cv_;
    bool stopping_ = false;

public:
    static std::shared_ptr<NetworkValidatedLedgers>
    make_ValidatedLedgers()
    {
        return std::make_shared<NetworkValidatedLedgers>();
    }

    /// Record that the network validated sequence idx.
    /// @param idx sequence validated by network
    void
    push(uint32_t idx)
    {
        std::lock_guard lck(m_);
        if (!max_.has_value() || *max_ < idx)
            max_ = idx;
        cv_.notify_all();
    }

    /// Get the most recently validated sequence, blocking until at least
    /// one ledger is known to have been validated.
    /// @return sequence of most recently validated ledger; empty optional
    /// if the structure has been stopped
    std::optional<uint32_t>
    getMostRecent()
    {
        std::unique_lock lck(m_);
        cv_.wait(lck, [this]() { return stopping_ || max_.has_value(); });
        return max_;
    }

    /// Block until the given sequence has been validated by the network.
    /// @param sequence sequence to wait for
    /// @return true if the sequence was validated; false means the
    /// structure was stopped
    bool
    waitUntilValidatedByNetwork(uint32_t sequence)
    {
        std::unique_lock lck(m_);
        auto const done = [sequence, this]() {
            return stopping_ || (max_.has_value() && sequence <= *max_);
        };
        cv_.wait(lck, done);
        return !stopping_;
    }

    /// Permanently enter the stopped state, releasing all waiters.
    /// Future calls to this object will not block.
    void
    stop()
    {
        std::lock_guard lck(m_);
        stopping_ = true;
        cv_.notify_all();
    }
};
/// Generic thread-safe queue with an optional maximum size
/// Note, we can't use a lockfree queue here, since we need the ability to wait
/// for an element to be added or removed from the queue. These waits are
/// blocking calls.
template <class T>
class ThreadSafeQueue
{
    std::queue<T> queue_;
    mutable std::mutex m_;
    std::condition_variable cv_;
    // if set, push() blocks while the queue already holds maxSize_ elements
    std::optional<uint32_t> maxSize_;

public:
    /// @param maxSize maximum size of the queue. Calls that would cause the
    /// queue to exceed this size will block until free space is available
    ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize)
    {
    }

    /// Create a queue with no maximum size
    ThreadSafeQueue() = default;

    /// @param elt element to push onto queue
    /// if maxSize is set, this method will block until free space is available
    void
    push(T const& elt)
    {
        std::unique_lock lck(m_);
        // Wait until strictly below capacity. The previous predicate used
        // <=, which allowed the queue to grow to maxSize_ + 1 elements.
        if (maxSize_)
            cv_.wait(lck, [this]() { return queue_.size() < *maxSize_; });
        queue_.push(elt);
        cv_.notify_all();
    }

    /// @param elt element to push onto queue. elt is moved from
    /// if maxSize is set, this method will block until free space is available
    void
    push(T&& elt)
    {
        std::unique_lock lck(m_);
        // see the copy overload: strict < enforces the maximum size
        if (maxSize_)
            cv_.wait(lck, [this]() { return queue_.size() < *maxSize_; });
        queue_.push(std::move(elt));
        cv_.notify_all();
    }

    /// @return element popped from queue. Will block until queue is non-empty
    T
    pop()
    {
        std::unique_lock lck(m_);
        cv_.wait(lck, [this]() { return !queue_.empty(); });
        T ret = std::move(queue_.front());
        queue_.pop();
        // if queue has a max size, unblock any possible pushers
        if (maxSize_)
            cv_.notify_all();
        return ret;
    }

    /// @return element popped from queue, or an empty optional if the
    /// queue is empty. Never blocks.
    std::optional<T>
    tryPop()
    {
        std::unique_lock lck(m_);
        if (queue_.empty())
            return {};
        T ret = std::move(queue_.front());
        queue_.pop();
        // if queue has a max size, unblock any possible pushers
        if (maxSize_)
            cv_.notify_all();
        return ret;
    }
};
/// Partitions the uint256 keyspace into numMarkers partitions, each of equal
/// size.
/// @param numMarkers number of partitions; must be in [1, 256]
/// @return the starting key of each partition
inline std::vector<ripple::uint256>
getMarkers(size_t numMarkers)
{
    // numMarkers == 0 would divide by zero below, and > 256 would make
    // incr wrap to 0 and produce duplicate markers.
    assert(numMarkers > 0 && numMarkers <= 256);
    unsigned char incr = 256 / numMarkers;
    std::vector<ripple::uint256> markers;
    markers.reserve(numMarkers);
    ripple::uint256 base{0};
    for (size_t i = 0; i < numMarkers; ++i)
    {
        markers.push_back(base);
        // markers differ only in the most significant byte
        base.data()[0] += incr;
    }
    return markers;
}
#endif

855
src/etl/ETLSource.cpp Normal file
View File

@@ -0,0 +1,855 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/beast/net/IPEndpoint.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/asio/strand.hpp>
#include <boost/beast/http.hpp>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include <boost/log/trivial.hpp>
#include <etl/ETLSource.h>
#include <etl/ReportingETL.h>
#include <memory>
#include <string>
// Create ETL source without grpc endpoint
// Fetch ledger and load initial ledger will fail for this source
// Primarily used in read-only mode, to monitor when ledgers are validated
ETLSource::ETLSource(
    boost::json::object const& config,
    boost::asio::io_context& ioContext,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
    ETLLoadBalancer& balancer)
    : ioc_(ioContext)
    , ws_(std::make_unique<
          boost::beast::websocket::stream<boost::beast::tcp_stream>>(
          boost::asio::make_strand(ioc_)))
    , resolver_(boost::asio::make_strand(ioc_))
    , timer_(ioc_)
    , networkValidatedLedgers_(networkValidatedLedgers)
    , backend_(backend)
    , subscriptions_(subscriptions)
    , balancer_(balancer)
{
    // "ip" and "ws_port" configure the websocket subscription connection.
    if (config.contains("ip"))
    {
        auto ipJs = config.at("ip").as_string();
        ip_ = {ipJs.c_str(), ipJs.size()};
    }
    if (config.contains("ws_port"))
    {
        auto portjs = config.at("ws_port").as_string();
        wsPort_ = {portjs.c_str(), portjs.size()};
    }
    // "grpc_port" is optional; without it no stub is created and this
    // source cannot fetch ledgers (see comment above).
    if (config.contains("grpc_port"))
    {
        auto portjs = config.at("grpc_port").as_string();
        grpcPort_ = {portjs.c_str(), portjs.size()};
        try
        {
            boost::asio::ip::tcp::endpoint endpoint{
                boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
            std::stringstream ss;
            ss << endpoint;
            stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
                grpc::CreateChannel(
                    ss.str(), grpc::InsecureChannelCredentials()));
            BOOST_LOG_TRIVIAL(debug) << "Made stub for remote = " << toString();
        }
        catch (std::exception const& e)
        {
            // A bad address or port leaves stub_ null; fetches from this
            // source will fail but the source remains usable for the
            // websocket stream.
            BOOST_LOG_TRIVIAL(debug)
                << "Exception while creating stub = " << e.what()
                << " . Remote = " << toString();
        }
    }
}
void
ETLSource::reconnect(boost::beast::error_code ec)
{
    // Schedule a reconnect attempt with exponentially increasing backoff.
    connected_ = false;
    // operation_aborted occurs on shutdown (timer cancelled) and
    // connection_refused repeats while the peer is down; both are
    // somewhat normal, so log them at a lower severity.
    bool const expected = ec == boost::asio::error::operation_aborted ||
        ec == boost::asio::error::connection_refused;
    if (expected)
    {
        BOOST_LOG_TRIVIAL(warning)
            << __func__ << " : "
            << "error code = " << ec << " - " << toString();
    }
    else
    {
        BOOST_LOG_TRIVIAL(error)
            << __func__ << " : "
            << "error code = " << ec << " - " << toString();
    }
    // exponentially increasing timeouts, with a max of 30 seconds
    size_t const waitTime = std::min(pow(2, numFailures_), 30.0);
    ++numFailures_;
    timer_.expires_after(boost::asio::chrono::seconds(waitTime));
    timer_.async_wait([this](auto ec) {
        bool const startAgain = (ec != boost::asio::error::operation_aborted);
        BOOST_LOG_TRIVIAL(trace) << __func__ << " async_wait : ec = " << ec;
        close(startAgain);
    });
}
void
ETLSource::close(bool startAgain)
{
    // Cancel any pending reconnect timer, then close the websocket on the
    // io_context thread. Optionally restarts the connection afterwards.
    timer_.cancel();
    ioc_.post([this, startAgain]() {
        if (closing_)
            return;
        if (!ws_->is_open())
        {
            if (startAgain)
                run();
            return;
        }
        // onStop() also calls close(). If async_close were called twice,
        // an assertion would fail; closing_ ensures it runs only once.
        closing_ = true;
        ws_->async_close(
            boost::beast::websocket::close_code::normal,
            [this, startAgain](auto ec) {
                if (ec)
                {
                    BOOST_LOG_TRIVIAL(error)
                        << __func__ << " async_close : "
                        << "error code = " << ec << " - " << toString();
                }
                closing_ = false;
                if (startAgain)
                    run();
            });
    });
}
void
ETLSource::onResolve(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type results)
{
    // DNS resolution callback: on success, initiate the TCP connection.
    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " : ec = " << ec << " - " << toString();
    if (ec)
    {
        // resolution failed; schedule a retry
        reconnect(ec);
        return;
    }
    // Allow up to 30 seconds to establish the connection.
    auto& tcpLayer = boost::beast::get_lowest_layer(*ws_);
    tcpLayer.expires_after(std::chrono::seconds(30));
    tcpLayer.async_connect(
        results, [this](auto ec, auto ep) { onConnect(ec, ep); });
}
void
ETLSource::onConnect(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
    // TCP connect callback: on success, configure the websocket stream
    // and begin the websocket handshake.
    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " : ec = " << ec << " - " << toString();
    if (ec)
    {
        // connect failed; start the retry cycle over
        reconnect(ec);
        return;
    }
    numFailures_ = 0;
    // The websocket stream has its own timeout system, so turn off the
    // tcp stream's timeout.
    boost::beast::get_lowest_layer(*ws_).expires_never();
    // Set suggested timeout settings for the websocket
    ws_->set_option(boost::beast::websocket::stream_base::timeout::suggested(
        boost::beast::role_type::client));
    // Set a decorator to change the User-Agent of the handshake
    ws_->set_option(boost::beast::websocket::stream_base::decorator(
        [](boost::beast::websocket::request_type& req) {
            req.set(
                boost::beast::http::field::user_agent,
                std::string(BOOST_BEAST_VERSION_STRING) +
                    " websocket-client-async");
        }));
    // Value of the Host HTTP header for the WebSocket handshake.
    // See https://tools.ietf.org/html/rfc7230#section-5.4
    auto const host = ip_ + ':' + std::to_string(endpoint.port());
    // Perform the websocket handshake
    ws_->async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
}
void
ETLSource::onHandshake(boost::beast::error_code ec)
{
    // Websocket handshake callback: on success, subscribe to the
    // validated-ledger and proposed-transaction streams.
    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " : ec = " << ec << " - " << toString();
    if (ec)
    {
        // start over
        reconnect(ec);
        return;
    }
    boost::json::object jv{
        {"command", "subscribe"},
        {"streams", {"ledger", "transactions_proposed"}}};
    // The buffer handed to async_write must stay valid until the
    // completion handler runs. A stack-local string would be destroyed
    // when this function returns, leaving a dangling buffer; keep the
    // serialized message alive via a shared_ptr captured by the handler.
    auto s = std::make_shared<std::string>(boost::json::serialize(jv));
    BOOST_LOG_TRIVIAL(trace) << "Sending subscribe stream message";
    // Send the message
    ws_->async_write(
        boost::asio::buffer(*s), [this, s](auto ec, size_t size) {
            onWrite(ec, size);
        });
}
void
ETLSource::onWrite(boost::beast::error_code ec, size_t bytesWritten)
{
    // Subscribe-message write callback: on success, start reading the
    // subscription stream.
    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " : ec = " << ec << " - " << toString();
    if (ec)
    {
        // write failed; restart the connection cycle
        reconnect(ec);
        return;
    }
    ws_->async_read(
        readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
}
void
ETLSource::onRead(boost::beast::error_code ec, size_t size)
{
    // Stream read callback: process the message, then queue the next
    // read. Any error restarts the connection.
    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " : ec = " << ec << " - " << toString();
    if (ec)
    {
        reconnect(ec);
        return;
    }
    handleMessage();
    // Discard the consumed buffer by swapping in a fresh one.
    boost::beast::flat_buffer freshBuffer;
    swap(readBuffer_, freshBuffer);
    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " : calling async_read - " << toString();
    ws_->async_read(
        readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
}
// Parse and dispatch one message from the websocket subscription stream.
// Returns false if the message could not be parsed or handled.
bool
ETLSource::handleMessage()
{
    BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString();
    // Any message counts as liveness from this source.
    setLastMsgTime();
    connected_ = true;
    try
    {
        std::string msg{
            static_cast<char const*>(readBuffer_.data().data()),
            readBuffer_.size()};
        // BOOST_LOG_TRIVIAL(debug) << __func__ << msg;
        boost::json::value raw = boost::json::parse(msg);
        // BOOST_LOG_TRIVIAL(debug) << __func__ << " parsed";
        boost::json::object response = raw.as_object();
        uint32_t ledgerIndex = 0;
        if (response.contains("result"))
        {
            // Response to our subscribe command: may carry the current
            // ledger index and the validated ledger range.
            boost::json::object result = response["result"].as_object();
            if (result.contains("ledger_index"))
            {
                ledgerIndex = result["ledger_index"].as_int64();
            }
            if (result.contains("validated_ledgers"))
            {
                boost::json::string const& validatedLedgers =
                    result["validated_ledgers"].as_string();
                setValidatedRange(
                    {validatedLedgers.c_str(), validatedLedgers.size()});
            }
            BOOST_LOG_TRIVIAL(debug)
                << __func__ << " : "
                << "Received a message on ledger "
                << " subscription stream. Message : " << response << " - "
                << toString();
        }
        else
        {
            if (response.contains("transaction"))
            {
                // Proposed transaction: forward to subscribers, but only
                // if this source is the currently chosen propagator (so
                // clients don't see the same message more than once).
                if (balancer_.shouldPropagateTxnStream(this))
                {
                    subscriptions_->forwardProposedTransaction(response);
                }
            }
            else
            {
                // Ledger stream notification: a new ledger was validated.
                BOOST_LOG_TRIVIAL(debug)
                    << __func__ << " : "
                    << "Received a message on ledger "
                    << " subscription stream. Message : " << response << " - "
                    << toString();
                if (response.contains("ledger_index"))
                {
                    ledgerIndex = response["ledger_index"].as_int64();
                }
                if (response.contains("validated_ledgers"))
                {
                    boost::json::string const& validatedLedgers =
                        response["validated_ledgers"].as_string();
                    setValidatedRange(
                        {validatedLedgers.c_str(), validatedLedgers.size()});
                }
            }
        }
        if (ledgerIndex != 0)
        {
            // Wake anything waiting for this sequence to be validated.
            BOOST_LOG_TRIVIAL(trace)
                << __func__ << " : "
                << "Pushing ledger sequence = " << ledgerIndex << " - "
                << toString();
            networkValidatedLedgers_->push(ledgerIndex);
        }
        return true;
    }
    catch (std::exception const& e)
    {
        BOOST_LOG_TRIVIAL(error) << "Exception in handleMessage : " << e.what();
        return false;
    }
}
// Tracks one in-flight GetLedgerData gRPC call used to page through a
// full ledger during the initial download. Two response buffers are kept
// (cur_/next_): next_ receives the in-flight reply, and process() swaps
// it into cur_ before issuing the next call, so writing objects from
// cur_ can overlap with the next fetch.
class AsyncCallData
{
    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;
    org::xrpl::rpc::v1::GetLedgerDataRequest request_;
    std::unique_ptr<grpc::ClientContext> context_;
    grpc::Status status_;
public:
    // @param seq the ledger sequence to download
    AsyncCallData(uint32_t seq)
    {
        request_.mutable_ledger()->set_sequence(seq);
        request_.set_user("ETL");
        cur_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        next_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        context_ = std::make_unique<grpc::ClientContext>();
    }
    // MORE: more pages remain; DONE: last page written; ERRORED: failed
    enum class CallStatus { MORE, DONE, ERRORED };
    // Handle a completed response: issue the next page request (if a
    // marker was returned), then write this page's objects to backend.
    // @param abort if true, give up without processing
    CallStatus
    process(
        std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
        grpc::CompletionQueue& cq,
        BackendInterface& backend,
        bool abort = false)
    {
        BOOST_LOG_TRIVIAL(info) << "Processing response. "
                                << "Marker prefix = " << getMarkerPrefix();
        if (abort)
        {
            BOOST_LOG_TRIVIAL(error) << "AsyncCallData aborted";
            return CallStatus::ERRORED;
        }
        if (!status_.ok())
        {
            BOOST_LOG_TRIVIAL(error)
                << "AsyncCallData status_ not ok: "
                << " code = " << status_.error_code()
                << " message = " << status_.error_message();
            return CallStatus::ERRORED;
        }
        if (!next_->is_unlimited())
        {
            // A rate-limited reply indicates a server misconfiguration.
            BOOST_LOG_TRIVIAL(warning)
                << "AsyncCallData is_unlimited is false. Make sure "
                   "secure_gateway is set correctly at the ETL source";
            assert(false);
        }
        // The just-received reply becomes cur_; next_ is reused as the
        // buffer for the following call.
        std::swap(cur_, next_);
        bool more = true;
        // if no marker returned, we are done
        if (cur_->marker().size() == 0)
            more = false;
        // if we are not done, make the next async call
        if (more)
        {
            request_.set_marker(std::move(cur_->marker()));
            call(stub, cq);
        }
        BOOST_LOG_TRIVIAL(trace) << "Writing objects";
        for (auto& obj : *(cur_->mutable_ledger_objects()->mutable_objects()))
        {
            std::optional<ripple::uint256> book = {};
            // Entry type is encoded in bytes 1-2 of the data; 0x006f
            // marks an offer, whose book directory we extract.
            short offer_bytes = (obj.data()[1] << 8) | obj.data()[2];
            if (offer_bytes == 0x006f)
            {
                ripple::SerialIter it{obj.data().data(), obj.data().size()};
                ripple::SLE sle{it, {}};
                book = sle.getFieldH256(ripple::sfBookDirectory);
            }
            backend.writeLedgerObject(
                std::move(*obj.mutable_key()),
                request_.ledger().sequence(),
                std::move(*obj.mutable_data()),
                true,
                false,
                std::move(book));
        }
        BOOST_LOG_TRIVIAL(trace) << "Wrote objects";
        return more ? CallStatus::MORE : CallStatus::DONE;
    }
    // Start the async GetLedgerData RPC; the completion queue will yield
    // this object's address as the tag when the reply arrives.
    void
    call(
        std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
        grpc::CompletionQueue& cq)
    {
        BOOST_LOG_TRIVIAL(info) << "Making next request. " << getMarkerPrefix();
        context_ = std::make_unique<grpc::ClientContext>();
        std::unique_ptr<grpc::ClientAsyncResponseReader<
            org::xrpl::rpc::v1::GetLedgerDataResponse>>
            rpc(stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq));
        rpc->StartCall();
        rpc->Finish(next_.get(), &status_, this);
    }
    // Hex of the first byte of the current marker; "" when no marker.
    std::string
    getMarkerPrefix()
    {
        if (next_->marker().size() == 0)
            return "";
        else
            return ripple::strHex(std::string{next_->marker().data()[0]});
    }
};
// Download every object of the given ledger from this source via the
// paged GetLedgerData gRPC, writing objects to backend_ as pages arrive.
// Returns false on any error (including a source with no gRPC stub).
bool
ETLSource::loadInitialLedger(uint32_t sequence)
{
    if (!stub_)
        return false;
    grpc::CompletionQueue cq;
    void* tag;
    bool ok = false;
    std::vector<AsyncCallData> calls;
    calls.emplace_back(sequence);
    BOOST_LOG_TRIVIAL(info) << "Starting data download for ledger " << sequence
                            << ". Using source = " << toString();
    // Kick off the first request for each call object.
    for (auto& c : calls)
        c.call(stub_, cq);
    size_t numFinished = 0;
    bool abort = false;
    // Drain the completion queue; each event's tag is the AsyncCallData
    // whose request just completed.
    while (numFinished < calls.size() && cq.Next(&tag, &ok))
    {
        assert(tag);
        auto ptr = static_cast<AsyncCallData*>(tag);
        if (!ok)
        {
            BOOST_LOG_TRIVIAL(error) << "loadInitialLedger - ok is false";
            return false;
            // handle cancelled
        }
        else
        {
            BOOST_LOG_TRIVIAL(info)
                << "Marker prefix = " << ptr->getMarkerPrefix();
            // process() writes this page and, if more pages remain,
            // enqueues the next request on cq.
            auto result = ptr->process(stub_, cq, *backend_, abort);
            if (result != AsyncCallData::CallStatus::MORE)
            {
                numFinished++;
                BOOST_LOG_TRIVIAL(info)
                    << "Finished a marker. "
                    << "Current number of finished = " << numFinished;
            }
            if (result == AsyncCallData::CallStatus::ERRORED)
            {
                // Remaining events are drained but not processed.
                abort = true;
            }
        }
    }
    return !abort;
}
/// Fetch a single ledger (header plus expanded transactions and metadata,
/// optionally with state-object diffs) from this source over gRPC.
/// @param ledgerSequence sequence of the ledger to fetch
/// @param getObjects whether to request the ledger-object diff as well
/// @return the gRPC status and the (possibly partial) response
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
ETLSource::fetchLedger(uint32_t ledgerSequence, bool getObjects)
{
    org::xrpl::rpc::v1::GetLedgerResponse response;
    if (!stub_)
        return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

    // Build the request: ledger header with txns and metadata
    org::xrpl::rpc::v1::GetLedgerRequest request;
    request.mutable_ledger()->set_sequence(ledgerSequence);
    request.set_transactions(true);
    request.set_expand(true);
    request.set_get_objects(getObjects);
    request.set_user("ETL");

    grpc::ClientContext context;
    grpc::Status const status = stub_->GetLedger(&context, request, &response);

    // A non-unlimited reply means rippled is rate-limiting us, i.e. the
    // secure_gateway configuration on the source is wrong
    bool const unlimited = response.is_unlimited();
    if (status.ok() && !unlimited)
    {
        BOOST_LOG_TRIVIAL(warning)
            << "ETLSource::fetchLedger - is_unlimited is "
               "false. Make sure secure_gateway is set "
               "correctly on the ETL source. source = "
            << toString() << " status = " << status.error_message();
        assert(false);
    }
    return {status, std::move(response)};
}
/// Construct the load balancer by creating one ETLSource per entry in the
/// "etl_sources" configuration array. Each source begins connecting
/// immediately (make_ETLSource calls run()).
ETLLoadBalancer::ETLLoadBalancer(
    boost::json::array const& config,
    boost::asio::io_context& ioContext,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> nwvl)
{
    for (auto& entry : config)
    {
        sources_.push_back(ETLSource::make_ETLSource(
            entry.as_object(), ioContext, backend, subscriptions, nwvl, *this));
        BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - "
                                << sources_.back()->toString();
    }
}
/// Download the initial ledger from one of the sources, retrying across
/// sources (via execute) until a download succeeds.
/// @param sequence sequence of the ledger to download
void
ETLLoadBalancer::loadInitialLedger(uint32_t sequence)
{
    auto downloadFromSource = [sequence](auto& source) {
        bool const downloaded = source->loadInitialLedger(sequence);
        if (!downloaded)
        {
            BOOST_LOG_TRIVIAL(error) << "Failed to download initial ledger."
                                     << " Sequence = " << sequence
                                     << " source = " << source->toString();
        }
        return downloaded;
    };
    execute(downloadFromSource, sequence);
}
/// Fetch ledger data for the given sequence from one of the ETL sources,
/// retrying across sources (via execute) until a fetch succeeds.
/// @param ledgerSequence sequence of the ledger to fetch
/// @param getObjects whether to also fetch the state-object diff
/// @return the fetched response on success, empty optional on failure
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
ETLLoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects)
{
    org::xrpl::rpc::v1::GetLedgerResponse response;
    bool success = execute(
        [&response, ledgerSequence, getObjects, this](auto& source) {
            auto [status, data] =
                source->fetchLedger(ledgerSequence, getObjects);
            response = std::move(data);
            // NOTE(review): "|| true" makes the validated() check a no-op,
            // so any OK status is treated as success. Looks like a
            // deliberate (possibly temporary) bypass -- confirm intent
            // before removing it.
            if (status.ok() && (response.validated() || true))
            {
                BOOST_LOG_TRIVIAL(info)
                    << "Successfully fetched ledger = " << ledgerSequence
                    << " from source = " << source->toString();
                return true;
            }
            else
            {
                BOOST_LOG_TRIVIAL(warning)
                    << "Error getting ledger = " << ledgerSequence
                    << " Reply : " << response.DebugString()
                    << " error_code : " << status.error_code()
                    << " error_msg : " << status.error_message()
                    << " source = " << source->toString();
                return false;
            }
        },
        ledgerSequence);
    if (success)
        return response;
    else
        return {};
}
/// Randomly select a rippled node and return a gRPC stub for forwarding
/// requests to it. Tries each source at most once, rotating round-robin
/// from a random starting index.
/// @return a stub for some source, or nullptr if none could provide one
std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
ETLLoadBalancer::getRippledForwardingStub() const
{
    if (sources_.size() == 0)
        return nullptr;
    // Seed the PRNG once per process. The previous code reseeded with
    // time(0) on every call, so all calls within the same second picked the
    // same starting source.
    static bool const seeded = [] {
        srand((unsigned)time(0));
        return true;
    }();
    (void)seeded;
    auto sourceIdx = rand() % sources_.size();
    // size_t avoids a signed/unsigned comparison with sources_.size()
    size_t numAttempts = 0;
    while (numAttempts < sources_.size())
    {
        auto stub = sources_[sourceIdx]->getRippledForwardingStub();
        if (!stub)
        {
            // This source could not produce a stub; try the next one
            sourceIdx = (sourceIdx + 1) % sources_.size();
            ++numAttempts;
            continue;
        }
        return stub;
    }
    return nullptr;
}
/// Forward a JSON-RPC request to a randomly selected rippled node. Tries
/// each source at most once, rotating round-robin from a random start.
/// @param request JSON-RPC request to proxy
/// @return the node's response (with "forwarded" == true on success), or an
///         object with an "error" field if every source failed
boost::json::object
ETLLoadBalancer::forwardToRippled(boost::json::object const& request) const
{
    boost::json::object res;
    if (sources_.size() == 0)
        return res;
    // Seed the PRNG once per process. The previous code reseeded with
    // time(0) on every call, so all calls within the same second picked the
    // same starting source.
    static bool const seeded = [] {
        srand((unsigned)time(0));
        return true;
    }();
    (void)seeded;
    auto sourceIdx = rand() % sources_.size();
    // size_t avoids a signed/unsigned comparison with sources_.size()
    size_t numAttempts = 0;
    while (numAttempts < sources_.size())
    {
        res = sources_[sourceIdx]->forwardToRippled(request);
        // A successful proxy sets "forwarded" to true in the response
        if (!res.contains("forwarded") || res.at("forwarded") != true)
        {
            sourceIdx = (sourceIdx + 1) % sources_.size();
            ++numAttempts;
            continue;
        }
        return res;
    }
    res["error"] = "Failed to forward";
    return res;
}
/// Build a fresh gRPC stub pointed at this source's gRPC endpoint.
/// @return a stub, or nullptr if not connected or endpoint creation throws
///         (e.g. grpcPort_ is not a valid integer)
std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
ETLSource::getRippledForwardingStub() const
{
    if (!connected_)
        return nullptr;
    try
    {
        auto const target =
            beast::IP::Endpoint(
                boost::asio::ip::make_address(ip_), std::stoi(grpcPort_))
                .to_string();
        return org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
            grpc::CreateChannel(target, grpc::InsecureChannelCredentials()));
    }
    catch (std::exception const&)
    {
        BOOST_LOG_TRIVIAL(error) << "Failed to create grpc stub";
        return nullptr;
    }
}
/// Proxy a JSON-RPC request to this source's rippled node over a one-shot
/// websocket connection.
/// @param request JSON-RPC request to forward
/// @return the parsed response with "forwarded" set to true on success; an
///         empty object if not connected or an exception occurred; an
///         object with an "error" field if the reply could not be parsed
boost::json::object
ETLSource::forwardToRippled(boost::json::object const& request) const
{
    BOOST_LOG_TRIVIAL(debug) << "Attempting to forward request to tx. "
                             << "request = " << boost::json::serialize(request);

    boost::json::object response;
    if (!connected_)
    {
        BOOST_LOG_TRIVIAL(error)
            << "Attempted to proxy but failed to connect to tx";
        return response;
    }

    namespace beast = boost::beast;          // from <boost/beast.hpp>
    namespace http = beast::http;            // from <boost/beast/http.hpp>
    namespace websocket = beast::websocket;  // from <boost/beast/websocket.hpp>
    namespace net = boost::asio;             // from <boost/asio.hpp>
    using tcp = boost::asio::ip::tcp;        // from <boost/asio/ip/tcp.hpp>
    try
    {
        // The io_context is required for all I/O. This is a blocking,
        // single-use connection, separate from the source's async ws_ stream
        net::io_context ioc;

        // These objects perform our I/O
        tcp::resolver resolver{ioc};

        BOOST_LOG_TRIVIAL(debug) << "Creating websocket";
        auto ws = std::make_unique<websocket::stream<tcp::socket>>(ioc);

        // Look up the domain name
        auto const results = resolver.resolve(ip_, wsPort_);

        BOOST_LOG_TRIVIAL(debug) << "Connecting websocket";
        // Make the connection on the IP address we get from a lookup
        net::connect(ws->next_layer(), results.begin(), results.end());

        // Set a decorator to change the User-Agent of the handshake
        // and to tell rippled to charge the client IP for RPC
        // resources. See "secure_gateway" in
        //
        // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg
        ws->set_option(websocket::stream_base::decorator(
            [&request](websocket::request_type& req) {
                req.set(
                    http::field::user_agent,
                    std::string(BOOST_BEAST_VERSION_STRING) +
                        " websocket-client-coro");
                // The serialized request rides in the Forwarded header so
                // rippled can attribute the load to the original client
                req.set(
                    http::field::forwarded,
                    "for=" + boost::json::serialize(request));
            }));
        BOOST_LOG_TRIVIAL(debug)
            << "client ip: " << boost::json::serialize(request);

        BOOST_LOG_TRIVIAL(debug) << "Performing websocket handshake";
        // Perform the websocket handshake
        ws->handshake(ip_, "/");

        BOOST_LOG_TRIVIAL(debug) << "Sending request";
        // Send the message
        ws->write(net::buffer(boost::json::serialize(request)));

        // Read the (single) reply into a flat buffer and parse it as JSON
        beast::flat_buffer buffer;
        ws->read(buffer);

        auto begin = static_cast<char const*>(buffer.data().data());
        auto end = begin + buffer.data().size();
        auto parsed = boost::json::parse(std::string(begin, end));

        if (!parsed.is_object())
        {
            BOOST_LOG_TRIVIAL(error) << "Error parsing response";
            response["error"] = "Error parsing response from tx";
            return response;
        }
        BOOST_LOG_TRIVIAL(debug) << "Successfully forward request";

        response = parsed.as_object();

        // Mark the reply so callers can distinguish a successful proxy
        response["forwarded"] = true;
        return response;
    }
    catch (std::exception const& e)
    {
        BOOST_LOG_TRIVIAL(error) << "Encountered exception : " << e.what();
        return response;
    }
}
template <class Func>
bool
ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence)
{
srand((unsigned)time(0));
auto sourceIdx = rand() % sources_.size();
auto numAttempts = 0;
while (true)
{
auto& source = sources_[sourceIdx];
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Attempting to execute func. ledger sequence = "
<< ledgerSequence << " - source = " << source->toString();
if (source->hasLedger(ledgerSequence) || true)
{
bool res = f(source);
if (res)
{
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " : "
<< "Successfully executed func at source = "
<< source->toString()
<< " - ledger sequence = " << ledgerSequence;
break;
}
else
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " : "
<< "Failed to execute func at source = "
<< source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
}
else
{
BOOST_LOG_TRIVIAL(warning)
<< __func__ << " : "
<< "Ledger not present at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
sourceIdx = (sourceIdx + 1) % sources_.size();
numAttempts++;
if (numAttempts % sources_.size() == 0)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << " : "
<< "Error executing function "
<< " - ledger sequence = " << ledgerSequence
<< " - Tried all sources. Sleeping and trying again";
std::this_thread::sleep_for(std::chrono::seconds(2));
}
}
return true;
}

450
src/etl/ETLSource.h Normal file
View File

@@ -0,0 +1,450 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#include <boost/algorithm/string.hpp>
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h>
#include <server/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>
#include <etl/ETLHelpers.h>
class ETLLoadBalancer;
class SubscriptionManager;
/// This class manages a connection to a single ETL source. This is almost
/// always a rippled node, but really could be another reporting node. This
/// class subscribes to the ledgers and transactions_proposed streams of the
/// associated rippled node, and keeps track of which ledgers the rippled node
/// has. This class also has methods for extracting said ledgers. Lastly this
/// class forwards transactions received on the transactions_proposed streams
/// to any subscribers.
class ETLSource
{
    // Endpoints of the rippled node: websocket for subscription streams,
    // gRPC for ledger extraction
    std::string ip_;

    std::string wsPort_;

    std::string grpcPort_;

    boost::asio::io_context& ioc_;

    // gRPC stub used by fetchLedger / loadInitialLedger
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;

    // websocket used for the ledgers / transactions_proposed streams
    std::unique_ptr<boost::beast::websocket::stream<boost::beast::tcp_stream>>
        ws_;
    boost::asio::ip::tcp::resolver resolver_;

    boost::beast::flat_buffer readBuffer_;

    // Sorted list of disjoint [min, max] ledger ranges this source has,
    // plus the raw range string it was parsed from. Guarded by mtx_.
    std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;

    std::string validatedLedgersRaw_;

    std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;

    // beast::Journal journal_;

    // Guards validatedLedgers_ / validatedLedgersRaw_
    mutable std::mutex mtx_;

    size_t numFailures_ = 0;

    std::atomic_bool closing_{false};

    std::atomic_bool connected_{false};

    // true if this ETL source is forwarding transactions received on the
    // transactions_proposed stream. There are usually multiple ETL sources,
    // so to avoid forwarding the same transaction multiple times, we only
    // forward from one particular ETL source at a time.
    std::atomic_bool forwardingStream_{false};

    // The last time a message was received on the ledgers stream
    std::chrono::system_clock::time_point lastMsgTime_;
    mutable std::mutex lastMsgTimeMtx_;

    // used for retrying connections
    boost::asio::steady_timer timer_;

    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    ETLLoadBalancer& balancer_;

    /// Kick off the async connect chain: resolve, then onResolve continues
    /// with connect and handshake
    void
    run()
    {
        BOOST_LOG_TRIVIAL(trace) << __func__ << " : " << toString();

        auto const host = ip_;
        auto const port = wsPort_;

        resolver_.async_resolve(host, port, [this](auto ec, auto results) {
            onResolve(ec, results);
        });
    }

public:
    /// Factory: construct an ETLSource and immediately begin connecting
    static std::unique_ptr<ETLSource>
    make_ETLSource(
        boost::json::object const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
        ETLLoadBalancer& balancer)
    {
        std::unique_ptr<ETLSource> src = std::make_unique<ETLSource>(
            config,
            ioContext,
            backend,
            subscriptions,
            networkValidatedLedgers,
            balancer);

        src->run();

        return src;
    }

    ~ETLSource()
    {
        close(false);
    }

    bool
    isConnected() const
    {
        return connected_;
    }

    std::chrono::system_clock::time_point
    getLastMsgTime() const
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        return lastMsgTime_;
    }

    void
    setLastMsgTime()
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        lastMsgTime_ = std::chrono::system_clock::now();
    }

    /// Create ETL source without gRPC endpoint
    /// Fetch ledger and load initial ledger will fail for this source
    /// Primarly used in read-only mode, to monitor when ledgers are validated
    ETLSource(
        boost::json::object const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
        ETLLoadBalancer& balancer);

    /// @param sequence ledger sequence to check for
    /// @return true if this source has the desired ledger
    bool
    hasLedger(uint32_t sequence) const
    {
        std::lock_guard lck(mtx_);
        for (auto& pair : validatedLedgers_)
        {
            if (sequence >= pair.first && sequence <= pair.second)
            {
                return true;
            }
            else if (sequence < pair.first)
            {
                // validatedLedgers_ is a sorted list of disjoint ranges
                // if the sequence comes before this range, the sequence will
                // come before all subsequent ranges
                return false;
            }
        }
        return false;
    }

    /// process the validated range received on the ledgers stream. set the
    /// appropriate member variable
    /// @param range validated range received on ledgers stream, e.g.
    /// "32570-32580,32600" (comma-separated single sequences or min-max
    /// pairs)
    void
    setValidatedRange(std::string const& range)
    {
        std::vector<std::pair<uint32_t, uint32_t>> pairs;
        std::vector<std::string> ranges;
        boost::split(ranges, range, boost::is_any_of(","));
        for (auto& pair : ranges)
        {
            std::vector<std::string> minAndMax;

            boost::split(minAndMax, pair, boost::is_any_of("-"));

            if (minAndMax.size() == 1)
            {
                // a lone sequence is stored as the degenerate range [s, s]
                uint32_t sequence = std::stoll(minAndMax[0]);
                pairs.push_back(std::make_pair(sequence, sequence));
            }
            else
            {
                assert(minAndMax.size() == 2);
                uint32_t min = std::stoll(minAndMax[0]);
                uint32_t max = std::stoll(minAndMax[1]);
                pairs.push_back(std::make_pair(min, max));
            }
        }
        std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) {
            return left.first < right.first;
        });

        // we only hold the lock here, to avoid blocking while string processing
        std::lock_guard lck(mtx_);
        validatedLedgers_ = std::move(pairs);
        validatedLedgersRaw_ = range;
    }

    /// @return the validated range of this source
    /// @note this is only used by server_info
    std::string
    getValidatedRange() const
    {
        std::lock_guard lck(mtx_);

        return validatedLedgersRaw_;
    }

    /// Fetch the specified ledger
    /// @param ledgerSequence sequence of the ledger to fetch
    /// @getObjects whether to get the account state diff between this ledger
    /// and the prior one
    /// @return the extracted data and the result status
    std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(uint32_t ledgerSequence, bool getObjects = true);

    /// @return a one-line human readable description of this source
    std::string
    toString() const
    {
        return "{ validated_ledger : " + getValidatedRange() +
            " , ip : " + ip_ + " , web socket port : " + wsPort_ +
            ", grpc port : " + grpcPort_ + " }";
    }

    /// @return description of this source as JSON, used by server_info
    boost::json::object
    toJson() const
    {
        boost::json::object res;
        res["validated_range"] = getValidatedRange();
        res["is_connected"] = std::to_string(isConnected());
        res["ip"] = ip_;
        res["ws_port"] = wsPort_;
        res["grpc_port"] = grpcPort_;
        auto last = getLastMsgTime();
        // epoch count of zero means no message has been received yet
        if (last.time_since_epoch().count() != 0)
            res["last_msg_arrival_time"] = std::to_string(
                std::chrono::duration_cast<std::chrono::milliseconds>(
                    std::chrono::system_clock::now() - getLastMsgTime())
                    .count());
        return res;
    }

    /// Download a ledger in full
    /// @param ledgerSequence sequence of the ledger to download
    /// @return true if the download was successful
    bool
    loadInitialLedger(uint32_t ledgerSequence);

    /// Attempt to reconnect to the ETL source
    void
    reconnect(boost::beast::error_code ec);

    /// Callback
    void
    onResolve(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type results);

    /// Callback
    void
    onConnect(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint);

    /// Callback
    void
    onHandshake(boost::beast::error_code ec);

    /// Callback
    void
    onWrite(boost::beast::error_code ec, size_t size);

    /// Callback
    void
    onRead(boost::beast::error_code ec, size_t size);

    /// Handle the most recently received message
    /// @return true if the message was handled successfully. false on error
    bool
    handleMessage();

    /// Close the websocket
    /// @param startAgain whether to reconnect
    void
    close(bool startAgain);

    /// Get grpc stub to forward requests to rippled node
    /// @return stub to send requests to ETL source
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
    getRippledForwardingStub() const;

    /// Forward a JSON-RPC request to the rippled node over a websocket
    /// @param request request to forward
    /// @return response from the rippled node
    boost::json::object
    forwardToRippled(boost::json::object const& request) const;
};
/// This class is used to manage connections to transaction processing
/// processes. This class spawns a listener for each etl source, which listens
/// to messages on the ledgers stream (to keep track of which ledgers have
/// been validated by the network, and the range of ledgers each etl source
/// has). This class also allows requests for ledger data to be load balanced
/// across all possible etl sources.
class ETLLoadBalancer
{
private:
    // All configured ETL sources; each is constructed (and starts
    // connecting) in the constructor
    std::vector<std::unique_ptr<ETLSource>> sources_;

public:
    ETLLoadBalancer(
        boost::json::array const& config,
        boost::asio::io_context& ioContext,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl);

    /// Factory: build a load balancer from the "etl_sources" array of the
    /// top-level config object
    static std::shared_ptr<ETLLoadBalancer>
    make_ETLLoadBalancer(
        boost::json::object const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
    {
        return std::make_shared<ETLLoadBalancer>(
            config.at("etl_sources").as_array(),
            ioc,
            backend,
            subscriptions,
            validatedLedgers);
    }

    ~ETLLoadBalancer()
    {
        sources_.clear();
    }

    /// Load the initial ledger, writing data to the queue
    /// @param sequence sequence of ledger to download
    void
    loadInitialLedger(uint32_t sequence);

    /// Fetch data for a specific ledger. This function will continuously try
    /// to fetch data for the specified ledger until the fetch succeeds, the
    /// ledger is found in the database, or the server is shutting down.
    /// @param ledgerSequence sequence of ledger to fetch data for
    /// @param getObjects if true, fetch diff between specified ledger and
    /// previous
    /// @return the extracted data, if extraction was successful. If the ledger
    /// was found in the database or the server is shutting down, the optional
    /// will be empty
    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(uint32_t ledgerSequence, bool getObjects);

    /// Determine whether messages received on the transactions_proposed stream
    /// should be forwarded to subscribing clients. The server subscribes to
    /// transactions_proposed on multiple ETLSources, yet only forwards messages
    /// from one source at any given time (to avoid sending duplicate messages
    /// to clients).
    /// @param in ETLSource in question
    /// @return true if messages should be forwarded
    bool
    shouldPropagateTxnStream(ETLSource* in) const
    {
        for (auto& src : sources_)
        {
            assert(src);

            // We pick the first ETLSource encountered that is connected
            if (src->isConnected())
            {
                if (src.get() == in)
                    return true;
                else
                    return false;
            }
        }

        // If no sources connected, then this stream has not been forwarded
        return true;
    }

    /// @return JSON array describing each source, used by server_info
    boost::json::value
    toJson() const
    {
        boost::json::array ret;
        for (auto& src : sources_)
        {
            ret.push_back(src->toJson());
        }
        return ret;
    }

    /// Randomly select a rippled node to forward a gRPC request to
    /// @return gRPC stub to forward requests to rippled node
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
    getRippledForwardingStub() const;

    /// Forward a JSON RPC request to a randomly selected rippled node
    /// @param request JSON-RPC request
    /// @return response received from rippled node
    boost::json::object
    forwardToRippled(boost::json::object const& request) const;

private:
    /// f is a function that takes an ETLSource as an argument and returns a
    /// bool. Attempt to execute f for one randomly chosen ETLSource that has
    /// the specified ledger. If f returns false, another randomly chosen
    /// ETLSource is used. The process repeats until f returns true.
    /// @param f function to execute. This function takes the ETL source as an
    /// argument, and returns a bool.
    /// @param ledgerSequence f is executed for each ETLSource that has this
    /// ledger
    /// @return true if f was eventually executed successfully. false if the
    /// ledger was found in the database or the server is shutting down
    template <class Func>
    bool
    execute(Func f, uint32_t ledgerSequence);
};
#endif

799
src/etl/ReportingETL.cpp Normal file
View File

@@ -0,0 +1,799 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/StringUtilities.h>
#include <backend/DBHelpers.h>
#include <etl/ReportingETL.h>
#include <ripple/beast/core/CurrentThreadName.h>
#include <boost/asio/connect.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <server/SubscriptionManager.h>
#include <cstdlib>
#include <iostream>
#include <string>
#include <variant>
namespace detail {
/// Convenience function for printing out basic ledger info
std::string
toString(ripple::LedgerInfo const& info)
{
    std::string out = "LedgerInfo { Sequence : " + std::to_string(info.seq) +
        " Hash : " + strHex(info.hash) + " TxHash : " + strHex(info.txHash) +
        " AccountHash : " + strHex(info.accountHash) +
        " ParentHash : " + strHex(info.parentHash) + " }";
    return out;
}
}  // namespace detail
/// Deserialize each transaction in the fetched ledger data, write the
/// transaction and metadata blobs to the backend, and collect the per-account
/// data needed for the account_tx table.
/// @param ledger header of the ledger the transactions belong to
/// @param data fetched ledger data; transaction and metadata blobs are moved
/// out of it, leaving them in a valid-but-unspecified state
/// @return one AccountTransactionsData entry per transaction
std::vector<AccountTransactionsData>
ReportingETL::insertTransactions(
    ripple::LedgerInfo const& ledger,
    org::xrpl::rpc::v1::GetLedgerResponse& data)
{
    std::vector<AccountTransactionsData> accountTxData;
    for (auto& txn :
         *(data.mutable_transactions_list()->mutable_transactions()))
    {
        std::string* raw = txn.mutable_transaction_blob();

        // Deserialize the blob to recover the transaction ID and metadata
        ripple::SerialIter it{raw->data(), raw->size()};
        ripple::STTx sttx{it};

        auto txSerializer =
            std::make_shared<ripple::Serializer>(sttx.getSerializer());

        ripple::TxMeta txMeta{
            sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};

        auto metaSerializer = std::make_shared<ripple::Serializer>(
            txMeta.getAsObject().getSerializer());

        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " : "
            << "Inserting transaction = " << sttx.getTransactionID();

        auto journal = ripple::debugLog();
        accountTxData.emplace_back(txMeta, sttx.getTransactionID(), journal);
        // The transaction ID is a 256-bit hash; use its raw 32 bytes as key
        std::string keyStr{(const char*)sttx.getTransactionID().data(), 32};
        backend_->writeTransaction(
            std::move(keyStr),
            ledger.seq,
            std::move(*raw),
            std::move(*txn.mutable_metadata_blob()));
    }
    return accountTxData;
}
/// Bootstrap an empty database from the network: fetch the starting ledger's
/// header and transactions, then download the full account state via the
/// load balancer.
/// @param startingSequence sequence of the ledger to bootstrap from
/// @return the ledger header on success; empty optional if the database is
/// not empty or the fetch was aborted
std::optional<ripple::LedgerInfo>
ReportingETL::loadInitialLedger(uint32_t startingSequence)
{
    // check that database is actually empty
    auto rng = backend_->fetchLedgerRangeNoThrow();
    if (rng)
    {
        BOOST_LOG_TRIVIAL(fatal) << __func__ << " : "
                                 << "Database is not empty";
        assert(false);
        return {};
    }

    // fetch the ledger from the network. This function will not return until
    // either the fetch is successful, or the server is being shutdown. This
    // only fetches the ledger header and the transactions+metadata
    std::optional<org::xrpl::rpc::v1::GetLedgerResponse> ledgerData{
        fetchLedgerData(startingSequence)};
    if (!ledgerData)
        return {};

    ripple::LedgerInfo lgrInfo =
        deserializeHeader(ripple::makeSlice(ledgerData->ledger_header()));

    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "Deserialized ledger header. " << detail::toString(lgrInfo);

    backend_->startWrites();

    backend_->writeLedger(
        lgrInfo, std::move(*ledgerData->mutable_ledger_header()), true);

    std::vector<AccountTransactionsData> accountTxData =
        insertTransactions(lgrInfo, *ledgerData);

    auto start = std::chrono::system_clock::now();

    // download the full account state map. This function downloads full ledger
    // data and pushes the downloaded data into the writeQueue. asyncWriter
    // consumes from the queue and inserts the data into the Ledger object.
    // Once the below call returns, all data has been pushed into the queue
    loadBalancer_->loadInitialLedger(startingSequence);

    if (!stopping_)
    {
        backend_->writeAccountTransactions(std::move(accountTxData));
    }
    backend_->finishWrites(startingSequence);
    auto end = std::chrono::system_clock::now();
    BOOST_LOG_TRIVIAL(debug) << "Time to download and store ledger = "
                             << ((end - start).count()) / 1000000000.0;
    return lgrInfo;
}
std::optional<ripple::Fees>
ReportingETL::getFees(std::uint32_t seq)
{
ripple::Fees fees;
auto key = ripple::keylet::fees().key;
auto bytes = backend_->fetchLedgerObject(key, seq);
if (!bytes)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " - could not find fees";
return {};
}
ripple::SerialIter it(bytes->data(), bytes->size());
ripple::SLE sle{it, key};
if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
fees.base = sle.getFieldU64(ripple::sfBaseFee);
if (sle.getFieldIndex(ripple::sfReferenceFeeUnits) != -1)
fees.units = sle.getFieldU32(ripple::sfReferenceFeeUnits);
if (sle.getFieldIndex(ripple::sfReserveBase) != -1)
fees.reserve = sle.getFieldU32(ripple::sfReserveBase);
if (sle.getFieldIndex(ripple::sfReserveIncrement) != -1)
fees.increment = sle.getFieldU32(ripple::sfReserveIncrement);
return fees;
}
/// Publish the given ledger to all subscribers: the ledger header itself,
/// followed by every transaction in it.
/// @param lgrInfo header of the ledger to publish
void
ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo)
{
    auto ledgerRange = backend_->fetchLedgerRange();
    auto fees = getFees(lgrInfo.seq);
    // Check the cheap prerequisites before fetching the (potentially large)
    // transaction set; previously all transactions were fetched even when
    // this check was about to bail out
    if (!fees || !ledgerRange)
    {
        BOOST_LOG_TRIVIAL(error)
            << __func__ << " - could not fetch from database";
        return;
    }
    auto transactions = backend_->fetchAllTransactionsInLedger(lgrInfo.seq);
    std::string range = std::to_string(ledgerRange->minSequence) + "-" +
        std::to_string(ledgerRange->maxSequence);

    subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size());

    for (auto& txAndMeta : transactions)
        subscriptions_->pubTransaction(txAndMeta, lgrInfo.seq);

    setLastPublish();
}
/// Attempt to publish the ledger with the given sequence, waiting for it to
/// appear in the database if necessary.
/// @param ledgerSequence sequence of the ledger to publish
/// @param maxAttempts number of one-second waits before giving up
/// @return true if the ledger was published; false if this (writer-capable)
/// node should instead try to become the ETL writer
bool
ReportingETL::publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts)
{
    BOOST_LOG_TRIVIAL(info)
        << __func__ << " : "
        << "Attempting to publish ledger = " << ledgerSequence;
    size_t numAttempts = 0;
    while (!stopping_)
    {
        try
        {
            auto range = backend_->fetchLedgerRangeNoThrow();

            if (!range || range->maxSequence < ledgerSequence)
            {
                BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                                         << "Trying to publish. Could not find "
                                            "ledger with sequence = "
                                         << ledgerSequence;
                // We try maxAttempts times to publish the ledger, waiting one
                // second in between each attempt.
                // If the ledger is not present in the database after
                // maxAttempts, we attempt to take over as the writer. If the
                // takeover fails, doContinuousETL will return, and this node
                // will go back to publishing. If the node is in strict read
                // only mode, we simply skip publishing this ledger and return
                // false indicating the publish failed
                if (numAttempts >= maxAttempts)
                {
                    BOOST_LOG_TRIVIAL(debug)
                        << __func__ << " : "
                        << "Failed to publish ledger after " << numAttempts
                        << " attempts.";
                    if (!readOnly_)
                    {
                        BOOST_LOG_TRIVIAL(info)
                            << __func__ << " : "
                            << "Attempting to become ETL writer";
                        return false;
                    }
                    // read-only mode: keep waiting for the writer to catch up
                }
                std::this_thread::sleep_for(std::chrono::seconds(1));
                ++numAttempts;
                continue;
            }
            else
            {
                auto lgr = backend_->fetchLedgerBySequence(ledgerSequence);
                assert(lgr);
                publishLedger(*lgr);

                return true;
            }
        }
        catch (Backend::DatabaseTimeout const& e)
        {
            // transient database timeout; retry immediately
            continue;
        }
    }
    return false;
}
/// Fetch the header and transactions (no state diff) for the given ledger
/// from the load balancer.
/// @param idx sequence of the ledger to fetch
/// @return the fetched data, or empty optional if the fetch was aborted
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
ReportingETL::fetchLedgerData(uint32_t idx)
{
    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "Attempting to fetch ledger with sequence = " << idx;

    std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
        loadBalancer_->fetchLedger(idx, false);
    // fetchLedger returns an empty optional when the fetch is aborted (e.g.
    // server shutdown); dereferencing it for the trace log would be
    // undefined behavior
    if (response)
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " : "
            << "GetLedger reply = " << response->DebugString();
    return response;
}
/// Fetch the header, transactions, and state-object diff for the given
/// ledger from the load balancer.
/// @param idx sequence of the ledger to fetch
/// @return the fetched data, or empty optional if the fetch was aborted
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
ReportingETL::fetchLedgerDataAndDiff(uint32_t idx)
{
    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "Attempting to fetch ledger with sequence = " << idx;

    std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
        loadBalancer_->fetchLedger(idx, true);
    // fetchLedger returns an empty optional when the fetch is aborted (e.g.
    // server shutdown); dereferencing it for the trace log would be
    // undefined behavior
    if (response)
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " : "
            << "GetLedger reply = " << response->DebugString();
    return response;
}
/// Write the next ledger to the database from the fetched data: header,
/// created/modified/deleted ledger objects, and transactions. Commits
/// (finishWrites) only once accumulated transactions reach txnThreshold_.
/// @param rawData fetched ledger data; blobs are moved out of it
/// @return the deserialized header, and whether the commit (if performed)
/// succeeded -- false indicates a write conflict
std::pair<ripple::LedgerInfo, bool>
ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
{
    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                             << "Beginning ledger update";

    ripple::LedgerInfo lgrInfo =
        deserializeHeader(ripple::makeSlice(rawData.ledger_header()));

    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "Deserialized ledger header. " << detail::toString(lgrInfo);

    backend_->startWrites();
    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                             << "started writes";

    backend_->writeLedger(lgrInfo, std::move(*rawData.mutable_ledger_header()));
    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                             << "wrote ledger header";

    for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects()))
    {
        bool isCreated = false;
        bool isDeleted = false;
        if (obj.mod_type() == org::xrpl::rpc::v1::RawLedgerObject::CREATED)
            isCreated = true;
        else if (
            obj.mod_type() == org ::xrpl::rpc::v1::RawLedgerObject::DELETED)
            isDeleted = true;

        // Track the order book directory for offers, so book indexes can be
        // maintained: derive it from the object for creations, or take the
        // one rippled reports for deletions
        std::optional<ripple::uint256> bookDir;
        if (isCreated)
        {
            if (isOffer(obj.data()))
                bookDir = getBook(obj.data());
        }
        else if (obj.book_of_deleted_offer().size())
        {
            bookDir =
                ripple::uint256::fromVoid(obj.book_of_deleted_offer().data());
        }

        // An object cannot be both created and deleted in the same ledger
        assert(not(isCreated and isDeleted));
        backend_->writeLedgerObject(
            std::move(*obj.mutable_key()),
            lgrInfo.seq,
            std::move(*obj.mutable_data()),
            isCreated,
            isDeleted,
            std::move(bookDir));
    }
    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "wrote objects. num objects = "
        << std::to_string(rawData.ledger_objects().objects_size());
    std::vector<AccountTransactionsData> accountTxData{
        insertTransactions(lgrInfo, rawData)};

    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "Inserted all transactions. Number of transactions  = "
        << rawData.transactions_list().transactions_size();

    backend_->writeAccountTransactions(std::move(accountTxData));

    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                             << "wrote account_tx";
    accumTxns_ += rawData.transactions_list().transactions_size();
    bool success = true;
    // Batch commits: only call finishWrites once enough transactions have
    // accumulated since the last commit
    if (accumTxns_ >= txnThreshold_)
    {
        auto start = std::chrono::system_clock::now();
        success = backend_->finishWrites(lgrInfo.seq);
        auto end = std::chrono::system_clock::now();

        auto duration = ((end - start).count()) / 1000000000.0;
        BOOST_LOG_TRIVIAL(debug)
            << __func__ << " Accumulated " << std::to_string(accumTxns_)
            << " transactions. Wrote in " << std::to_string(duration)
            << " transactions per second = "
            << std::to_string(accumTxns_ / duration);
        accumTxns_ = 0;
    }
    else
        BOOST_LOG_TRIVIAL(debug) << __func__ << " skipping commit";
    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "Inserted/modified/deleted all objects. Number of objects = "
        << rawData.ledger_objects().objects_size();

    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "Finished ledger update. " << detail::toString(lgrInfo);
    return {lgrInfo, success};
}
// Database must be populated when this starts
std::optional<uint32_t>
ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
{
if (finishSequence_ && startSequence > *finishSequence_)
return {};
/*
* Behold, mortals! This function spawns three separate threads, which talk
* to each other via 2 different thread safe queues and 1 atomic variable.
* All threads and queues are function local. This function returns when all
*
* of the threads exit. There are two termination conditions: the first is
* if the load thread encounters a write conflict. In this case, the load
* thread sets writeConflict, an atomic bool, to true, which signals the
* other threads to stop. The second termination condition is when the
* entire server is shutting down, which is detected in one of three ways:
* 1. isStopping() returns true if the server is shutting down
* 2. networkValidatedLedgers_.waitUntilValidatedByNetwork returns
* false, signaling the wait was aborted.
* 3. fetchLedgerDataAndDiff returns an empty optional, signaling the fetch
* was aborted.
* In all cases, the extract thread detects this condition,
* and pushes an empty optional onto the transform queue. The transform
* thread, upon popping an empty optional, pushes an empty optional onto the
* load queue, and then returns. The load thread, upon popping an empty
* optional, returns.
*/
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Starting etl pipeline";
writing_ = true;
auto rng = backend_->fetchLedgerRangeNoThrow();
if (!rng || rng->maxSequence != startSequence - 1)
{
assert(false);
throw std::runtime_error("runETLPipeline: parent ledger is null");
}
std::atomic<uint32_t> minSequence = rng->minSequence;
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "Populating caches";
BOOST_LOG_TRIVIAL(info) << __func__ << " : "
<< "Populated caches";
std::atomic_bool writeConflict = false;
std::optional<uint32_t> lastPublishedSequence;
uint32_t maxQueueSize = 1000 / numExtractors;
auto begin = std::chrono::system_clock::now();
using QueueType =
ThreadSafeQueue<std::optional<org::xrpl::rpc::v1::GetLedgerResponse>>;
std::vector<std::shared_ptr<QueueType>> queues;
auto getNext = [&queues, &startSequence, &numExtractors](
uint32_t sequence) -> std::shared_ptr<QueueType> {
std::cout << std::to_string((sequence - startSequence) % numExtractors);
return queues[(sequence - startSequence) % numExtractors];
};
std::vector<std::thread> extractors;
for (size_t i = 0; i < numExtractors; ++i)
{
auto transformQueue = std::make_shared<QueueType>(maxQueueSize);
queues.push_back(transformQueue);
extractors.emplace_back([this,
&startSequence,
&writeConflict,
transformQueue,
i,
numExtractors]() {
beast::setCurrentThreadName("rippled: ReportingETL extract");
uint32_t currentSequence = startSequence + i;
double totalTime = 0;
// there are two stopping conditions here.
// First, if there is a write conflict in the load thread, the
// ETL mechanism should stop. The other stopping condition is if
// the entire server is shutting down. This can be detected in a
// variety of ways. See the comment at the top of the function
while ((!finishSequence_ || currentSequence <= *finishSequence_) &&
networkValidatedLedgers_->waitUntilValidatedByNetwork(
currentSequence) &&
!writeConflict && !isStopping())
{
auto start = std::chrono::system_clock::now();
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
fetchResponse{fetchLedgerDataAndDiff(currentSequence)};
auto end = std::chrono::system_clock::now();
auto time = ((end - start).count()) / 1000000000.0;
totalTime += time;
auto tps =
fetchResponse->transactions_list().transactions_size() /
time;
BOOST_LOG_TRIVIAL(info)
<< "Extract phase time = " << time
<< " . Extract phase tps = " << tps
<< " . Avg extract time = "
<< totalTime / (currentSequence - startSequence + 1)
<< " . thread num = " << i
<< " . seq = " << currentSequence;
// if the fetch is unsuccessful, stop. fetchLedger only
// returns false if the server is shutting down, or if the
// ledger was found in the database (which means another
// process already wrote the ledger that this process was
// trying to extract; this is a form of a write conflict).
// Otherwise, fetchLedgerDataAndDiff will keep trying to
// fetch the specified ledger until successful
if (!fetchResponse)
{
break;
}
transformQueue->push(std::move(fetchResponse));
currentSequence += numExtractors;
if (finishSequence_ && currentSequence > *finishSequence_)
break;
}
// empty optional tells the transformer to shut down
transformQueue->push({});
});
}
std::thread transformer{[this,
&minSequence,
&writeConflict,
&startSequence,
&getNext,
&lastPublishedSequence]() {
beast::setCurrentThreadName("rippled: ReportingETL transform");
uint32_t currentSequence = startSequence;
while (!writeConflict)
{
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> fetchResponse{
getNext(currentSequence)->pop()};
++currentSequence;
// if fetchResponse is an empty optional, the extracter thread
// has stopped and the transformer should stop as well
if (!fetchResponse)
{
break;
}
if (isStopping())
continue;
auto numTxns =
fetchResponse->transactions_list().transactions_size();
auto numObjects = fetchResponse->ledger_objects().objects_size();
auto start = std::chrono::system_clock::now();
auto [lgrInfo, success] = buildNextLedger(*fetchResponse);
auto end = std::chrono::system_clock::now();
auto duration = ((end - start).count()) / 1000000000.0;
if (success)
BOOST_LOG_TRIVIAL(info)
<< "Load phase of etl : "
<< "Successfully published ledger! Ledger info: "
<< detail::toString(lgrInfo) << ". txn count = " << numTxns
<< ". object count = " << numObjects
<< ". load time = " << duration
<< ". load txns per second = " << numTxns / duration
<< ". load objs per second = " << numObjects / duration;
else
BOOST_LOG_TRIVIAL(error)
<< "Error writing ledger. " << detail::toString(lgrInfo);
// success is false if the ledger was already written
if (success)
{
publishLedger(lgrInfo);
lastPublishedSequence = lgrInfo.seq;
}
writeConflict = !success;
if (onlineDeleteInterval_ && !deleting_ &&
lgrInfo.seq - minSequence > *onlineDeleteInterval_)
{
deleting_ = true;
ioContext_.post([this, &minSequence]() {
BOOST_LOG_TRIVIAL(info) << "Running online delete";
backend_->doOnlineDelete(*onlineDeleteInterval_);
BOOST_LOG_TRIVIAL(info) << "Finished online delete";
auto rng = backend_->fetchLedgerRangeNoThrow();
minSequence = rng->minSequence;
deleting_ = false;
});
}
}
}};
transformer.join();
for (size_t i = 0; i < numExtractors; ++i)
{
// pop from each queue that might be blocked on a push
getNext(i)->tryPop();
}
// wait for all of the extractors to stop
for (auto& t : extractors)
t.join();
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(debug)
<< "Extracted and wrote " << *lastPublishedSequence - startSequence
<< " in " << ((end - begin).count()) / 1000000000.0;
writing_ = false;
BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
<< "Stopping etl pipeline";
return lastPublishedSequence;
}
// main loop. The software begins monitoring the ledgers that are validated
// by the network. The member networkValidatedLedgers_ keeps track of the
// sequences of ledgers validated by the network. Whenever a ledger is validated
// by the network, the software looks for that ledger in the database. Once the
// ledger is found in the database, the software publishes that ledger to the
// ledgers stream. If a network validated ledger is not found in the database
// after a certain amount of time, then the software attempts to take over
// responsibility of the ETL process, where it writes new ledgers to the
// database. The software will relinquish control of the ETL process if it
// detects that another process has taken over ETL.
void
ReportingETL::monitor()
{
    std::optional<uint32_t> latestSequence =
        backend_->fetchLatestLedgerSequence();
    if (!latestSequence)
    {
        BOOST_LOG_TRIVIAL(info) << __func__ << " : "
                                << "Database is empty. Will download a ledger "
                                   "from the network.";
        std::optional<ripple::LedgerInfo> ledger;
        if (startSequence_)
        {
            BOOST_LOG_TRIVIAL(info)
                << __func__ << " : "
                << "ledger sequence specified in config. "
                << "Will begin ETL process starting with ledger "
                << *startSequence_;
            ledger = loadInitialLedger(*startSequence_);
        }
        else
        {
            BOOST_LOG_TRIVIAL(info)
                << __func__ << " : "
                << "Waiting for next ledger to be validated by network...";
            std::optional<uint32_t> mostRecentValidated =
                networkValidatedLedgers_->getMostRecent();
            if (mostRecentValidated)
            {
                BOOST_LOG_TRIVIAL(info) << __func__ << " : "
                                        << "Ledger " << *mostRecentValidated
                                        << " has been validated. "
                                        << "Downloading...";
                ledger = loadInitialLedger(*mostRecentValidated);
            }
            else
            {
                BOOST_LOG_TRIVIAL(info) << __func__ << " : "
                                        << "The wait for the next validated "
                                        << "ledger has been aborted. "
                                        << "Exiting monitor loop";
                return;
            }
        }
        if (ledger)
            latestSequence = ledger->seq;
    }
    else
    {
        if (startSequence_)
        {
            BOOST_LOG_TRIVIAL(warning)
                << "start sequence specified but db is already populated";
        }
        BOOST_LOG_TRIVIAL(info)
            << __func__ << " : "
            << "Database already populated. Picking up from the tip of history";
    }
    // latestSequence is still empty if the initial ledger download above was
    // aborted (e.g. the server is shutting down)
    if (!latestSequence)
    {
        BOOST_LOG_TRIVIAL(error)
            << __func__ << " : "
            << "Failed to load initial ledger. Exiting monitor loop";
        return;
    }
    uint32_t nextSequence = latestSequence.value() + 1;
    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " : "
        << "Database is populated. "
        << "Starting monitor loop. sequence = " << nextSequence;
    while (!stopping_ &&
           networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence))
    {
        BOOST_LOG_TRIVIAL(info) << __func__ << " : "
                                << "Ledger with sequence = " << nextSequence
                                << " has been validated by the network. "
                                << "Attempting to find in database and publish";
        // Attempt to take over responsibility of ETL writer after 10 failed
        // attempts to publish the ledger. publishLedger() fails if the
        // ledger that has been validated by the network is not found in the
        // database after the specified number of attempts. publishLedger()
        // waits one second between each attempt to read the ledger from the
        // database
        //
        // In strict read-only mode, when the software fails to find a
        // ledger in the database that has been validated by the network,
        // the software will only try to publish subsequent ledgers once,
        // until one of those ledgers is found in the database. Once the
        // software successfully publishes a ledger, the software will fall
        // back to the normal behavior of trying several times to publish
        // the ledger that has been validated by the network. In this
        // manner, a reporting process running in read-only mode does not
        // need to restart if the database is wiped.
        constexpr size_t maxAttempts = 10;
        bool success = publishLedger(nextSequence, maxAttempts);
        if (!success)
        {
            BOOST_LOG_TRIVIAL(warning)
                << __func__ << " : "
                << "Failed to publish ledger with sequence = " << nextSequence
                << " . Beginning ETL";
            // runETLPipeline returns the most recent sequence published,
            // or an empty optional if no sequence was published
            std::optional<uint32_t> lastPublished =
                runETLPipeline(nextSequence, extractorThreads_);
            BOOST_LOG_TRIVIAL(info)
                << __func__ << " : "
                << "Aborting ETL. Falling back to publishing";
            // if no ledger was published, don't increment nextSequence
            if (lastPublished)
                nextSequence = *lastPublished + 1;
        }
        else
        {
            ++nextSequence;
        }
    }
}
// Publish ledgers as they are written to the database by another process.
// In strict read-only mode this process never attempts to become the ETL
// writer; it only waits for each network-validated ledger and publishes it.
void
ReportingETL::monitorReadOnly()
{
    BOOST_LOG_TRIVIAL(debug) << "Starting reporting in strict read only mode";
    std::optional<uint32_t> mostRecent =
        networkValidatedLedgers_->getMostRecent();
    // getMostRecent() returns an empty optional if the wait was aborted
    // (server shutdown)
    if (!mostRecent)
        return;
    uint32_t sequence = *mostRecent;
    while (!stopping_ &&
           networkValidatedLedgers_->waitUntilValidatedByNetwork(sequence))
    {
        // best effort: if the ledger is not found within the attempt limit,
        // move on to the next sequence (see comment in monitor())
        publishLedger(sequence, 30);
        ++sequence;
    }
}
// Spawn the worker thread that drives this process: either the read-only
// publish loop or the full monitor/ETL loop, depending on configuration.
void
ReportingETL::doWork()
{
    auto mainLoop = [this]() {
        beast::setCurrentThreadName("rippled: ReportingETL worker");
        // Strict read-only processes never attempt to write
        readOnly_ ? monitorReadOnly() : monitor();
    };
    worker_ = std::thread{std::move(mainLoop)};
}
// Construct the ETL engine. All configuration fields are optional; defaults
// are declared in the header.
ReportingETL::ReportingETL(
    boost::json::object const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<ETLLoadBalancer> balancer,
    std::shared_ptr<NetworkValidatedLedgers> ledgers)
    : publishStrand_(ioc)
    , ioContext_(ioc)
    , backend_(backend)
    , subscriptions_(subscriptions)
    , loadBalancer_(balancer)
    , networkValidatedLedgers_(ledgers)
{
    if (auto const* value = config.if_contains("start_sequence"))
        startSequence_ = value->as_int64();
    if (auto const* value = config.if_contains("finish_sequence"))
        finishSequence_ = value->as_int64();
    if (auto const* value = config.if_contains("read_only"))
        readOnly_ = value->as_bool();
    if (auto const* value = config.if_contains("online_delete"))
    {
        int64_t const interval = value->as_int64();
        uint32_t const max = std::numeric_limits<uint32_t>::max();
        // online_delete must fit in a uint32_t
        if (interval > max)
        {
            std::stringstream msg;
            msg << "online_delete cannot be greater than "
                << std::to_string(max);
            throw std::runtime_error(msg.str());
        }
        // zero or negative values leave online delete disabled
        if (interval > 0)
            onlineDeleteInterval_ = static_cast<uint32_t>(interval);
    }
    if (auto const* value = config.if_contains("extractor_threads"))
        extractorThreads_ = value->as_int64();
    if (auto const* value = config.if_contains("txn_threshold"))
        txnThreshold_ = value->as_int64();
}

343
src/etl/ReportingETL.h Normal file
View File

@@ -0,0 +1,343 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
#define RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
#include <ripple/ledger/ReadView.h>
#include <boost/algorithm/string.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h>
#include <etl/ETLSource.h>
#include <server/SubscriptionManager.h>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <chrono>
struct AccountTransactionsData;
class SubscriptionManager;
/**
* This class is responsible for continuously extracting data from a
* p2p node, and writing that data to the databases. Usually, multiple different
* processes share access to the same network accessible databases, in which
* case only one such process is performing ETL and writing to the database. The
* other processes simply monitor the database for new ledgers, and publish
* those ledgers to the various subscription streams. If a monitoring process
* determines that the ETL writer has failed (no new ledgers written for some
* time), the process will attempt to become the ETL writer. If there are
* multiple monitoring processes that try to become the ETL writer at the same
* time, one will win out, and the others will fall back to
* monitoring/publishing. In this sense, this class dynamically transitions from
* monitoring to writing and from writing to monitoring, based on the activity
* of other processes running on different machines.
*/
class ReportingETL
{
private:
std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<SubscriptionManager> subscriptions_;
std::shared_ptr<ETLLoadBalancer> loadBalancer_;
std::optional<uint32_t> onlineDeleteInterval_;
uint32_t extractorThreads_ = 1;
std::thread worker_;
boost::asio::io_context& ioContext_;
/// Strand to ensure that ledgers are published in order.
/// If ETL is started far behind the network, ledgers will be written and
/// published very rapidly. Monitoring processes will publish ledgers as
/// they are written. However, to publish a ledger, the monitoring process
/// needs to read all of the transactions for that ledger from the database.
/// Reading the transactions from the database requires network calls, which
/// can be slow. It is imperative however that the monitoring processes keep
/// up with the writer, else the monitoring processes will not be able to
/// detect if the writer failed. Therefore, publishing each ledger (which
/// includes reading all of the transactions from the database) is done from
/// the application wide asio io_service, and a strand is used to ensure
/// ledgers are published in order
boost::asio::io_context::strand publishStrand_;
/// Mechanism for communicating with ETL sources. ETLLoadBalancer wraps an
/// arbitrary number of ETL sources and load balances ETL requests across
/// those sources.
/// Mechanism for detecting when the network has validated a new ledger.
/// This class provides a way to wait for a specific ledger to be validated
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers_;
/// Whether the software is stopping
std::atomic_bool stopping_ = false;
/// Whether the software is performing online delete
// TODO this needs to live in the database, so diff servers can coordinate
// deletion
std::atomic_bool deleting_ = false;
/// Used to determine when to write to the database during the initial
/// ledger download. By default, the software downloads an entire ledger and
/// then writes to the database. If flushInterval_ is non-zero, the software
/// will write to the database as new ledger data (SHAMap leaf nodes)
/// arrives. It is not neccesarily more effient to write the data as it
/// arrives, as different SHAMap leaf nodes share the same SHAMap inner
/// nodes; flushing prematurely can result in the same SHAMap inner node
/// being written to the database more than once. It is recommended to use
/// the default value of 0 for this variable; however, different values can
/// be experimented with if better performance is desired.
size_t flushInterval_ = 0;
/// This variable controls the number of GetLedgerData calls that will be
/// executed in parallel during the initial ledger download. GetLedgerData
/// allows clients to page through a ledger over many RPC calls.
/// GetLedgerData returns a marker that is used as an offset in a subsequent
/// call. If numMarkers_ is greater than 1, there will be multiple chains of
/// GetLedgerData calls iterating over different parts of the same ledger in
/// parallel. This can dramatically speed up the time to download the
/// initial ledger. However, a higher value for this member variable puts
/// more load on the ETL source.
size_t numMarkers_ = 2;
/// Whether the process is in strict read-only mode. In strict read-only
/// mode, the process will never attempt to become the ETL writer, and will
/// only publish ledgers as they are written to the database.
bool readOnly_ = false;
/// Whether the process is writing to the database. Used by server_info
std::atomic_bool writing_ = false;
/// Ledger sequence to start ETL from. If this is empty, ETL will start from
/// the next ledger validated by the network. If this is set, and the
/// database is already populated, an error is thrown.
std::optional<uint32_t> startSequence_;
std::optional<uint32_t> finishSequence_;
size_t accumTxns_ = 0;
size_t txnThreshold_ = 0;
/// The time that the most recently published ledger was published. Used by
/// server_info
std::chrono::time_point<std::chrono::system_clock> lastPublish_;
std::mutex publishTimeMtx_;
std::chrono::time_point<std::chrono::system_clock>
getLastPublish()
{
std::unique_lock<std::mutex> lck(publishTimeMtx_);
return lastPublish_;
}
void
setLastPublish()
{
std::unique_lock<std::mutex> lck(publishTimeMtx_);
lastPublish_ = std::chrono::system_clock::now();
}
/// Download a ledger with specified sequence in full, via GetLedgerData,
/// and write the data to the databases. This takes several minutes or
/// longer.
/// @param sequence the sequence of the ledger to download
/// @return The ledger downloaded, with a full transaction and account state
/// map
std::optional<ripple::LedgerInfo>
loadInitialLedger(uint32_t sequence);
/// Run ETL. Extracts ledgers and writes them to the database, until a write
/// conflict occurs (or the server shuts down).
/// @note database must already be populated when this function is called
/// @param startSequence the first ledger to extract
/// @return the last ledger written to the database, if any
std::optional<uint32_t>
runETLPipeline(uint32_t startSequence, int offset);
/// Monitor the network for newly validated ledgers. Also monitor the
/// database to see if any process is writing those ledgers. This function
/// is called when the application starts, and will only return when the
/// application is shutting down. If the software detects the database is
/// empty, this function will call loadInitialLedger(). If the software
/// detects ledgers are not being written, this function calls
/// runETLPipeline(). Otherwise, this function publishes ledgers as they are
/// written to the database.
void
monitor();
/// Monitor the database for newly written ledgers.
/// Similar to the monitor(), except this function will never call
/// runETLPipeline() or loadInitialLedger(). This function only publishes
/// ledgers as they are written to the database.
void
monitorReadOnly();
/// Extract data for a particular ledger from an ETL source. This function
/// continously tries to extract the specified ledger (using all available
/// ETL sources) until the extraction succeeds, or the server shuts down.
/// @param sequence sequence of the ledger to extract
/// @return ledger header and transaction+metadata blobs. Empty optional
/// if the server is shutting down
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedgerData(uint32_t sequence);
/// Extract data for a particular ledger from an ETL source. This function
/// continously tries to extract the specified ledger (using all available
/// ETL sources) until the extraction succeeds, or the server shuts down.
/// @param sequence sequence of the ledger to extract
/// @return ledger header, transaction+metadata blobs, and all ledger
/// objects created, modified or deleted between this ledger and the parent.
/// Empty optional if the server is shutting down
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedgerDataAndDiff(uint32_t sequence);
/// Insert all of the extracted transactions into the ledger
/// @param ledger ledger to insert transactions into
/// @param data data extracted from an ETL source
/// @return struct that contains the neccessary info to write to the
/// transctions and account_transactions tables in Postgres (mostly
/// transaction hashes, corresponding nodestore hashes and affected
/// accounts)
std::vector<AccountTransactionsData>
insertTransactions(
ripple::LedgerInfo const& ledger,
org::xrpl::rpc::v1::GetLedgerResponse& data);
// TODO update this documentation
/// Build the next ledger using the previous ledger and the extracted data.
/// This function calls insertTransactions()
/// @note rawData should be data that corresponds to the ledger immediately
/// following parent
/// @param parent the previous ledger
/// @param rawData data extracted from an ETL source
/// @return the newly built ledger and data to write to Postgres
std::pair<ripple::LedgerInfo, bool>
buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData);
/// Attempt to read the specified ledger from the database, and then publish
/// that ledger to the ledgers stream.
/// @param ledgerSequence the sequence of the ledger to publish
/// @param maxAttempts the number of times to attempt to read the ledger
/// from the database. 1 attempt per second
/// @return whether the ledger was found in the database and published
bool
publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts = 10);
/// Publish the passed in ledger
/// @param ledger the ledger to publish
void
publishLedger(ripple::LedgerInfo const& lgrInfo);
/// Get fees at a current ledger_index
/// @param seq the ledger index
/// @return nullopt if not found, fees if found.
std::optional<ripple::Fees>
getFees(std::uint32_t seq);
bool
isStopping()
{
return stopping_;
}
/// Get the number of markers to use during the initial ledger download.
/// This is equivelent to the degree of parallelism during the initial
/// ledger download
/// @return the number of markers
uint32_t
getNumMarkers()
{
return numMarkers_;
}
boost::json::object
getInfo()
{
boost::json::object result;
result["etl_sources"] = loadBalancer_->toJson();
result["is_writer"] = writing_.load();
result["read_only"] = readOnly_;
auto last = getLastPublish();
if (last.time_since_epoch().count() != 0)
result["last_publish_time"] = std::to_string(
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now() - getLastPublish())
.count());
return result;
}
/// start all of the necessary components and begin ETL
void
run()
{
BOOST_LOG_TRIVIAL(info) << "Starting reporting etl";
stopping_ = false;
doWork();
}
void
doWork();
public:
ReportingETL(
boost::json::object const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
std::shared_ptr<NetworkValidatedLedgers> ledgers);
static std::shared_ptr<ReportingETL>
make_ReportingETL(
boost::json::object const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
std::shared_ptr<NetworkValidatedLedgers> ledgers)
{
auto etl = std::make_shared<ReportingETL>(
config, ioc, backend, subscriptions, balancer, ledgers);
etl->run();
return etl;
}
~ReportingETL()
{
BOOST_LOG_TRIVIAL(info) << "onStop called";
BOOST_LOG_TRIVIAL(debug) << "Stopping Reporting ETL";
stopping_ = true;
if (worker_.joinable())
worker_.join();
BOOST_LOG_TRIVIAL(debug) << "Joined ReportingETL worker thread";
}
};
#endif

View File

@@ -0,0 +1,160 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <handlers/RPCHelpers.h>
/// Serialize a payment channel ledger entry and append it to the given JSON
/// array. The emitted field layout matches rippled's account_channels
/// response.
/// @param jsonLines array to append the serialized channel to
/// @param line the ltPAYCHAN ledger entry to serialize
void
addChannel(boost::json::array& jsonLines, ripple::SLE const& line)
{
    boost::json::object channel;
    channel["channel_id"] = ripple::to_string(line.key());
    channel["account"] =
        ripple::to_string(line.getAccountID(ripple::sfAccount));
    channel["destination_account"] =
        ripple::to_string(line.getAccountID(ripple::sfDestination));
    channel["amount"] = line[ripple::sfAmount].getText();
    channel["balance"] = line[ripple::sfBalance].getText();
    // only include the public key if one is set on the channel
    if (publicKeyType(line[ripple::sfPublicKey]))
    {
        ripple::PublicKey const pk(line[ripple::sfPublicKey]);
        channel["public_key"] = toBase58(ripple::TokenType::AccountPublic, pk);
        channel["public_key_hex"] = strHex(pk);
    }
    channel["settle_delay"] = line[ripple::sfSettleDelay];
    // optional fields are only emitted when present on the ledger entry
    if (auto const& expiration = line[~ripple::sfExpiration])
        channel["expiration"] = *expiration;
    if (auto const& cancelAfter = line[~ripple::sfCancelAfter])
        channel["cancel_after"] = *cancelAfter;
    if (auto const& sourceTag = line[~ripple::sfSourceTag])
        channel["source_tag"] = *sourceTag;
    if (auto const& destinationTag = line[~ripple::sfDestinationTag])
        channel["destination_tag"] = *destinationTag;
    jsonLines.push_back(channel);
}
/// account_channels handler: list the payment channels owned by an account,
/// optionally filtered by destination account, with cursor-based paging.
/// @param request the parsed RPC request
/// @param backend database interface to read ledger objects from
/// @return JSON response; contains "error" on failure
boost::json::object
doAccountChannels(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    if (!request.contains("account"))
    {
        response["error"] = "Must contain account";
        return response;
    }
    if (!request.at("account").is_string())
    {
        response["error"] = "Account must be a string";
        return response;
    }
    ripple::AccountID accountID;
    auto parsed = ripple::parseBase58<ripple::AccountID>(
        request.at("account").as_string().c_str());
    if (!parsed)
    {
        response["error"] = "Invalid account";
        return response;
    }
    accountID = *parsed;
    boost::optional<ripple::AccountID> destAccount;
    if (request.contains("destination_account"))
    {
        if (!request.at("destination_account").is_string())
        {
            response["error"] = "destination_account should be a string";
            return response;
        }
        destAccount = ripple::parseBase58<ripple::AccountID>(
            request.at("destination_account").as_string().c_str());
        if (!destAccount)
        {
            response["error"] = "Invalid destination account";
            return response;
        }
    }
    std::uint32_t limit = 200;
    if (request.contains("limit"))
    {
        if (!request.at("limit").is_int64())
        {
            response["error"] = "limit must be integer";
            return response;
        }
        // validate the signed value before narrowing to avoid a negative
        // limit wrapping around to a huge unsigned value
        int64_t requestedLimit = request.at("limit").as_int64();
        if (requestedLimit <= 0)
        {
            response["error"] = "limit must be positive";
            return response;
        }
        limit = static_cast<std::uint32_t>(requestedLimit);
    }
    ripple::uint256 cursor = beast::zero;
    if (request.contains("cursor"))
    {
        if (!request.at("cursor").is_string())
        {
            response["error"] = "cursor must be string";
            return response;
        }
        auto bytes = ripple::strUnHex(request.at("cursor").as_string().c_str());
        // a valid cursor is exactly 32 bytes of hex; reject anything else
        // before dereferencing bytes
        if (!bytes or bytes->size() != 32)
        {
            response["error"] = "invalid cursor";
            return response;
        }
        cursor = ripple::uint256::fromVoid(bytes->data());
    }
    response["channels"] = boost::json::value(boost::json::array_kind);
    boost::json::array& jsonChannels = response.at("channels").as_array();
    // collect matching channels until the limit is exhausted; returning
    // false stops the traversal
    auto const addToResponse = [&](ripple::SLE const& sle) {
        if (sle.getType() == ripple::ltPAYCHAN &&
            sle.getAccountID(ripple::sfAccount) == accountID &&
            (!destAccount ||
             *destAccount == sle.getAccountID(ripple::sfDestination)))
        {
            if (limit-- == 0)
            {
                return false;
            }
            addChannel(jsonChannels, sle);
        }
        return true;
    };
    auto nextCursor = traverseOwnedNodes(
        backend, accountID, *ledgerSequence, cursor, addToResponse);
    if (nextCursor)
        response["next_cursor"] = ripple::strHex(*nextCursor);
    return response;
}

View File

@@ -0,0 +1,90 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <handlers/RPCHelpers.h>
/// account_currencies handler: list the currencies an account can send and
/// receive, derived from its trust lines.
/// @param request the parsed RPC request
/// @param backend database interface to read ledger objects from
/// @return JSON response; contains "error" on failure
boost::json::object
doAccountCurrencies(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    if (!request.contains("account"))
    {
        response["error"] = "Must contain account";
        return response;
    }
    if (!request.at("account").is_string())
    {
        response["error"] = "Account must be a string";
        return response;
    }
    auto const parsedAccount = ripple::parseBase58<ripple::AccountID>(
        request.at("account").as_string().c_str());
    if (!parsedAccount)
    {
        response["error"] = "Invalid account";
        return response;
    }
    ripple::AccountID const accountID = *parsedAccount;
    std::set<std::string> send, receive;
    // Visit every trust line owned by the account. A currency is receivable
    // while the balance is below our side's limit, and sendable while the
    // negated balance is below the peer's limit.
    auto const addToResponse = [&](ripple::SLE const& sle) {
        if (sle.getType() == ripple::ltRIPPLE_STATE)
        {
            auto const& balance = sle.getFieldAmount(ripple::sfBalance);
            auto const lowLimit = sle.getFieldAmount(ripple::sfLowLimit);
            auto const highLimit = sle.getFieldAmount(ripple::sfHighLimit);
            // NOTE(review): sfBalance is stored from the low account's
            // perspective; confirm whether it should be negated when this
            // account is the high side of the line
            bool const viewLowest = (lowLimit.getIssuer() == accountID);
            auto const lineLimit = viewLowest ? lowLimit : highLimit;
            auto const lineLimitPeer = viewLowest ? highLimit : lowLimit;
            if (balance < lineLimit)
                receive.insert(ripple::to_string(balance.getCurrency()));
            if ((-balance) < lineLimitPeer)
                send.insert(ripple::to_string(balance.getCurrency()));
        }
        return true;
    };
    traverseOwnedNodes(
        backend, accountID, *ledgerSequence, beast::zero, addToResponse);
    response["send_currencies"] = boost::json::value(boost::json::array_kind);
    {
        boost::json::array& sendJson =
            response.at("send_currencies").as_array();
        for (auto const& currency : send)
            sendJson.push_back(currency.c_str());
    }
    response["receive_currencies"] =
        boost::json::value(boost::json::array_kind);
    {
        boost::json::array& receiveJson =
            response.at("receive_currencies").as_array();
        for (auto const& currency : receive)
            receiveJson.push_back(currency.c_str());
    }
    return response;
}

View File

@@ -0,0 +1,136 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2014 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/json.hpp>
#include <handlers/RPCHelpers.h>
#include <backend/BackendInterface.h>
// {
// account: <ident>,
// strict: <bool> // optional (default false)
// // if true only allow public keys and addresses.
// ledger_hash : <ledger>
// ledger_index : <ledger_index>
// signer_lists : <bool> // optional (default false)
// // if true return SignerList(s).
// queue : <bool> // optional (default false)
// // if true return information about transactions
// // in the current TxQ, only if the requested
// // ledger is open. Otherwise if true, returns an
// // error.
// }
// TODO(tom): what is that "default"?
// Handle the account_info RPC: fetch the AccountRoot ledger object for
// the requested account at the requested ledger.
//
// Request fields:
//   account | ident (string, required) - base58 or hex account ID
//   binary          (bool, optional)   - return the object as hex instead
//                                        of JSON (default false)
// Response fields on success: "object", "db_time" (microseconds spent in
// the database fetch); "error" otherwise.
boost::json::object
doAccountInfo(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    // Accept the identifier under either "account" or "ident".
    std::string strIdent;
    if (request.contains("account"))
        strIdent = request.at("account").as_string().c_str();
    else if (request.contains("ident"))
        strIdent = request.at("ident").as_string().c_str();
    else
    {
        response["error"] = "missing account field";
        return response;
    }
    bool binary =
        request.contains("binary") ? request.at("binary").as_bool() : false;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    // bool bStrict = request.contains("strict") &&
    // params.at("strict").as_bool();
    // Get info on account. Try strict base58 decoding first, then fall
    // back to interpreting the identifier as hex.
    std::optional<ripple::AccountID> accountID =
        accountFromStringStrict(strIdent);
    if (!accountID)
    {
        accountID = ripple::AccountID();
        // Parse strIdent (not request.at("account")) so the hex fallback
        // also works when the identifier arrived via "ident"; the
        // original re-read "account", which may be absent and throw.
        if (!accountID->parseHex(strIdent.c_str()))
        {
            response["error"] = "account malformed";
            return response;
        }
    }
    auto key = ripple::keylet::account(accountID.value());
    auto start = std::chrono::system_clock::now();
    std::optional<std::vector<unsigned char>> dbResponse =
        backend.fetchLedgerObject(key.key, *ledgerSequence);
    auto end = std::chrono::system_clock::now();
    auto time =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start)
            .count();
    if (!dbResponse)
    {
        // Must return here: the original fell through and dereferenced
        // the empty optional below, which is undefined behavior.
        response["error"] = "no response from db";
        return response;
    }
    ripple::STLedgerEntry sle{
        ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key.key};
    // Verify the fetched object really is the AccountRoot for this key.
    if (!key.check(sle))
    {
        response["error"] = "error fetching record from db";
        return response;
    }
    response["success"] = "fetched successfully!";
    if (!binary)
        response["object"] = toJson(sle);
    else
        response["object"] = ripple::strHex(*dbResponse);
    response["db_time"] = time;
    // Return SignerList(s) if that is requested.
    /*
    if (params.isMember(jss::signer_lists) &&
        params[jss::signer_lists].asBool())
    {
        // We put the SignerList in an array because of an anticipated
        // future when we support multiple signer lists on one account.
        Json::Value jvSignerList = Json::arrayValue;
        // This code will need to be revisited if in the future we
        // support multiple SignerLists on one account.
        auto const sleSigners = ledger->read(keylet::signers(accountID));
        if (sleSigners)
            jvSignerList.append(sleSigners->toJson(JsonOptions::none));
        result[jss::account_data][jss::signer_lists] =
            std::move(jvSignerList);
    }
    */
    return response;
}

View File

@@ -0,0 +1,199 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/RippleState.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <handlers/RPCHelpers.h>
// Append a JSON representation of one trust line (RippleState entry) to
// jsonLines, from the point of view of `account`. If peerAccount is set,
// lines whose peer is a different account are skipped.
//
// A RippleState entry has a "low" and a "high" side; which set of
// limit/quality/flag fields applies to the viewer depends on which side
// `account` sits on.
void
addLine(
    boost::json::array& jsonLines,
    ripple::SLE const& line,
    ripple::AccountID const& account,
    boost::optional<ripple::AccountID> const& peerAccount)
{
    auto flags = line.getFieldU32(ripple::sfFlags);
    auto lowLimit = line.getFieldAmount(ripple::sfLowLimit);
    auto highLimit = line.getFieldAmount(ripple::sfHighLimit);
    auto lowID = lowLimit.getIssuer();
    auto highID = highLimit.getIssuer();
    auto lowQualityIn = line.getFieldU32(ripple::sfLowQualityIn);
    auto lowQualityOut = line.getFieldU32(ripple::sfLowQualityOut);
    auto highQualityIn = line.getFieldU32(ripple::sfHighQualityIn);
    auto highQualityOut = line.getFieldU32(ripple::sfHighQualityOut);
    auto balance = line.getFieldAmount(ripple::sfBalance);
    // Determine which side of the line the viewing account is on.
    bool viewLowest = (lowID == account);
    auto lineLimit = viewLowest ? lowLimit : highLimit;
    auto lineLimitPeer = !viewLowest ? lowLimit : highLimit;
    auto lineAccountIDPeer = !viewLowest ? lowID : highID;
    auto lineQualityIn = viewLowest ? lowQualityIn : highQualityIn;
    auto lineQualityOut = viewLowest ? lowQualityOut : highQualityOut;
    // Honor the optional peer filter.
    if (peerAccount and peerAccount != lineAccountIDPeer)
        return;
    bool lineAuth =
        flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth);
    bool lineAuthPeer =
        flags & (!viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth);
    bool lineNoRipple =
        flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple);
    bool lineDefaultRipple = flags & ripple::lsfDefaultRipple;
    bool lineNoRipplePeer = flags &
        (!viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple);
    bool lineFreeze =
        flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze);
    bool lineFreezePeer =
        flags & (!viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze);
    ripple::STAmount const& saBalance(balance);
    ripple::STAmount const& saLimit(lineLimit);
    ripple::STAmount const& saLimitPeer(lineLimitPeer);
    boost::json::object jPeer;
    jPeer["account"] = ripple::to_string(lineAccountIDPeer);
    jPeer["balance"] = saBalance.getText();
    jPeer["currency"] = ripple::to_string(saBalance.issue().currency);
    jPeer["limit"] = saLimit.getText();
    jPeer["limit_peer"] = saLimitPeer.getText();
    jPeer["quality_in"] = lineQualityIn;
    jPeer["quality_out"] = lineQualityOut;
    if (lineAuth)
        jPeer["authorized"] = true;
    if (lineAuthPeer)
        jPeer["peer_authorized"] = true;
    if (lineNoRipple || !lineDefaultRipple)
        jPeer["no_ripple"] = lineNoRipple;
    // Fixed: gate no_ripple_peer on the PEER's NoRipple flag. The
    // original tested lineNoRipple here (copy-paste), omitting the field
    // when only the peer side has NoRipple set; this matches rippled's
    // account_lines behavior.
    if (lineNoRipplePeer || !lineDefaultRipple)
        jPeer["no_ripple_peer"] = lineNoRipplePeer;
    if (lineFreeze)
        jPeer["freeze"] = true;
    if (lineFreezePeer)
        jPeer["freeze_peer"] = true;
    jsonLines.push_back(jPeer);
}
// Handle the account_lines RPC: list trust lines owned by an account at
// the requested ledger, optionally filtered by peer.
//
// Request fields:
//   account (string, required) - base58 account ID
//   peer    (string, optional) - restrict output to lines with this peer
//   limit   (int, optional)    - max lines to return (default 200)
//   cursor  (string, optional) - 32-byte hex cursor from a prior call
// Response: "lines" array plus "next_cursor" when more data remains.
boost::json::object
doAccountLines(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    if (!request.contains("account"))
    {
        response["error"] = "Must contain account";
        return response;
    }
    if (!request.at("account").is_string())
    {
        response["error"] = "Account must be a string";
        return response;
    }
    ripple::AccountID accountID;
    auto parsed = ripple::parseBase58<ripple::AccountID>(
        request.at("account").as_string().c_str());
    if (!parsed)
    {
        response["error"] = "Invalid account";
        return response;
    }
    accountID = *parsed;
    boost::optional<ripple::AccountID> peerAccount;
    if (request.contains("peer"))
    {
        if (!request.at("peer").is_string())
        {
            response["error"] = "peer should be a string";
            return response;
        }
        peerAccount = ripple::parseBase58<ripple::AccountID>(
            request.at("peer").as_string().c_str());
        if (!peerAccount)
        {
            response["error"] = "Invalid peer account";
            return response;
        }
    }
    std::uint32_t limit = 200;
    if (request.contains("limit"))
    {
        if (!request.at("limit").is_int64())
        {
            response["error"] = "limit must be integer";
            return response;
        }
        // Check the sign on the signed value. The original assigned to
        // uint32_t first, so a negative limit wrapped to a huge value and
        // the `limit <= 0` check never fired.
        auto const limitInt = request.at("limit").as_int64();
        if (limitInt <= 0)
        {
            response["error"] = "limit must be positive";
            return response;
        }
        limit = static_cast<std::uint32_t>(limitInt);
    }
    ripple::uint256 cursor = beast::zero;
    if (request.contains("cursor"))
    {
        if (!request.at("cursor").is_string())
        {
            // Original message incorrectly said "limit".
            response["error"] = "cursor must be string";
            return response;
        }
        auto bytes = ripple::strUnHex(request.at("cursor").as_string().c_str());
        // Reject a failed hex decode as well as a wrong-length cursor.
        // The original checked `bytes and bytes->size() != 32`, so a
        // failed decode fell through to bytes->data() on an empty
        // optional - undefined behavior.
        if (!bytes || bytes->size() != 32)
        {
            response["error"] = "invalid cursor";
            return response;
        }
        cursor = ripple::uint256::fromVoid(bytes->data());
    }
    response["lines"] = boost::json::value(boost::json::array_kind);
    boost::json::array& jsonLines = response.at("lines").as_array();
    // Collect trust lines until the limit is exhausted; returning false
    // stops the traversal so the backend can hand back a cursor.
    auto const addToResponse = [&](ripple::SLE const& sle) {
        if (sle.getType() == ripple::ltRIPPLE_STATE)
        {
            if (limit-- == 0)
            {
                return false;
            }
            addLine(jsonLines, sle, accountID, peerAccount);
        }
        return true;
    };
    auto nextCursor = traverseOwnedNodes(
        backend, accountID, *ledgerSequence, cursor, addToResponse);
    if (nextCursor)
        response["next_cursor"] = ripple::strHex(*nextCursor);
    return response;
}

View File

@@ -0,0 +1,119 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/RippleState.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <handlers/RPCHelpers.h>
// Maps the "type" filter strings accepted by the account_objects RPC to
// the corresponding ledger entry types.
// NOTE(review): left non-const because doAccountObjects reads it via
// operator[]; no visible code mutates it after initialization, but a
// const map with .at()/find would make that guarantee explicit.
std::unordered_map<std::string, ripple::LedgerEntryType> types{
    {"state", ripple::ltRIPPLE_STATE},
    {"ticket", ripple::ltTICKET},
    {"signer_list", ripple::ltSIGNER_LIST},
    {"payment_channel", ripple::ltPAYCHAN},
    {"offer", ripple::ltOFFER},
    {"escrow", ripple::ltESCROW},
    {"deposit_preauth", ripple::ltDEPOSIT_PREAUTH},
    {"check", ripple::ltCHECK},
};
// Handle the account_objects RPC: list ledger objects owned by an
// account at the requested ledger, optionally filtered by object type.
//
// Request fields:
//   account (string, required) - base58 account ID
//   type    (string, optional) - one of the keys in `types` above
//   cursor  (string, optional) - 32-byte hex cursor from a prior call
// Response: "objects" array plus "next_cursor" when more data remains.
// Note: unlike the other account_* handlers, no "limit" is applied here;
// pagination is driven entirely by the traversal's own cursor.
boost::json::object
doAccountObjects(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    if (!request.contains("account"))
    {
        response["error"] = "Must contain account";
        return response;
    }
    if (!request.at("account").is_string())
    {
        response["error"] = "Account must be a string";
        return response;
    }
    ripple::AccountID accountID;
    auto parsed = ripple::parseBase58<ripple::AccountID>(
        request.at("account").as_string().c_str());
    if (!parsed)
    {
        response["error"] = "Invalid account";
        return response;
    }
    accountID = *parsed;
    ripple::uint256 cursor = beast::zero;
    if (request.contains("cursor"))
    {
        if (!request.at("cursor").is_string())
        {
            // Original message incorrectly said "limit".
            response["error"] = "cursor must be string";
            return response;
        }
        auto bytes = ripple::strUnHex(request.at("cursor").as_string().c_str());
        // Reject a failed hex decode as well as a wrong-length cursor.
        // The original checked `bytes and bytes->size() != 32`, so a
        // failed decode fell through to bytes->data() on an empty
        // optional - undefined behavior.
        if (!bytes || bytes->size() != 32)
        {
            response["error"] = "invalid cursor";
            return response;
        }
        cursor = ripple::uint256::fromVoid(bytes->data());
    }
    std::optional<ripple::LedgerEntryType> objectType = {};
    if (request.contains("type"))
    {
        if (!request.at("type").is_string())
        {
            response["error"] = "type must be string";
            return response;
        }
        std::string typeAsString = request.at("type").as_string().c_str();
        // Single lookup instead of find() followed by operator[].
        auto const it = types.find(typeAsString);
        if (it == types.end())
        {
            response["error"] = "invalid object type";
            return response;
        }
        objectType = it->second;
    }
    response["objects"] = boost::json::value(boost::json::array_kind);
    boost::json::array& jsonObjects = response.at("objects").as_array();
    // Collect every owned object, or only those matching the type filter.
    auto const addToResponse = [&](ripple::SLE const& sle) {
        if (!objectType || objectType == sle.getType())
        {
            jsonObjects.push_back(toJson(sle));
        }
        return true;
    };
    auto nextCursor = traverseOwnedNodes(
        backend, accountID, *ledgerSequence, cursor, addToResponse);
    if (nextCursor)
        response["next_cursor"] = ripple::strHex(*nextCursor);
    return response;
}

View File

@@ -0,0 +1,158 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/RippleState.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <handlers/RPCHelpers.h>
// Append a JSON representation of one Offer ledger entry to offersJson.
// IOU amounts are rendered as {value, currency, issuer} objects; native
// XRP amounts are rendered as a plain drops string.
void
addOffer(boost::json::array& offersJson, ripple::SLE const& offer)
{
    // The offer's quality (exchange rate) is encoded in its book
    // directory index.
    auto const quality = getQuality(offer.getFieldH256(ripple::sfBookDirectory));
    ripple::STAmount const rate = ripple::amountFromQuality(quality);
    ripple::STAmount const takerPays = offer.getFieldAmount(ripple::sfTakerPays);
    ripple::STAmount const takerGets = offer.getFieldAmount(ripple::sfTakerGets);

    boost::json::object obj;
    // Shared rendering for both sides of the offer.
    auto const writeAmount =
        [&obj](char const* field, ripple::STAmount const& amount) {
            if (amount.native())
            {
                obj[field] = amount.getText();
            }
            else
            {
                boost::json::object amountJson;
                amountJson["value"] = amount.getText();
                amountJson["currency"] = ripple::to_string(amount.getCurrency());
                amountJson["issuer"] = ripple::to_string(amount.getIssuer());
                obj[field] = std::move(amountJson);
            }
        };
    writeAmount("taker_pays", takerPays);
    writeAmount("taker_gets", takerGets);

    obj["seq"] = offer.getFieldU32(ripple::sfSequence);
    obj["flags"] = offer.getFieldU32(ripple::sfFlags);
    obj["quality"] = rate.getText();
    if (offer.isFieldPresent(ripple::sfExpiration))
        obj["expiration"] = offer.getFieldU32(ripple::sfExpiration);
    offersJson.push_back(std::move(obj));
}
// Handle the account_offers RPC: list offers owned by an account at the
// requested ledger.
//
// Request fields:
//   account (string, required) - base58 account ID
//   limit   (int, optional)    - max offers to return (default 200)
//   cursor  (string, optional) - 32-byte hex cursor from a prior call
// Response: "offers" array plus "next_cursor" when more data remains.
boost::json::object
doAccountOffers(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    if (!request.contains("account"))
    {
        response["error"] = "Must contain account";
        return response;
    }
    if (!request.at("account").is_string())
    {
        response["error"] = "Account must be a string";
        return response;
    }
    ripple::AccountID accountID;
    auto parsed = ripple::parseBase58<ripple::AccountID>(
        request.at("account").as_string().c_str());
    if (!parsed)
    {
        response["error"] = "Invalid account";
        return response;
    }
    accountID = *parsed;
    std::uint32_t limit = 200;
    if (request.contains("limit"))
    {
        if (!request.at("limit").is_int64())
        {
            response["error"] = "limit must be integer";
            return response;
        }
        // Check the sign on the signed value. The original assigned to
        // uint32_t first, so a negative limit wrapped to a huge value and
        // the `limit <= 0` check never fired.
        auto const limitInt = request.at("limit").as_int64();
        if (limitInt <= 0)
        {
            response["error"] = "limit must be positive";
            return response;
        }
        limit = static_cast<std::uint32_t>(limitInt);
    }
    ripple::uint256 cursor = beast::zero;
    if (request.contains("cursor"))
    {
        if (!request.at("cursor").is_string())
        {
            // Original message incorrectly said "limit".
            response["error"] = "cursor must be string";
            return response;
        }
        auto bytes = ripple::strUnHex(request.at("cursor").as_string().c_str());
        // Reject a failed hex decode as well as a wrong-length cursor.
        // The original checked `bytes and bytes->size() != 32`, so a
        // failed decode fell through to bytes->data() on an empty
        // optional - undefined behavior.
        if (!bytes || bytes->size() != 32)
        {
            response["error"] = "invalid cursor";
            return response;
        }
        cursor = ripple::uint256::fromVoid(bytes->data());
    }
    response["offers"] = boost::json::value(boost::json::array_kind);
    boost::json::array& jsonLines = response.at("offers").as_array();
    // Collect offers until the limit is exhausted; returning false stops
    // the traversal so the backend can hand back a cursor.
    auto const addToResponse = [&](ripple::SLE const& sle) {
        if (sle.getType() == ripple::ltOFFER)
        {
            if (limit-- == 0)
            {
                return false;
            }
            addOffer(jsonLines, sle);
        }
        return true;
    };
    auto nextCursor = traverseOwnedNodes(
        backend, accountID, *ledgerSequence, cursor, addToResponse);
    if (nextCursor)
        response["next_cursor"] = ripple::strHex(*nextCursor);
    return response;
}

137
src/handlers/AccountTx.cpp Normal file
View File

@@ -0,0 +1,137 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2014 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <handlers/RPCHelpers.h>
#include <backend/BackendInterface.h>
// {
// account: account,
// ledger_index_min: ledger_index // optional, defaults to earliest
// ledger_index_max: ledger_index, // optional, defaults to latest
// binary: boolean, // optional, defaults to false
// forward: boolean, // optional, defaults to false
// limit: integer, // optional
// marker: object {ledger: ledger_index, seq: txn_sequence} // optional,
// resume previous query
// }
// Handle the account_tx RPC: return transactions affecting an account,
// paginated by a {ledger_sequence, transaction_index} cursor.
// Transactions from ledgers newer than the requested ledger sequence are
// filtered out (they may belong to a still-incomplete ledger).
boost::json::object
doAccountTx(boost::json::object const& request, BackendInterface const& backend)
{
    boost::json::object response;
    if (!request.contains("account"))
    {
        response["error"] = "Please specify an account";
        return response;
    }
    // Try base58 first, then fall back to interpreting the id as hex.
    auto account = ripple::parseBase58<ripple::AccountID>(
        request.at("account").as_string().c_str());
    if (!account)
    {
        account = ripple::AccountID();
        if (!account->parseHex(request.at("account").as_string().c_str()))
        {
            response["error"] = "account malformed";
            return response;
        }
    }
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    bool binary =
        request.contains("binary") ? request.at("binary").as_bool() : false;
    // Optional resume cursor: both ledger_sequence and transaction_index
    // must be present in the "cursor" object.
    std::optional<Backend::AccountTransactionsCursor> cursor;
    if (request.contains("cursor"))
    {
        auto const& obj = request.at("cursor").as_object();
        // NOTE(review): this local deliberately(?) shadows the outer
        // ledgerSequence within this scope - confirm intended.
        std::optional<uint32_t> ledgerSequence;
        if (obj.contains("ledger_sequence"))
        {
            ledgerSequence = (uint32_t)obj.at("ledger_sequence").as_int64();
        }
        std::optional<uint32_t> transactionIndex;
        if (obj.contains("transaction_index"))
        {
            transactionIndex = (uint32_t)obj.at("transaction_index").as_int64();
        }
        if (!ledgerSequence || !transactionIndex)
        {
            response["error"] =
                "malformed cursor. include transaction_index and "
                "ledger_sequence in an object named \"cursor\"";
            return response;
        }
        cursor = {*ledgerSequence, *transactionIndex};
    }
    uint32_t limit = 200;
    if (request.contains("limit") and
        request.at("limit").kind() == boost::json::kind::int64)
        limit = request.at("limit").as_int64();
    boost::json::array txns;
    auto start = std::chrono::system_clock::now();
    auto [blobs, retCursor] =
        backend.fetchAccountTransactions(*account, limit, cursor);
    auto end = std::chrono::system_clock::now();
    BOOST_LOG_TRIVIAL(info) << __func__ << " db fetch took "
                            << ((end - start).count() / 1000000000.0)
                            << " num blobs = " << blobs.size();
    for (auto const& txnPlusMeta : blobs)
    {
        // Skip transactions newer than the requested ledger; their
        // ledger may not be fully written yet.
        if (txnPlusMeta.ledgerSequence > ledgerSequence)
        {
            BOOST_LOG_TRIVIAL(debug)
                << __func__
                << " skipping over transactions from incomplete ledger";
            continue;
        }
        boost::json::object obj;
        if (!binary)
        {
            // Deserialize and render transaction + metadata as JSON.
            auto [txn, meta] = deserializeTxPlusMeta(txnPlusMeta);
            obj["transaction"] = toJson(*txn);
            obj["metadata"] = toJson(*meta);
        }
        else
        {
            // Binary mode: return the raw blobs as hex.
            obj["transaction"] = ripple::strHex(txnPlusMeta.transaction);
            obj["metadata"] = ripple::strHex(txnPlusMeta.metadata);
        }
        obj["ledger_sequence"] = txnPlusMeta.ledgerSequence;
        txns.push_back(obj);
    }
    response["transactions"] = txns;
    // Echo back a cursor so the client can resume where this page ended.
    if (retCursor)
    {
        boost::json::object cursorJson;
        cursorJson["ledger_sequence"] = retCursor->ledgerSequence;
        cursorJson["transaction_index"] = retCursor->transactionIndex;
        response["cursor"] = cursorJson;
    }
    auto end2 = std::chrono::system_clock::now();
    BOOST_LOG_TRIVIAL(info) << __func__ << " serialization took "
                            << ((end2 - end).count() / 1000000000.0);
    return response;
}

302
src/handlers/BookOffers.cpp Normal file
View File

@@ -0,0 +1,302 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/jss.h>
#include <boost/json.hpp>
#include <algorithm>
#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <handlers/RPCHelpers.h>
// Handle the book_offers RPC: list offers in an order book at the
// requested ledger.
//
// The book may be specified directly as "book" (hex book base), or via
// "taker_pays"/"taker_gets" objects, each carrying a "currency" and,
// for non-XRP currencies, an "issuer".
// Optional: "limit" (default 200), "taker", "cursor" (hex).
// Response: "offers" array, optional "cursor", optional "warning".
boost::json::object
doBookOffers(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    ripple::uint256 bookBase;
    if (request.contains("book"))
    {
        if (!bookBase.parseHex(request.at("book").as_string().c_str()))
        {
            response["error"] = "Error parsing book";
            return response;
        }
    }
    else
    {
        // Build the book base from taker_pays/taker_gets specifications.
        if (!request.contains("taker_pays"))
        {
            response["error"] = "Missing field taker_pays";
            return response;
        }
        if (!request.contains("taker_gets"))
        {
            response["error"] = "Missing field taker_gets";
            return response;
        }
        boost::json::object taker_pays;
        if (request.at("taker_pays").kind() == boost::json::kind::object)
        {
            taker_pays = request.at("taker_pays").as_object();
        }
        else
        {
            response["error"] = "Invalid field taker_pays";
            return response;
        }
        boost::json::object taker_gets;
        if (request.at("taker_gets").kind() == boost::json::kind::object)
        {
            taker_gets = request.at("taker_gets").as_object();
        }
        else
        {
            response["error"] = "Invalid field taker_gets";
            return response;
        }
        if (!taker_pays.contains("currency"))
        {
            response["error"] = "Missing field taker_pays.currency";
            return response;
        }
        if (!taker_pays.at("currency").is_string())
        {
            response["error"] = "taker_pays.currency should be string";
            return response;
        }
        if (!taker_gets.contains("currency"))
        {
            response["error"] = "Missing field taker_gets.currency";
            return response;
        }
        if (!taker_gets.at("currency").is_string())
        {
            response["error"] = "taker_gets.currency should be string";
            return response;
        }
        ripple::Currency pay_currency;
        if (!ripple::to_currency(
                pay_currency, taker_pays.at("currency").as_string().c_str()))
        {
            response["error"] =
                "Invalid field 'taker_pays.currency', bad currency.";
            return response;
        }
        ripple::Currency get_currency;
        if (!ripple::to_currency(
                get_currency, taker_gets["currency"].as_string().c_str()))
        {
            response["error"] =
                "Invalid field 'taker_gets.currency', bad currency.";
            return response;
        }
        // Issuer defaults to the XRP account and must be consistent with
        // whether the currency itself is XRP.
        ripple::AccountID pay_issuer;
        if (taker_pays.contains("issuer"))
        {
            if (!taker_pays.at("issuer").is_string())
            {
                response["error"] = "taker_pays.issuer should be string";
                return response;
            }
            if (!ripple::to_issuer(
                    pay_issuer, taker_pays.at("issuer").as_string().c_str()))
            {
                response["error"] =
                    "Invalid field 'taker_pays.issuer', bad issuer.";
                return response;
            }
            if (pay_issuer == ripple::noAccount())
            {
                response["error"] =
                    "Invalid field 'taker_pays.issuer', bad issuer account "
                    "one.";
                return response;
            }
        }
        else
        {
            pay_issuer = ripple::xrpAccount();
        }
        if (isXRP(pay_currency) && !isXRP(pay_issuer))
        {
            response["error"] =
                "Unneeded field 'taker_pays.issuer' for XRP currency "
                "specification.";
            return response;
        }
        if (!isXRP(pay_currency) && isXRP(pay_issuer))
        {
            response["error"] =
                "Invalid field 'taker_pays.issuer', expected non-XRP issuer.";
            return response;
        }
        ripple::AccountID get_issuer;
        if (taker_gets.contains("issuer"))
        {
            if (!taker_gets["issuer"].is_string())
            {
                response["error"] = "taker_gets.issuer should be string";
                return response;
            }
            if (!ripple::to_issuer(
                    get_issuer, taker_gets.at("issuer").as_string().c_str()))
            {
                response["error"] =
                    "Invalid field 'taker_gets.issuer', bad issuer.";
                return response;
            }
            if (get_issuer == ripple::noAccount())
            {
                response["error"] =
                    "Invalid field 'taker_gets.issuer', bad issuer account "
                    "one.";
                return response;
            }
        }
        else
        {
            get_issuer = ripple::xrpAccount();
        }
        if (ripple::isXRP(get_currency) && !ripple::isXRP(get_issuer))
        {
            response["error"] =
                "Unneeded field 'taker_gets.issuer' for XRP currency "
                "specification.";
            return response;
        }
        if (!ripple::isXRP(get_currency) && ripple::isXRP(get_issuer))
        {
            response["error"] =
                "Invalid field 'taker_gets.issuer', expected non-XRP issuer.";
            return response;
        }
        if (pay_currency == get_currency && pay_issuer == get_issuer)
        {
            response["error"] = "Bad market";
            return response;
        }
        ripple::Book book = {
            {pay_currency, pay_issuer}, {get_currency, get_issuer}};
        bookBase = getBookBase(book);
    }
    std::uint32_t limit = 200;
    if (request.contains("limit") and
        request.at("limit").kind() == boost::json::kind::int64)
        limit = request.at("limit").as_int64();
    // NOTE(review): takerID is validated here but never passed to the
    // backend fetch below - confirm whether taker filtering is intended.
    std::optional<ripple::AccountID> takerID = {};
    if (request.contains("taker"))
    {
        if (!request.at("taker").is_string())
        {
            response["error"] = "Taker account must be string";
            return response;
        }
        takerID =
            accountFromStringStrict(request.at("taker").as_string().c_str());
        if (!takerID)
        {
            response["error"] = "Invalid taker account";
            return response;
        }
    }
    std::optional<ripple::uint256> cursor;
    if (request.contains("cursor"))
    {
        cursor = ripple::uint256{};
        if (!cursor->parseHex(request.at("cursor").as_string().c_str()))
        {
            response["error"] = "Bad cursor";
            return response;
        }
    }
    auto start = std::chrono::system_clock::now();
    auto [offers, retCursor, warning] =
        backend.fetchBookOffers(bookBase, *ledgerSequence, limit, cursor);
    auto end = std::chrono::system_clock::now();
    BOOST_LOG_TRIVIAL(warning)
        << "Time loading books: " << ((end - start).count() / 1000000000.0);
    // Removed the original early `response["warning"] = *warning;` here:
    // it was unconditionally overwritten by the assignment at the bottom
    // under the same condition, so it was dead work.
    response["offers"] = boost::json::value(boost::json::array_kind);
    boost::json::array& jsonOffers = response.at("offers").as_array();
    start = std::chrono::system_clock::now();
    for (auto const& obj : offers)
    {
        if (jsonOffers.size() == limit)
            break;
        try
        {
            ripple::SerialIter it{obj.blob.data(), obj.blob.size()};
            ripple::SLE offer{it, obj.key};
            ripple::uint256 bookDir =
                offer.getFieldH256(ripple::sfBookDirectory);
            boost::json::object offerJson = toJson(offer);
            // Quality (exchange rate) is encoded in the book directory.
            offerJson["quality"] =
                ripple::amountFromQuality(getQuality(bookDir)).getText();
            jsonOffers.push_back(offerJson);
        }
        catch (std::exception const& e)
        {
            // Deliberate best-effort: skip entries that fail to
            // deserialize as offers rather than failing the whole call.
        }
    }
    end = std::chrono::system_clock::now();
    BOOST_LOG_TRIVIAL(warning) << "Time transforming to json: "
                               << ((end - start).count() / 1000000000.0);
    if (retCursor)
        response["cursor"] = ripple::strHex(*retCursor);
    if (warning)
        response["warning"] =
            "Periodic database update in progress. Data for this book as of "
            "this ledger "
            "may be incomplete. Data should be complete within one minute";
    return response;
}

View File

@@ -0,0 +1,109 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2014 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/PayChan.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/jss.h>
#include <ripple/resource/Fees.h>
#include <handlers/RPCHelpers.h>
#include <optional>
// Serialize a payment-channel claim authorization (the message that gets
// signed/verified for channel_authorize / channel_verify) into msg.
// NOTE(review): the handlers below call the ripple::-qualified
// serializePayChanAuthorization from <ripple/protocol/PayChan.h>, so this
// local, unqualified copy appears unused - confirm and consider removing.
void
serializePayChanAuthorization(
    ripple::Serializer& msg,
    ripple::uint256 const& key,
    ripple::XRPAmount const& amt)
{
    // Hash prefix domain-separates channel-claim signatures.
    msg.add32(ripple::HashPrefix::paymentChannelClaim);
    msg.addBitString(key);
    // Amount is serialized as drops.
    msg.add64(amt.drops());
}
// Handle the channel_authorize RPC: sign a payment channel claim for the
// given channel_id and amount (in drops) using the key material supplied
// in the request.
//
// Request fields:
//   channel_id        (string, required) - 256-bit channel ID, hex
//   amount            (string, required) - drops, decimal string
//   key_type / secret (required)         - consumed by keypairFromRequst
// Response: "signature" (hex) on success, "error" otherwise.
boost::json::object
doChannelAuthorize(boost::json::object const& request)
{
    boost::json::object response;
    if (!request.contains("channel_id"))
    {
        response["error"] = "missing field channel_id";
        return response;
    }
    if (!request.contains("amount"))
    {
        response["error"] = "missing field amount";
        return response;
    }
    if (!request.contains("key_type") && !request.contains("secret"))
    {
        response["error"] = "missing field secret";
        return response;
    }
    boost::json::value error = nullptr;
    auto const [pk, sk] = keypairFromRequst(request, error);
    if (!error.is_null())
    {
        response["error"] = error;
        return response;
    }
    // Validate the JSON kind before as_string(); the original called
    // as_string() unchecked, which throws on a non-string value instead
    // of producing an error response.
    if (!request.at("channel_id").is_string())
    {
        response["error"] = "channel id malformed";
        return response;
    }
    ripple::uint256 channelId;
    if (!channelId.parseHex(request.at("channel_id").as_string().c_str()))
    {
        response["error"] = "channel id malformed";
        return response;
    }
    if (!request.at("amount").is_string())
    {
        response["error"] = "channel amount malformed";
        return response;
    }
    auto optDrops =
        ripple::to_uint64(request.at("amount").as_string().c_str());
    if (!optDrops)
    {
        response["error"] = "could not parse channel amount";
        return response;
    }
    std::uint64_t drops = *optDrops;
    // Build the canonical claim message and sign it.
    ripple::Serializer msg;
    ripple::serializePayChanAuthorization(msg, channelId, ripple::XRPAmount(drops));
    try
    {
        auto const buf = ripple::sign(pk, sk, msg.slice());
        response["signature"] = ripple::strHex(buf);
    }
    catch (std::exception&)
    {
        response["error"] = "Exception occurred during signing.";
        return response;
    }
    return response;
}

View File

@@ -0,0 +1,119 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2014 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/PayChan.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/jss.h>
#include <ripple/resource/Fees.h>
#include <handlers/RPCHelpers.h>
#include <optional>
// channel_verify handler: verify a payment-channel claim signature.
// Required fields: channel_id, amount (string of drops), signature (hex),
// public_key (base58 account public key or hex blob).
// Returns {"signature_verified": bool} or {"error": <message>}.
boost::json::object
doChannelVerify(boost::json::object const& request)
{
    boost::json::object response;
    if (!request.contains("channel_id"))
    {
        response["error"] = "missing field channel_id";
        return response;
    }
    if (!request.contains("amount"))
    {
        response["error"] = "missing field amount";
        return response;
    }
    if (!request.contains("signature"))
    {
        response["error"] = "missing field signature";
        return response;
    }
    if (!request.contains("public_key"))
    {
        response["error"] = "missing field public_key";
        return response;
    }
    // Accept either a base58-encoded account public key or a hex blob.
    boost::optional<ripple::PublicKey> pk;
    {
        std::string const strPk = request.at("public_key").as_string().c_str();
        pk = ripple::parseBase58<ripple::PublicKey>(
            ripple::TokenType::AccountPublic, strPk);
        if (!pk)
        {
            auto pkHex = ripple::strUnHex(strPk);
            if (!pkHex)
            {
                response["error"] = "malformed public key";
                return response;
            }
            auto const pkType = ripple::publicKeyType(ripple::makeSlice(*pkHex));
            if (!pkType)
            {
                response["error"] = "invalid key type";
                // BUG FIX: previously fell through here and constructed a
                // PublicKey from a slice of invalid type.
                return response;
            }
            pk.emplace(ripple::makeSlice(*pkHex));
        }
    }
    ripple::uint256 channelId;
    if (!channelId.parseHex(request.at("channel_id").as_string().c_str()))
    {
        response["error"] = "channel id malformed";
        return response;
    }
    // BUG FIX: guard the type before as_string(), matching the
    // channel_authorize handler.
    if (!request.at("amount").is_string())
    {
        response["error"] = "channel amount malformed";
        return response;
    }
    auto optDrops =
        ripple::to_uint64(request.at("amount").as_string().c_str());
    if (!optDrops)
    {
        response["error"] = "could not parse channel amount";
        return response;
    }
    std::uint64_t drops = *optDrops;
    if (!request.at("signature").is_string())
    {
        response["error"] = "signature must be type string";
        return response;
    }
    auto sig = ripple::strUnHex(request.at("signature").as_string().c_str());
    if (!sig || !sig->size())
    {
        response["error"] = "Invalid signature";
        return response;
    }
    // Rebuild the canonical claim message and check the signature
    // against it (fully-canonical mode).
    ripple::Serializer msg;
    ripple::serializePayChanAuthorization(msg, channelId, ripple::XRPAmount(drops));
    response["signature_verified"] =
        ripple::verify(*pk, msg.slice(), ripple::makeSlice(*sig), true);
    return response;
}

89
src/handlers/Ledger.cpp Normal file
View File

@@ -0,0 +1,89 @@
#include <handlers/RPCHelpers.h>
#include <backend/BackendInterface.h>
// ledger handler: fetch a ledger header by index, optionally including
// its transactions (as hashes, or expanded into tx+metadata).
boost::json::object
doLedger(boost::json::object const& request, BackendInterface const& backend)
{
    boost::json::object response;
    if (!request.contains("ledger_index"))
    {
        response["error"] = "Please specify a ledger index";
        return response;
    }
    // All three options default to false when absent.
    auto boolParam = [&request](char const* name) {
        return request.contains(name) ? request.at(name).as_bool() : false;
    };
    bool const binary = boolParam("binary");
    bool const getTransactions = boolParam("transactions");
    bool const expand = boolParam("expand");
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    auto lgrInfo = backend.fetchLedgerBySequence(*ledgerSequence);
    if (!lgrInfo)
    {
        response["error"] = "ledger not found";
        return response;
    }
    boost::json::object header;
    if (binary)
        header["blob"] = ripple::strHex(ledgerInfoToBlob(*lgrInfo));
    else
        header = toJson(*lgrInfo);
    response["header"] = header;
    if (getTransactions)
    {
        response["transactions"] = boost::json::value(boost::json::array_kind);
        boost::json::array& jsonTransactions =
            response.at("transactions").as_array();
        if (expand)
        {
            // Full transaction bodies (and metadata), hex or JSON.
            auto txns = backend.fetchAllTransactionsInLedger(*ledgerSequence);
            for (auto& txn : txns)
            {
                boost::json::object entry;
                if (!binary)
                {
                    auto [sttx, meta] = deserializeTxPlusMeta(txn);
                    entry["transaction"] = toJson(*sttx);
                    entry["metadata"] = toJson(*meta);
                }
                else
                {
                    entry["transaction"] = ripple::strHex(txn.transaction);
                    entry["metadata"] = ripple::strHex(txn.metadata);
                }
                entry["ledger_sequence"] = txn.ledgerSequence;
                jsonTransactions.push_back(std::move(entry));
            }
        }
        else
        {
            // Just the hashes.
            auto hashes =
                backend.fetchAllTransactionHashesInLedger(*ledgerSequence);
            for (auto const& hash : hashes)
            {
                boost::json::object entry;
                entry["hash"] = ripple::strHex(hash);
                jsonTransactions.push_back(std::move(entry));
            }
        }
    }
    return response;
}

185
src/handlers/LedgerData.cpp Normal file
View File

@@ -0,0 +1,185 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2014 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/json.hpp>
#include <handlers/RPCHelpers.h>
#include <backend/BackendInterface.h>
// Get state nodes from a ledger
// Inputs:
// limit: integer, maximum number of entries
// marker: opaque, resume point
// binary: boolean, format
// type: string // optional, defaults to all ledger node types
// Outputs:
// ledger_hash: chosen ledger's hash
// ledger_index: chosen ledger's index
// state: array of state nodes
// marker: resume point, if any
//
//
// ledger_data handler: page through all state objects of one ledger.
// Accepts an optional "cursor" to resume, "limit", and "binary".
boost::json::object
doLedgerData(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    auto ledger = backend.fetchLedgerBySequence(*ledgerSequence);
    if (!ledger)
    {
        response["error"] = "Ledger not found";
        return response;
    }
    // Optional resume point from a previous page.
    std::optional<ripple::uint256> cursor;
    if (request.contains("cursor"))
    {
        BOOST_LOG_TRIVIAL(debug) << __func__ << " : parsing cursor";
        cursor = ripple::uint256{};
        if (!cursor->parseHex(request.at("cursor").as_string().c_str()))
        {
            response["error"] = "Invalid cursor";
            response["request"] = request;
            return response;
        }
    }
    bool binary = false;
    if (request.contains("binary"))
        binary = request.at("binary").as_bool();
    // Binary pages are cheaper to produce, so the default limit is larger.
    size_t limit = binary ? 2048 : 256;
    if (request.contains("limit"))
        limit = request.at("limit").as_int64();
    auto const start = std::chrono::system_clock::now();
    Backend::LedgerPage page =
        backend.fetchLedgerPage(cursor, *ledgerSequence, limit);
    auto const end = std::chrono::system_clock::now();
    auto const time =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start)
            .count();
    std::vector<Backend::LedgerObject>& results = page.objects;
    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " number of results = " << results.size();
    boost::json::array objects;
    for (auto const& [index, blob] : results)
    {
        ripple::STLedgerEntry sle{
            ripple::SerialIter{blob.data(), blob.size()}, index};
        if (binary)
        {
            boost::json::object entry;
            entry["data"] = ripple::serializeHex(sle);
            entry["index"] = ripple::to_string(sle.key());
            objects.push_back(std::move(entry));
        }
        else
        {
            objects.push_back(toJson(sle));
        }
    }
    response["objects"] = objects;
    if (page.cursor)
        response["cursor"] = ripple::strHex(*page.cursor);
    response["num_results"] = results.size();
    response["db_time"] = time;
    response["time_per_result"] = time / (results.size() ? results.size() : 1);
    if (page.warning)
    {
        response["warning"] =
            "Periodic database update in progress. Data for this ledger may be "
            "incomplete. Data should be complete "
            "within a few minutes. Other RPC calls are not affected, "
            "regardless of ledger. This "
            "warning is only present on the first "
            "page of the ledger";
    }
    return response;
}
/*
std::pair<org::xrpl::rpc::v1::GetLedgerDataResponse, grpc::Status>
doLedgerDataGrpc(
RPC::GRPCContext<org::xrpl::rpc::v1::GetLedgerDataRequest>& context)
{
org::xrpl::rpc::v1::GetLedgerDataRequest& request = context.params;
org::xrpl::rpc::v1::GetLedgerDataResponse response;
grpc::Status status = grpc::Status::OK;
std::shared_ptr<ReadView const> ledger;
if (RPC::ledgerFromRequest(ledger, context))
{
grpc::Status errorStatus{
grpc::StatusCode::NOT_FOUND, "ledger not found"};
return {response, errorStatus};
}
ReadView::key_type key = ReadView::key_type();
if (request.marker().size() != 0)
{
key = uint256::fromVoid(request.marker().data());
if (key.size() != request.marker().size())
{
grpc::Status errorStatus{
grpc::StatusCode::INVALID_ARGUMENT, "marker malformed"};
return {response, errorStatus};
}
}
auto e = ledger->sles.end();
ReadView::key_type stopKey = ReadView::key_type();
if (request.end_marker().size() != 0)
{
stopKey = uint256::fromVoid(request.end_marker().data());
if (stopKey.size() != request.marker().size())
{
grpc::Status errorStatus{
grpc::StatusCode::INVALID_ARGUMENT, "end marker malformed"};
return {response, errorStatus};
}
e = ledger->sles.upper_bound(stopKey);
}
int maxLimit = RPC::Tuning::pageLength(true);
for (auto i = ledger->sles.upper_bound(key); i != e; ++i)
{
auto sle = ledger->read(keylet::unchecked((*i)->key()));
if (maxLimit-- <= 0)
{
// Stop processing before the current key.
auto k = sle->key();
--k;
response.set_marker(k.data(), k.size());
break;
}
auto stateObject = response.mutable_ledger_objects()->add_objects();
Serializer s;
sle->add(s);
stateObject->set_data(s.peekData().data(), s.getLength());
stateObject->set_key(sle->key().data(), sle->key().size());
}
return {response, status};
}
*/

View File

@@ -0,0 +1,55 @@
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/json.hpp>
#include <handlers/RPCHelpers.h>
#include <backend/BackendInterface.h>
// {
// ledger_hash : <ledger>
// ledger_index : <ledger_index>
// ...
// }
// ledger_entry handler: fetch a single ledger object by its index.
// Required field: "index" (hex key). Optional: "binary", "ledger_index".
boost::json::object
doLedgerEntry(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    bool binary =
        request.contains("binary") ? request.at("binary").as_bool() : false;
    auto ledgerSequence = ledgerSequenceFromRequest(request, backend);
    if (!ledgerSequence)
    {
        response["error"] = "Empty database";
        return response;
    }
    // BUG FIX: request.at("index") throws if the field is absent; every
    // other handler guards its required fields, so guard here too.
    if (!request.contains("index"))
    {
        response["error"] = "missing field index";
        return response;
    }
    ripple::uint256 key;
    if (!key.parseHex(request.at("index").as_string().c_str()))
    {
        response["error"] = "Error parsing index";
        return response;
    }
    auto start = std::chrono::system_clock::now();
    auto dbResponse = backend.fetchLedgerObject(key, *ledgerSequence);
    auto end = std::chrono::system_clock::now();
    auto time =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start)
            .count();
    if (!dbResponse or dbResponse->size() == 0)
    {
        response["error"] = "Object not found";
        return response;
    }
    if (binary)
    {
        // Raw hex of the serialized object.
        response["object"] = ripple::strHex(*dbResponse);
    }
    else
    {
        // Deserialize and render as JSON.
        ripple::STLedgerEntry sle{
            ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key};
        response["object"] = toJson(sle);
    }
    return response;
}

View File

@@ -0,0 +1,22 @@
#include <handlers/RPCHelpers.h>
#include <backend/BackendInterface.h>
// ledger_range handler: report the min/max ledger sequence stored.
boost::json::object
doLedgerRange(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto const range = backend.fetchLedgerRange();
    if (range)
    {
        response["ledger_index_min"] = range->minSequence;
        response["ledger_index_max"] = range->maxSequence;
    }
    else
    {
        // Database has no complete ledgers yet.
        response["error"] = "No data";
    }
    return response;
}

423
src/handlers/RPCHelpers.cpp Normal file
View File

@@ -0,0 +1,423 @@
#include <handlers/RPCHelpers.h>
#include <backend/BackendInterface.h>
// Parse an account identifier strictly: hex-encoded public key first,
// then base58 public key, then base58 account ID. Returns std::nullopt
// when none of the three forms parse.
std::optional<ripple::AccountID>
accountFromStringStrict(std::string const& account)
{
    auto const blob = ripple::strUnHex(account);
    boost::optional<ripple::PublicKey> publicKey;
    if (blob && ripple::publicKeyType(ripple::makeSlice(*blob)))
    {
        publicKey =
            ripple::PublicKey(ripple::Slice{blob->data(), blob->size()});
    }
    else
    {
        publicKey = ripple::parseBase58<ripple::PublicKey>(
            ripple::TokenType::AccountPublic, account);
    }
    if (publicKey)
        return ripple::calcAccountID(*publicKey);
    if (auto const direct = ripple::parseBase58<ripple::AccountID>(account))
        return direct.value();
    return {};
}
// Deserialize a stored transaction blob and its metadata blob into
// immutable STTx / STObject instances.
std::pair<
    std::shared_ptr<ripple::STTx const>,
    std::shared_ptr<ripple::STObject const>>
deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs)
{
    ripple::SerialIter txIter{
        blobs.transaction.data(), blobs.transaction.size()};
    auto tx = std::make_shared<ripple::STTx const>(txIter);
    ripple::SerialIter metaIter{blobs.metadata.data(), blobs.metadata.size()};
    auto meta =
        std::make_shared<ripple::STObject const>(metaIter, ripple::sfMetadata);
    return {std::move(tx), std::move(meta)};
}
// As above, but additionally wrap the metadata in a TxMeta bound to the
// transaction's ID and the given ledger sequence.
std::pair<
    std::shared_ptr<ripple::STTx const>,
    std::shared_ptr<ripple::TxMeta const>>
deserializeTxPlusMeta(
    Backend::TransactionAndMetadata const& blobs,
    std::uint32_t seq)
{
    auto [tx, meta] = deserializeTxPlusMeta(blobs);
    auto txMeta =
        std::make_shared<ripple::TxMeta>(tx->getTransactionID(), seq, *meta);
    return {tx, txMeta};
}
// Convert any STBase-derived object to boost JSON, recording the time
// the conversion took under "deserialization_time_microseconds".
boost::json::object
toJson(ripple::STBase const& obj)
{
    auto const begin = std::chrono::system_clock::now();
    auto parsed = boost::json::parse(
        obj.getJson(ripple::JsonOptions::none).toStyledString());
    auto const finish = std::chrono::system_clock::now();
    auto& asObject = parsed.as_object();
    asObject["deserialization_time_microseconds"] =
        std::chrono::duration_cast<std::chrono::microseconds>(finish - begin)
            .count();
    return asObject;
}
// Re-parse a jsoncpp value as a boost::json value.
boost::json::value
getJson(Json::Value const& value)
{
    return boost::json::parse(value.toStyledString());
}
// Convert transaction metadata to boost JSON, recording the conversion
// time under "deserialization_time_microseconds".
boost::json::object
toJson(ripple::TxMeta const& meta)
{
    auto const begin = std::chrono::system_clock::now();
    auto parsed = boost::json::parse(
        meta.getJson(ripple::JsonOptions::none).toStyledString());
    auto const finish = std::chrono::system_clock::now();
    auto& asObject = parsed.as_object();
    asObject["deserialization_time_microseconds"] =
        std::chrono::duration_cast<std::chrono::microseconds>(finish - begin)
            .count();
    return asObject;
}
// Convert a ledger entry (SLE) to boost JSON, recording the conversion
// time under "deserialization_time_microseconds".
boost::json::object
toJson(ripple::SLE const& sle)
{
    auto const begin = std::chrono::system_clock::now();
    auto parsed = boost::json::parse(
        sle.getJson(ripple::JsonOptions::none).toStyledString());
    auto const finish = std::chrono::system_clock::now();
    auto& asObject = parsed.as_object();
    asObject["deserialization_time_microseconds"] =
        std::chrono::duration_cast<std::chrono::microseconds>(finish - begin)
            .count();
    return asObject;
}
// Bridge a jsoncpp value into boost::json via its styled-string form.
boost::json::value
toBoostJson(RippledJson const& value)
{
    auto const styled = value.toStyledString();
    return boost::json::parse(styled);
}
// Convert a ledger header to JSON. Field names follow this project's
// convention (ledger_sequence, txns_hash, ...), not rippled's; insertion
// order here is the order they appear in serialized output.
boost::json::object
toJson(ripple::LedgerInfo const& lgrInfo)
{
    boost::json::object header;
    header["ledger_sequence"] = lgrInfo.seq;
    header["ledger_hash"] = ripple::strHex(lgrInfo.hash);
    header["txns_hash"] = ripple::strHex(lgrInfo.txHash);
    header["state_hash"] = ripple::strHex(lgrInfo.accountHash);
    header["parent_hash"] = ripple::strHex(lgrInfo.parentHash);
    header["total_coins"] = ripple::to_string(lgrInfo.drops);
    header["close_flags"] = lgrInfo.closeFlags;
    // Always show fields that contribute to the ledger hash
    header["parent_close_time"] =
        lgrInfo.parentCloseTime.time_since_epoch().count();
    header["close_time"] = lgrInfo.closeTime.time_since_epoch().count();
    header["close_time_resolution"] = lgrInfo.closeTimeResolution.count();
    return header;
}
// Resolve which ledger sequence a request targets: the explicit
// "ledger_index" field if present, otherwise the newest stored ledger.
std::optional<uint32_t>
ledgerSequenceFromRequest(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    if (request.contains("ledger_index"))
        return request.at("ledger_index").as_int64();
    return backend.fetchLatestLedgerSequence();
}
// Walk the owner directory of accountID at the given ledger sequence,
// invoking atOwnedNode for each owned SLE whose key is >= cursor.
// Returns the key at which to resume if the callback stopped the
// traversal early, std::nullopt when it ran to completion.
// Throws std::runtime_error if a directory page is missing.
std::optional<ripple::uint256>
traverseOwnedNodes(
    BackendInterface const& backend,
    ripple::AccountID const& accountID,
    std::uint32_t sequence,
    ripple::uint256 const& cursor,
    std::function<bool(ripple::SLE)> atOwnedNode)
{
    auto const rootIndex = ripple::keylet::ownerDir(accountID);
    auto currentIndex = rootIndex;
    std::vector<ripple::uint256> keys;
    std::optional<ripple::uint256> nextCursor = {};
    auto start = std::chrono::system_clock::now();
    // Phase 1: chase the directory pages and collect all object keys.
    for (;;)
    {
        auto ownedNode = backend.fetchLedgerObject(currentIndex.key, sequence);
        if (!ownedNode)
        {
            throw std::runtime_error("Could not find owned node");
        }
        ripple::SerialIter it{ownedNode->data(), ownedNode->size()};
        ripple::SLE dir{it, currentIndex.key};
        for (auto const& key : dir.getFieldV256(ripple::sfIndexes))
        {
            if (key >= cursor)
                keys.push_back(key);
        }
        auto const uNodeNext = dir.getFieldU64(ripple::sfIndexNext);
        if (uNodeNext == 0)
            break;
        currentIndex = ripple::keylet::page(rootIndex, uNodeNext);
    }
    auto end = std::chrono::system_clock::now();
    BOOST_LOG_TRIVIAL(debug) << "Time loading owned directories: "
                             << ((end - start).count() / 1000000000.0);
    // Phase 2: bulk-fetch the objects and feed them to the callback.
    start = std::chrono::system_clock::now();
    auto objects = backend.fetchLedgerObjects(keys, sequence);
    end = std::chrono::system_clock::now();
    BOOST_LOG_TRIVIAL(debug) << "Time loading owned entries: "
                             << ((end - start).count() / 1000000000.0);
    for (std::size_t i = 0; i < objects.size(); ++i)
    {
        ripple::SerialIter it{objects[i].data(), objects[i].size()};
        ripple::SLE sle(it, keys[i]);
        if (!atOwnedNode(sle))
        {
            // BUG FIX: keys[i + 1] was read unconditionally, which is out
            // of bounds (UB) when the callback stops at the last key. If
            // there is no next key, the traversal is complete.
            if (i + 1 < keys.size())
                nextCursor = keys[i + 1];
            break;
        }
    }
    return nextCursor;
}
// ripple-lib encodes the seed used to generate an Ed25519 wallet in a
// non-standard way (token prefix 0xE1 0x4B). While rippled never encodes
// seeds that way, we try to detect such keys to avoid user confusion.
boost::optional<ripple::Seed>
parseRippleLibSeed(boost::json::value const& value)
{
    if (!value.is_string())
        return {};
    auto const decoded = ripple::decodeBase58Token(
        value.as_string().c_str(), ripple::TokenType::None);
    bool const isRippleLibEncoded = decoded.size() == 18 &&
        static_cast<std::uint8_t>(decoded[0]) == std::uint8_t(0xE1) &&
        static_cast<std::uint8_t>(decoded[1]) == std::uint8_t(0x4B);
    if (!isRippleLibEncoded)
        return {};
    // Strip the two-byte prefix; the remainder is the raw seed.
    return ripple::Seed(ripple::makeSlice(decoded.substr(2)));
}
// Derive a signing keypair from the request's secret material.
// Exactly one of passphrase/secret/seed/seed_hex must be present;
// "key_type" optionally selects secp256k1 or ed25519 (and forbids
// "secret"). On failure, `error` is set and a default pair is returned.
// NOTE(review): the name is misspelled ("Requst") but matches the
// declaration in RPCHelpers.h, so it is kept.
std::pair<ripple::PublicKey, ripple::SecretKey>
keypairFromRequst(boost::json::object const& request, boost::json::value& error)
{
    bool const has_key_type = request.contains("key_type");
    // All of the secret types we allow, but only one at a time.
    // The array should be constexpr, but that makes Visual Studio unhappy.
    static std::string const secretTypes[]{
        "passphrase", "secret", "seed", "seed_hex"};
    // Identify which secret type is in use.
    std::string secretType = "";
    int count = 0;
    for (auto t : secretTypes)
    {
        if (request.contains(t))
        {
            ++count;
            secretType = t;
        }
    }
    if (count == 0)
    {
        error = "missing field secret";
        return {};
    }
    if (count > 1)
    {
        error =
            "Exactly one of the following must be specified: "
            " passphrase, secret, seed, or seed_hex";
        return {};
    }
    boost::optional<ripple::KeyType> keyType;
    boost::optional<ripple::Seed> seed;
    if (has_key_type)
    {
        if (!request.at("key_type").is_string())
        {
            error = "key_type must be string";
            return {};
        }
        std::string key_type = request.at("key_type").as_string().c_str();
        keyType = ripple::keyTypeFromString(key_type);
        if (!keyType)
        {
            error = "Invalid field key_type";
            return {};
        }
        if (secretType == "secret")
        {
            error = "The secret field is not allowed if key_type is used.";
            return {};
        }
    }
    // ripple-lib encodes seed used to generate an Ed25519 wallet in a
    // non-standard way. While we never encode seeds that way, we try
    // to detect such keys to avoid user confusion.
    if (secretType != "seed_hex")
    {
        seed = parseRippleLibSeed(request.at(secretType));
        if (seed)
        {
            // If the user passed in an Ed25519 seed but *explicitly*
            // requested another key type, return an error.
            if (keyType.value_or(ripple::KeyType::ed25519) !=
                ripple::KeyType::ed25519)
            {
                error = "Specified seed is for an Ed25519 wallet.";
                return {};
            }
            keyType = ripple::KeyType::ed25519;
        }
    }
    if (!keyType)
        keyType = ripple::KeyType::secp256k1;
    if (!seed)
    {
        if (has_key_type)
        {
            if (!request.at(secretType).is_string())
            {
                error = "secret value must be string";
                return {};
            }
            std::string key = request.at(secretType).as_string().c_str();
            if (secretType == "seed")
                seed = ripple::parseBase58<ripple::Seed>(key);
            else if (secretType == "passphrase")
                seed = ripple::parseGenericSeed(key);
            else if (secretType == "seed_hex")
            {
                ripple::uint128 s;
                if (s.parseHex(key))
                    seed.emplace(ripple::Slice(s.data(), s.size()));
            }
        }
        else
        {
            // BUG FIX: this branch previously read the hard-coded "secret"
            // field, so boost::json::object::at threw when the caller
            // supplied seed/seed_hex/passphrase without key_type. Read
            // whichever secret field was actually present (count == 1
            // guarantees secretType exists) and parse it generically,
            // matching rippled's no-key_type behavior.
            if (!request.at(secretType).is_string())
            {
                error = "field secret should be a string";
                return {};
            }
            std::string secret = request.at(secretType).as_string().c_str();
            seed = ripple::parseGenericSeed(secret);
        }
    }
    if (!seed)
    {
        error = "Bad Seed: invalid field message secretType";
        return {};
    }
    if (keyType != ripple::KeyType::secp256k1 &&
        keyType != ripple::KeyType::ed25519)
    {
        error = "keypairForSignature: invalid key type";
        return {};
    }
    return generateKeyPair(*keyType, *seed);
}
// Recursively collect every account-like string value found anywhere in
// a transaction JSON object (including nested objects).
std::vector<ripple::AccountID>
getAccountsFromTransaction(boost::json::object const& transaction)
{
    std::vector<ripple::AccountID> accounts;
    for (auto const& member : transaction)
    {
        auto const& value = member.value();
        if (value.is_object())
        {
            // Descend and splice in whatever the nested object holds.
            for (auto const& nested :
                 getAccountsFromTransaction(value.as_object()))
                accounts.push_back(nested);
        }
        else if (value.is_string())
        {
            if (auto account =
                    accountFromStringStrict(value.as_string().c_str()))
                accounts.push_back(*account);
        }
    }
    return accounts;
}
// Serialize a ledger header into its canonical binary layout.
// NOTE(review): field order appears to mirror rippled's ledger-header
// serialization (which feeds the ledger hash) — do not reorder; confirm
// against rippled's addRaw if this is ever changed.
std::vector<unsigned char>
ledgerInfoToBlob(ripple::LedgerInfo const& info)
{
    ripple::Serializer s;
    s.add32(info.seq);
    s.add64(info.drops.drops());
    s.addBitString(info.parentHash);
    s.addBitString(info.txHash);
    s.addBitString(info.accountHash);
    s.add32(info.parentCloseTime.time_since_epoch().count());
    s.add32(info.closeTime.time_since_epoch().count());
    s.add8(info.closeTimeResolution.count());
    s.add8(info.closeFlags);
    // The ledger's own hash is appended after the hashed fields.
    s.addBitString(info.hash);
    return s.peekData();
}

65
src/handlers/RPCHelpers.h Normal file
View File

@@ -0,0 +1,65 @@
#ifndef XRPL_REPORTING_RPCHELPERS_H_INCLUDED
#define XRPL_REPORTING_RPCHELPERS_H_INCLUDED
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/STTx.h>
#include <boost/json.hpp>
#include <backend/BackendInterface.h>
// Parse an account from a hex public key, base58 public key, or base58
// account ID; std::nullopt if none of the forms parse.
std::optional<ripple::AccountID>
accountFromStringStrict(std::string const& account);
// Deserialize a stored transaction blob and its metadata blob.
std::pair<
    std::shared_ptr<ripple::STTx const>,
    std::shared_ptr<ripple::STObject const>>
deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs);
// Same, but also wrap the metadata in a TxMeta bound to ledger `seq`.
std::pair<
    std::shared_ptr<ripple::STTx const>,
    std::shared_ptr<ripple::TxMeta const>>
deserializeTxPlusMeta(
    Backend::TransactionAndMetadata const& blobs,
    std::uint32_t seq);
// Conversions from rippled types to boost::json. Each object gains a
// "deserialization_time_microseconds" timing field.
boost::json::object
toJson(ripple::STBase const& obj);
boost::json::object
toJson(ripple::SLE const& sle);
boost::json::object
toJson(ripple::LedgerInfo const& info);
boost::json::object
toJson(ripple::TxMeta const& meta);
// Alias for the jsoncpp value type used by rippled.
using RippledJson = Json::Value;
boost::json::value
toBoostJson(RippledJson const& value);
// Resolve "ledger_index" from the request, defaulting to the most
// recent ledger stored in the backend.
std::optional<uint32_t>
ledgerSequenceFromRequest(
    boost::json::object const& request,
    BackendInterface const& backend);
// Walk an account's owner directory, invoking atOwnedNode per entry;
// returns a resume cursor if the callback stopped the traversal early.
std::optional<ripple::uint256>
traverseOwnedNodes(
    BackendInterface const& backend,
    ripple::AccountID const& accountID,
    std::uint32_t sequence,
    ripple::uint256 const& cursor,
    std::function<bool(ripple::SLE)> atOwnedNode);
// Derive a signing keypair from a request's secret fields; sets `error`
// on failure. NOTE(review): name is misspelled ("Requst") — renaming
// would touch all call sites, so it is kept as-is.
std::pair<ripple::PublicKey, ripple::SecretKey>
keypairFromRequst(
    boost::json::object const& request,
    boost::json::value& error);
// Recursively collect all AccountIDs appearing in a transaction JSON.
std::vector<ripple::AccountID>
getAccountsFromTransaction(boost::json::object const& transaction);
// Serialize a ledger header into its canonical binary form.
std::vector<unsigned char>
ledgerInfoToBlob(ripple::LedgerInfo const& info);
#endif

View File

@@ -0,0 +1,53 @@
#include <handlers/RPCHelpers.h>
#include <backend/BackendInterface.h>
// server_info handler: reports the range of ledgers held in the
// database, the newest ledger header, and key-indexer progress.
boost::json::object
doServerInfo(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    boost::json::object response;
    auto rng = backend.fetchLedgerRange();
    if (!rng)
    {
        // No complete ledgers have been written yet.
        response["complete_ledgers"] = "empty";
    }
    else
    {
        // "min-max", or just "min" when only one ledger is stored.
        std::string completeLedgers = std::to_string(rng->minSequence);
        if (rng->maxSequence != rng->minSequence)
            completeLedgers += "-" + std::to_string(rng->maxSequence);
        response["complete_ledgers"] = completeLedgers;
    }
    if (rng)
    {
        auto lgrInfo = backend.fetchLedgerBySequence(rng->maxSequence);
        response["validated_ledger"] = toJson(*lgrInfo);
    }
    boost::json::array indexes;
    if (rng)
    {
        // Walk the key indexes covering the stored range, reporting
        // whether each has finished indexing.
        // NOTE(review): the bound (maxSequence + 1) and the reassignment
        // of cur assume getKeyIndexOfSeq returns an index at or after
        // cur — confirm against BackendInterface.
        uint32_t cur = rng->minSequence;
        while (cur <= rng->maxSequence + 1)
        {
            auto keyIndex = backend.getKeyIndexOfSeq(cur);
            assert(keyIndex.has_value());
            cur = keyIndex->keyIndex;
            boost::json::object entry;
            entry["complete"] = backend.isLedgerIndexed(cur);
            entry["sequence"] = cur;
            indexes.emplace_back(entry);
            cur = cur + 1;
        }
    }
    response["indexes"] = indexes;
    // Report the sequence currently being indexed, if any.
    auto indexing = backend.getIndexer().getCurrentlyIndexing();
    if (indexing)
        response["indexing"] = *indexing;
    else
        response["indexing"] = "none";
    return response;
}

331
src/handlers/Subscribe.cpp Normal file
View File

@@ -0,0 +1,331 @@
#include <boost/json.hpp>
#include <handlers/RPCHelpers.h>
#include <server/WsBase.h>
#include <server/SubscriptionManager.h>
// The complete set of stream names a client may pass in "streams".
static std::unordered_set<std::string> validStreams{
    "ledger",
    "transactions",
    "transactions_proposed"};
// Check that request["streams"] is an array of known stream names.
// Returns null on success, otherwise an error message value.
boost::json::value
validateStreams(boost::json::object const& request)
{
    if (!request.at("streams").is_array())
        return "missing or invalid streams";
    for (auto const& stream : request.at("streams").as_array())
    {
        if (!stream.is_string())
            return "streams must be strings";
        std::string const name = stream.as_string().c_str();
        if (validStreams.count(name) == 0)
            return boost::json::string("invalid stream " + name);
    }
    // null signals "no error".
    return nullptr;
}
// Subscribe the session to each requested stream. Stream names were
// already vetted by validateStreams(), hence the assert on fallthrough.
void
subscribeToStreams(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager)
{
    for (auto const& stream : request.at("streams").as_array())
    {
        std::string const name = stream.as_string().c_str();
        if (name == "ledger")
            manager.subLedger(session);
        else if (name == "transactions")
            manager.subTransactions(session);
        else if (name == "transactions_proposed")
            manager.subProposedTransactions(session);
        else
            assert(false);
    }
}
// Remove the session's subscription from each requested stream. Stream
// names were already vetted by validateStreams().
void
unsubscribeToStreams(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager)
{
    for (auto const& stream : request.at("streams").as_array())
    {
        std::string const name = stream.as_string().c_str();
        if (name == "ledger")
            manager.unsubLedger(session);
        else if (name == "transactions")
            manager.unsubTransactions(session);
        else if (name == "transactions_proposed")
            manager.unsubProposedTransactions(session);
        else
            assert(false);
    }
}
boost::json::value
validateAccounts(
boost::json::object const& request,
boost::json::array const& accounts)
{
for (auto const& account : accounts)
{
if (!account.is_string())
{
return "account must be strings";
}
std::string s = account.as_string().c_str();
auto id = accountFromStringStrict(s);
if (!id)
{
return boost::json::string("invalid account " + s);
}
}
return nullptr;
}
// Register the session for updates on each account in request["accounts"].
// Accounts were validated beforehand, hence the assert on parse failure.
void
subscribeToAccounts(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager)
{
    for (auto const& account : request.at("accounts").as_array())
    {
        std::string const text = account.as_string().c_str();
        auto const accountID = accountFromStringStrict(text);
        if (!accountID)
        {
            assert(false);
            continue;
        }
        manager.subAccount(*accountID, session);
    }
}
// Remove the session's subscription for each account in request["accounts"].
void
unsubscribeToAccounts(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager)
{
    for (auto const& account : request.at("accounts").as_array())
    {
        std::string const text = account.as_string().c_str();
        auto const accountID = accountFromStringStrict(text);
        if (!accountID)
        {
            assert(false);
            continue;
        }
        manager.unsubAccount(*accountID, session);
    }
}
// Register the session for proposed-transaction updates on each account
// in request["accounts_proposed"] (base58 account IDs only).
void
subscribeToAccountsProposed(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager)
{
    for (auto const& account : request.at("accounts_proposed").as_array())
    {
        std::string const text = account.as_string().c_str();
        auto const accountID = ripple::parseBase58<ripple::AccountID>(text);
        if (!accountID)
        {
            assert(false);
            continue;
        }
        manager.subProposedAccount(*accountID, session);
    }
}
// Remove the session's proposed-transaction subscription for each
// account in request["accounts_proposed"].
void
unsubscribeToAccountsProposed(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager)
{
    for (auto const& account : request.at("accounts_proposed").as_array())
    {
        std::string const text = account.as_string().c_str();
        auto const accountID = ripple::parseBase58<ripple::AccountID>(text);
        if (!accountID)
        {
            assert(false);
            continue;
        }
        manager.unsubProposedAccount(*accountID, session);
    }
}
// subscribe handler: validate every requested stream/account first, so
// a bad request leaves no partial subscriptions, then subscribe.
boost::json::object
doSubscribe(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager)
{
    boost::json::object response;
    if (request.contains("streams"))
    {
        auto const error = validateStreams(request);
        if (!error.is_null())
        {
            response["error"] = error;
            return response;
        }
    }
    if (request.contains("accounts"))
    {
        if (!request.at("accounts").is_array())
        {
            response["error"] = "accounts must be array";
            return response;
        }
        auto const error =
            validateAccounts(request, request.at("accounts").as_array());
        if (!error.is_null())
        {
            response["error"] = error;
            return response;
        }
    }
    if (request.contains("accounts_proposed"))
    {
        if (!request.at("accounts_proposed").is_array())
        {
            response["error"] = "accounts_proposed must be array";
            return response;
        }
        auto const error = validateAccounts(
            request, request.at("accounts_proposed").as_array());
        if (!error.is_null())
        {
            response["error"] = error;
            return response;
        }
    }
    // Everything validated; apply the subscriptions.
    if (request.contains("streams"))
        subscribeToStreams(request, session, manager);
    if (request.contains("accounts"))
        subscribeToAccounts(request, session, manager);
    if (request.contains("accounts_proposed"))
        subscribeToAccountsProposed(request, session, manager);
    response["status"] = "success";
    return response;
}
// unsubscribe handler: mirror of doSubscribe — validate every requested
// stream/account first so a bad request has no partial effect, then
// remove the subscriptions.
boost::json::object
doUnsubscribe(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager)
{
    boost::json::object response;
    if (request.contains("streams"))
    {
        boost::json::value error = validateStreams(request);
        if (!error.is_null())
        {
            response["error"] = error;
            return response;
        }
    }
    if (request.contains("accounts"))
    {
        // BUG FIX: doSubscribe checks is_array() here but this function
        // did not, so a non-array "accounts" made as_array() throw.
        if (!request.at("accounts").is_array())
        {
            response["error"] = "accounts must be array";
            return response;
        }
        boost::json::array accounts = request.at("accounts").as_array();
        boost::json::value error = validateAccounts(request, accounts);
        if (!error.is_null())
        {
            response["error"] = error;
            return response;
        }
    }
    if (request.contains("accounts_proposed"))
    {
        // BUG FIX: same missing is_array() guard as above.
        if (!request.at("accounts_proposed").is_array())
        {
            response["error"] = "accounts_proposed must be array";
            return response;
        }
        boost::json::array accounts =
            request.at("accounts_proposed").as_array();
        boost::json::value error = validateAccounts(request, accounts);
        if (!error.is_null())
        {
            response["error"] = error;
            return response;
        }
    }
    if (request.contains("streams"))
        unsubscribeToStreams(request, session, manager);
    if (request.contains("accounts"))
        unsubscribeToAccounts(request, session, manager);
    if (request.contains("accounts_proposed"))
        unsubscribeToAccountsProposed(request, session, manager);
    response["status"] = "success";
    return response;
}

76
src/handlers/Tx.cpp Normal file
View File

@@ -0,0 +1,76 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2014 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <backend/BackendInterface.h>
#include <handlers/RPCHelpers.h>
// Handler for the "tx" RPC command: look up a single transaction by hash.
//
// request fields:
//   transaction : hex-encoded transaction hash (required)
//   binary      : if true, return raw hex blobs instead of parsed JSON
//
// Returns a JSON object with either the transaction (plus metadata and
// ledger_sequence) or an "error" field describing the failure.
boost::json::object
doTx(boost::json::object const& request, BackendInterface const& backend)
{
    boost::json::object response;
    if (!request.contains("transaction"))
    {
        response["error"] = "Please specify a transaction hash";
        return response;
    }
    ripple::uint256 hash;
    if (!hash.parseHex(request.at("transaction").as_string().c_str()))
    {
        response["error"] = "Error parsing transaction hash";
        return response;
    }
    auto range = backend.fetchLedgerRange();
    if (!range)
    {
        response["error"] = "Database is empty";
        return response;
    }
    auto dbResponse = backend.fetchTransaction(hash);
    if (!dbResponse)
    {
        // Fix: the backend may be Cassandra or Postgres (see make_Backend),
        // so don't name a specific database in the client-facing message.
        response["error"] = "Transaction not found";
        // Tell the caller which ledgers we actually hold, so they can
        // distinguish "never existed" from "outside our history".
        response["ledger_range"] = std::to_string(range->minSequence) + " - " +
            std::to_string(range->maxSequence);
        return response;
    }
    bool binary =
        request.contains("binary") ? request.at("binary").as_bool() : false;
    if (binary)
    {
        response["transaction"] = ripple::strHex(dbResponse->transaction);
        response["metadata"] = ripple::strHex(dbResponse->metadata);
    }
    else
    {
        auto [sttx, meta] = deserializeTxPlusMeta(dbResponse.value());
        response["transaction"] = toJson(*sttx);
        response["metadata"] = toJson(*meta);
    }
    response["ledger_sequence"] = dbResponse->ledgerSequence;
    return response;
}

90
src/server/DOSGuard.h Normal file
View File

@@ -0,0 +1,90 @@
#ifndef RIPPLE_REPORTING_DOS_GUARD_H
#define RIPPLE_REPORTING_DOS_GUARD_H
#include <boost/asio.hpp>
#include <string>
#include <unordered_map>
#include <unordered_set>
/// Simple denial-of-service guard. Tracks the number of objects fetched
/// per client IP and reports when an IP exceeds the configured budget.
/// Counts are reset periodically by a repeating timer running on the
/// supplied io_context. Whitelisted IPs are never throttled.
class DOSGuard
{
    // objects fetched per IP since the last sweep; guarded by mtx_
    std::unordered_map<std::string, uint32_t> ipFetchCount_;
    // maximum objects an IP may fetch per sweep interval
    uint32_t maxFetches_ = 100;
    // seconds between sweeps (count resets)
    uint32_t sweepInterval_ = 1;
    std::unordered_set<std::string> whitelist_;
    boost::asio::io_context& ctx_;
    std::mutex mtx_;

public:
    /// Reads the optional "dos_guard" config object: "max_fetches" and
    /// "sweep_interval" (only applied when both are present) and
    /// "whitelist" (array of IP strings). Starts the sweep timer.
    DOSGuard(boost::json::object const& config, boost::asio::io_context& ctx)
        : ctx_(ctx)
    {
        if (config.contains("dos_guard"))
        {
            auto dosGuardConfig = config.at("dos_guard").as_object();
            if (dosGuardConfig.contains("max_fetches") &&
                dosGuardConfig.contains("sweep_interval"))
            {
                maxFetches_ = dosGuardConfig.at("max_fetches").as_int64();
                sweepInterval_ = dosGuardConfig.at("sweep_interval").as_int64();
            }
            if (dosGuardConfig.contains("whitelist"))
            {
                auto whitelist = dosGuardConfig.at("whitelist").as_array();
                for (auto& ip : whitelist)
                    whitelist_.insert(ip.as_string().c_str());
            }
        }
        createTimer();
    }

    /// Schedules the next sweep. The timer keeps itself alive through the
    /// shared_ptr captured in its own completion handler.
    void
    createTimer()
    {
        auto wait = std::chrono::seconds(sweepInterval_);
        std::shared_ptr<boost::asio::steady_timer> timer =
            std::make_shared<boost::asio::steady_timer>(
                ctx_, std::chrono::steady_clock::now() + wait);
        timer->async_wait(
            [timer, this](const boost::system::error_code& error) {
                // Fix: bail out if the wait was cancelled (e.g. during
                // io_context shutdown) instead of re-arming a dead timer
                // forever.
                if (error == boost::asio::error::operation_aborted)
                    return;
                clear();
                createTimer();
            });
    }

    /// Returns true if the IP is whitelisted or still under its fetch
    /// budget for the current interval.
    bool
    isOk(std::string const& ip)
    {
        if (whitelist_.count(ip) > 0)
            return true;
        std::unique_lock lck(mtx_);
        auto it = ipFetchCount_.find(ip);
        if (it == ipFetchCount_.end())
            return true;
        return it->second < maxFetches_;
    }

    /// Records numObjects fetched by ip and returns the post-update
    /// verdict of isOk() for that IP.
    bool
    add(std::string const& ip, uint32_t numObjects)
    {
        if (whitelist_.count(ip) > 0)
            return true;
        {
            std::unique_lock lck(mtx_);
            auto it = ipFetchCount_.find(ip);
            if (it == ipFetchCount_.end())
                ipFetchCount_[ip] = numObjects;
            else
                it->second += numObjects;
        }
        return isOk(ip);
    }

    /// Resets all per-IP counts; invoked by the sweep timer.
    void
    clear()
    {
        std::unique_lock lck(mtx_);
        ipFetchCount_.clear();
    }
};
#endif

136
src/server/Handlers.cpp Normal file
View File

@@ -0,0 +1,136 @@
#include <server/Handlers.h>
// Decides whether a request must be proxied to rippled instead of being
// answered locally: either the client asked explicitly ("forward": true),
// the command has no local implementation (forwardCommands), or the
// request targets the "current"/"closed" ledger, which only rippled has.
bool
shouldForwardToRippled(boost::json::object const& request)
{
    if (request.contains("forward") && request.at("forward").is_bool())
        return request.at("forward").as_bool();
    BOOST_LOG_TRIVIAL(info) << "checked forward";
    // Fix: the original unconditionally dereferenced "command"/"method"
    // and threw when neither key was present (or was not a string).
    std::string strCommand;
    if (request.contains("command") && request.at("command").is_string())
        strCommand = request.at("command").as_string().c_str();
    else if (request.contains("method") && request.at("method").is_string())
        strCommand = request.at("method").as_string().c_str();
    BOOST_LOG_TRIVIAL(info) << "checked command";
    if (forwardCommands.find(strCommand) != forwardCommands.end())
        return true;
    if (request.contains("ledger_index"))
    {
        auto indexValue = request.at("ledger_index");
        if (indexValue.is_string())
        {
            BOOST_LOG_TRIVIAL(info) << "checking ledger as string";
            std::string index = indexValue.as_string().c_str();
            return index == "current" || index == "closed";
        }
    }
    BOOST_LOG_TRIVIAL(info) << "checked ledger";
    return false;
}
// Dispatches a parsed websocket-style request to the matching RPC handler.
// Returns the response object together with a cost estimate (roughly
// proportional to the amount of data returned) used for DOSGuard
// accounting; forwarded requests are charged a flat cost of 10.
std::pair<boost::json::object, uint32_t>
buildResponse(
    boost::json::object const& request,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> manager,
    std::shared_ptr<ETLLoadBalancer> balancer,
    std::shared_ptr<WsBase> session)
{
    boost::json::object response;
    // Fix: at("command") threw when the key was missing; report a proper
    // error instead.
    if (!request.contains("command") || !request.at("command").is_string())
    {
        response["error"] = "Missing command";
        return {response, 1};
    }
    std::string command = request.at("command").as_string().c_str();
    BOOST_LOG_TRIVIAL(info) << "Received rpc command : " << request;
    if (shouldForwardToRippled(request))
        return {balancer->forwardToRippled(request), 10};
    BOOST_LOG_TRIVIAL(info) << "Not forwarding";
    // Fix: commandMap[command] default-inserted unknown commands with enum
    // value 0 (== tx), silently dispatching them to doTx. Use find() so
    // unknown commands reach the error path.
    auto handler = commandMap.find(command);
    if (handler == commandMap.end())
    {
        response["error"] = "Unknown command: " + command;
        return {response, 1};
    }
    switch (handler->second)
    {
        case tx:
            return {doTx(request, *backend), 1};
        case account_tx: {
            auto res = doAccountTx(request, *backend);
            if (res.contains("transactions"))
                return {res, res["transactions"].as_array().size()};
            return {res, 1};
        }
        case ledger: {
            auto res = doLedger(request, *backend);
            BOOST_LOG_TRIVIAL(info) << "did command";
            if (res.contains("transactions"))
                return {res, res["transactions"].as_array().size()};
            return {res, 1};
        }
        case ledger_entry:
            return {doLedgerEntry(request, *backend), 1};
        case ledger_range:
            return {doLedgerRange(request, *backend), 1};
        case ledger_data: {
            // Ledger objects are comparatively heavy; weight them 4x.
            auto res = doLedgerData(request, *backend);
            if (res.contains("objects"))
                return {res, res["objects"].as_array().size() * 4};
            return {res, 1};
        }
        case account_info:
            return {doAccountInfo(request, *backend), 1};
        case book_offers: {
            auto res = doBookOffers(request, *backend);
            if (res.contains("offers"))
                return {res, res["offers"].as_array().size() * 4};
            return {res, 1};
        }
        case account_channels: {
            auto res = doAccountChannels(request, *backend);
            if (res.contains("channels"))
                return {res, res["channels"].as_array().size()};
            return {res, 1};
        }
        case account_lines: {
            auto res = doAccountLines(request, *backend);
            if (res.contains("lines"))
                return {res, res["lines"].as_array().size()};
            return {res, 1};
        }
        case account_currencies: {
            auto res = doAccountCurrencies(request, *backend);
            size_t count = 1;
            if (res.contains("send_currencies"))
                count = res["send_currencies"].as_array().size();
            if (res.contains("receive_currencies"))
                count += res["receive_currencies"].as_array().size();
            return {res, count};
        }
        case account_offers: {
            auto res = doAccountOffers(request, *backend);
            if (res.contains("offers"))
                return {res, res["offers"].as_array().size()};
            return {res, 1};
        }
        case account_objects: {
            auto res = doAccountObjects(request, *backend);
            if (res.contains("objects"))
                return {res, res["objects"].as_array().size()};
            return {res, 1};
        }
        case channel_authorize:
            return {doChannelAuthorize(request), 1};
        case channel_verify:
            return {doChannelVerify(request), 1};
        case subscribe:
            return {doSubscribe(request, session, *manager), 1};
        case unsubscribe:
            return {doUnsubscribe(request, session, *manager), 1};
        case server_info:
            return {doServerInfo(request, *backend), 1};
        default:
            // Unreachable given the find() above; kept as a safety net.
            response["error"] = "Unknown command: " + command;
            return {response, 1};
    }
}

154
src/server/Handlers.h Normal file
View File

@@ -0,0 +1,154 @@
#include <boost/asio/dispatch.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/json.hpp>
#include <boost/log/core.hpp>
#include <boost/log/expressions.hpp>
#include <boost/log/trivial.hpp>
#include <etl/ReportingETL.h>
#include <server/WsBase.h>
#include <iostream>
#include <unordered_map>
#ifndef RIPPLE_REPORTING_HANDLERS_H
#define RIPPLE_REPORTING_HANDLERS_H
class ReportingETL;
class SubscriptionManager;
//------------------------------------------------------------------------------
// Commands that reporting mode cannot answer locally; they are always
// proxied to a rippled node (see shouldForwardToRippled).
static std::unordered_set<std::string> forwardCommands{
    "submit",
    "submit_multisigned",
    "fee",
    "path_find",
    "ripple_path_find",
    "manifest"};
// Enumeration of the RPC commands implemented locally; used as the
// dispatch key in buildResponse.
// NOTE(review): `static` at namespace scope in a header gives every
// translation unit its own copy of these containers — consider C++17
// `inline` variables; confirm no code relies on per-TU copies.
enum RPCCommand {
    tx,
    account_tx,
    ledger,
    account_info,
    ledger_data,
    book_offers,
    ledger_range,
    ledger_entry,
    account_channels,
    account_lines,
    account_currencies,
    account_offers,
    account_objects,
    channel_authorize,
    channel_verify,
    subscribe,
    unsubscribe,
    server_info
};
// Lookup table from the wire-format command string to its enum value.
static std::unordered_map<std::string, RPCCommand> commandMap{
    {"tx", tx},
    {"account_tx", account_tx},
    {"ledger", ledger},
    {"ledger_range", ledger_range},
    {"ledger_entry", ledger_entry},
    {"account_info", account_info},
    {"ledger_data", ledger_data},
    {"book_offers", book_offers},
    {"account_channels", account_channels},
    {"account_lines", account_lines},
    {"account_currencies", account_currencies},
    {"account_offers", account_offers},
    {"account_objects", account_objects},
    {"channel_authorize", channel_authorize},
    {"channel_verify", channel_verify},
    {"subscribe", subscribe},
    {"unsubscribe", unsubscribe},
    {"server_info", server_info}};
// ---- RPC handler prototypes -------------------------------------------
// Each handler takes the parsed websocket-style request and returns a
// JSON object; failures are reported via an "error" field in the result.
boost::json::object
doTx(boost::json::object const& request, BackendInterface const& backend);
boost::json::object
doAccountTx(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doBookOffers(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doLedgerData(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doLedgerEntry(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doLedger(boost::json::object const& request, BackendInterface const& backend);
boost::json::object
doLedgerRange(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doAccountInfo(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doAccountChannels(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doAccountLines(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doAccountCurrencies(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doAccountOffers(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doAccountObjects(
    boost::json::object const& request,
    BackendInterface const& backend);
// Channel signing/verification handlers need no database access.
boost::json::object
doChannelAuthorize(boost::json::object const& request);
boost::json::object
doChannelVerify(boost::json::object const& request);
boost::json::object
doServerInfo(
    boost::json::object const& request,
    BackendInterface const& backend);
// Subscription handlers additionally need the session and the manager.
boost::json::object
doSubscribe(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager);
boost::json::object
doUnsubscribe(
    boost::json::object const& request,
    std::shared_ptr<WsBase>& session,
    SubscriptionManager& manager);
// Top-level dispatcher; returns the response plus a DOSGuard cost.
std::pair<boost::json::object, uint32_t>
buildResponse(
    boost::json::object const& request,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> manager,
    std::shared_ptr<ETLLoadBalancer> balancer,
    std::shared_ptr<WsBase> session);
#endif // RIPPLE_REPORTING_HANDLERS_H

352
src/server/HttpBase.h Normal file
View File

@@ -0,0 +1,352 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2021 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_REPORTING_HTTP_BASE_SESSION_H
#define RIPPLE_REPORTING_HTTP_BASE_SESSION_H
#include <boost/asio/dispatch.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/version.hpp>
#include <boost/config.hpp>
#include <boost/json.hpp>
#include <algorithm>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <server/DOSGuard.h>
#include <server/Handlers.h>
#include <vector>
namespace http = boost::beast::http;
namespace net = boost::asio;
namespace ssl = boost::asio::ssl;
using tcp = boost::asio::ip::tcp;
// Body served for plain GET requests: lets operators verify HTTP(S)
// connectivity from a browser. Fix: declared const — a mutable global in
// a header invites accidental modification and per-TU divergence.
static std::string const defaultResponse =
    "<!DOCTYPE html><html><head><title>"
    " Test page for reporting mode</title></head><body><h1>"
    " Test</h1><p>This page shows xrpl reporting http(s) "
    "connectivity is working.</p></body></html>";
// Report a beast/asio error on stderr unless it is an ignorable SSL
// "short read".
//
// ssl::error::stream_truncated means the peer closed the TLS connection
// without the closing handshake (Google, for one, does this for speed).
// Because HTTP and WebSocket are self-terminating protocols, a truncation
// seen *after* a complete message is harmless; Beast reports a truncation
// that cuts a message short as http::error::partial_message instead, so
// anything that reaches us as stream_truncated is safe to drop.
//
// References:
//   https://github.com/boostorg/beast/issues/38
//   https://security.stackexchange.com/questions/91435/how-to-handle-a-malicious-ssl-tls-shutdown
inline void
httpFail(boost::beast::error_code ec, char const* what)
{
    bool const ignorableShortRead = (ec == net::ssl::error::stream_truncated);
    if (ignorableShortRead)
        return;
    std::cerr << what << ": " << ec.message() << "\n";
}
// Validates the shape of an HTTP JSON-RPC request body: "method" must be
// a string, and "params", when present, must be an array holding exactly
// one object.
//
// Fix: marked inline — a non-template function defined in a header
// violates the one-definition rule when the header is included from more
// than one translation unit (httpFail above is already inline).
inline bool
validRequest(boost::json::object const& req)
{
    if (!req.contains("method") || !req.at("method").is_string())
        return false;
    if (!req.contains("params"))
        return true;
    if (!req.at("params").is_array())
        return false;
    // as_array() yields a reference; bind by const& to avoid copying.
    auto const& array = req.at("params").as_array();
    if (array.size() != 1)
        return false;
    if (!array.at(0).is_object())
        return false;
    return true;
}
// This function produces an HTTP response for the given
// request. The type of the response object depends on the
// contents of the request, so the interface requires the
// caller to pass a generic lambda for receiving the response.
// Parses the HTTP body as a JSON-RPC request, re-shapes it into the
// websocket-style {"command": ...} form, dispatches via buildResponse,
// and hands the result to `send`. A plain GET serves the static test
// page; anything other than POST is rejected.
// NOTE(review): dosGuard is accepted but never consulted on this path —
// confirm whether HTTP clients should be rate-limited like ws clients.
template <class Body, class Allocator, class Send>
void
handle_request(
    boost::beast::http::
        request<Body, boost::beast::http::basic_fields<Allocator>>&& req,
    Send&& send,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<ETLLoadBalancer> balancer,
    DOSGuard& dosGuard)
{
    // Helper that stamps the common fields (server banner, content type,
    // keep-alive) onto every response this function produces.
    auto const response = [&req](
                              http::status status,
                              std::string content_type,
                              std::string message) {
        http::response<http::string_body> res{status, req.version()};
        res.set(http::field::server, "xrpl-reporting-server-v0.0.0");
        res.set(http::field::content_type, content_type);
        res.keep_alive(req.keep_alive());
        res.body() = std::string(message);
        res.prepare_payload();
        return res;
    };
    // Bare GET: serve the connectivity test page.
    if (req.method() == http::verb::get && req.body() == "")
    {
        send(response(http::status::ok, "text/html", defaultResponse));
        return;
    }
    if (req.method() != http::verb::post)
    {
        send(response(
            http::status::bad_request, "text/html", "Expected a POST request"));
        return;
    }
    try
    {
        BOOST_LOG_TRIVIAL(info) << "Received request: " << req.body();
        boost::json::object request;
        try
        {
            // boost::json::parse throws (a std::runtime_error subclass)
            // on malformed input.
            request = boost::json::parse(req.body()).as_object();
        }
        catch (std::runtime_error const& e)
        {
            send(response(
                http::status::bad_request,
                "text/html",
                "Cannot parse json in body"));
            return;
        }
        if (!validRequest(request))
        {
            send(response(
                http::status::bad_request, "text/html", "Malformed request"));
            return;
        }
        // JSON-RPC carries the payload in params[0] and the command in
        // "method"; the websocket-style handlers expect a flat object
        // with a "command" member, so re-shape accordingly.
        boost::json::object wsStyleRequest = request.contains("params")
            ? request.at("params").as_array().at(0).as_object()
            : boost::json::object{};
        wsStyleRequest["command"] = request["method"];
        std::cout << "Transfromed to ws style stuff" << std::endl;
        // No subscription manager or session over plain HTTP.
        auto [builtResponse, cost] =
            buildResponse(wsStyleRequest, backend, nullptr, balancer, nullptr);
        send(response(
            http::status::ok,
            "application/json",
            boost::json::serialize(builtResponse)));
        return;
    }
    catch (std::exception const& e)
    {
        std::cout << e.what() << std::endl;
        send(response(
            http::status::internal_server_error,
            "text/html",
            "Internal server error occurred"));
        return;
    }
}
// From Boost Beast examples http_server_flex.cpp
// Shared read/write machinery for HTTP and HTTPS sessions (CRTP, adapted
// from the Boost Beast http_server_flex example). The Derived class
// supplies the concrete stream type via stream()/release_stream(), the
// peer address via ip(), and connection teardown via do_close().
template <class Derived>
class HttpBase
{
    // Access the derived class, this is part of
    // the Curiously Recurring Template Pattern idiom.
    Derived&
    derived()
    {
        return static_cast<Derived&>(*this);
    }
    // Callable handed to handle_request; writes the response on the
    // derived stream while keeping the message alive for the duration of
    // the asynchronous write.
    struct send_lambda
    {
        HttpBase& self_;
        explicit send_lambda(HttpBase& self) : self_(self)
        {
        }
        template <bool isRequest, class Body, class Fields>
        void
        operator()(http::message<isRequest, Body, Fields>&& msg) const
        {
            // The lifetime of the message has to extend
            // for the duration of the async operation so
            // we use a shared_ptr to manage it.
            auto sp = std::make_shared<http::message<isRequest, Body, Fields>>(
                std::move(msg));
            // Store a type-erased version of the shared
            // pointer in the class to keep it alive.
            self_.res_ = sp;
            // Write the response
            http::async_write(
                self_.derived().stream(),
                *sp,
                boost::beast::bind_front_handler(
                    &HttpBase::on_write,
                    self_.derived().shared_from_this(),
                    sp->need_eof()));
        }
    };
    // Current in-flight request; reset before every read.
    http::request<http::string_body> req_;
    // Type-erased response kept alive while async_write is in flight.
    std::shared_ptr<void> res_;
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    std::shared_ptr<ETLLoadBalancer> balancer_;
    DOSGuard& dosGuard_;
    send_lambda lambda_;

protected:
    // Read buffer; also carries leftover bytes across a ws upgrade.
    boost::beast::flat_buffer buffer_;

public:
    HttpBase(
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer buffer)
        : backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , dosGuard_(dosGuard)
        , lambda_(*this)
        , buffer_(std::move(buffer))
    {
    }
    // Issue an asynchronous read for the next request (30s timeout).
    void
    do_read()
    {
        // Make the request empty before reading,
        // otherwise the operation behavior is undefined.
        req_ = {};
        // Set the timeout.
        boost::beast::get_lowest_layer(derived().stream())
            .expires_after(std::chrono::seconds(30));
        // Read a request
        http::async_read(
            derived().stream(),
            buffer_,
            req_,
            boost::beast::bind_front_handler(
                &HttpBase::on_read, derived().shared_from_this()));
    }
    // Completion handler for do_read: either upgrades the connection to a
    // websocket session or dispatches the request to handle_request.
    void
    on_read(boost::beast::error_code ec, std::size_t bytes_transferred)
    {
        boost::ignore_unused(bytes_transferred);
        // This means they closed the connection
        if (ec == http::error::end_of_stream)
            return derived().do_close();
        if (ec)
            return httpFail(ec, "read");
        // NOTE(review): ip is fetched but not used in this method —
        // presumably intended for dosGuard_ accounting; confirm.
        auto ip = derived().ip();
        if (boost::beast::websocket::is_upgrade(req_))
        {
            // Disable the timeout.
            // The websocket::stream uses its own timeout settings.
            boost::beast::get_lowest_layer(derived().stream()).expires_never();
            // Hand the stream, buffered bytes and request over to a new
            // websocket session; this HTTP session is finished.
            return make_websocket_session(
                derived().release_stream(),
                std::move(req_),
                std::move(buffer_),
                backend_,
                subscriptions_,
                balancer_,
                dosGuard_);
        }
        // Send the response
        handle_request(
            std::move(req_), lambda_, backend_, balancer_, dosGuard_);
    }
    // Completion handler for the response write; `close` mirrors the
    // need_eof() of the response that was sent.
    void
    on_write(
        bool close,
        boost::beast::error_code ec,
        std::size_t bytes_transferred)
    {
        boost::ignore_unused(bytes_transferred);
        if (ec)
            return httpFail(ec, "write");
        if (close)
        {
            // This means we should close the connection, usually because
            // the response indicated the "Connection: close" semantic.
            return derived().do_close();
        }
        // We're done with the response so delete it
        res_ = nullptr;
        // Read another request
        do_read();
    }
};
#endif // RIPPLE_REPORTING_HTTP_BASE_SESSION_H

97
src/server/HttpSession.h Normal file
View File

@@ -0,0 +1,97 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2021 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_REPORTING_HTTP_SESSION_H
#define RIPPLE_REPORTING_HTTP_SESSION_H
#include <server/HttpBase.h>
namespace http = boost::beast::http;
namespace net = boost::asio;
namespace ssl = boost::asio::ssl;
using tcp = boost::asio::ip::tcp;
// Handles an HTTP server connection
// A plain (non-TLS) HTTP connection. All of the read/write machinery
// lives in HttpBase; this class only supplies the concrete stream type
// and connection-specific setup/teardown.
class HttpSession : public HttpBase<HttpSession>,
                    public std::enable_shared_from_this<HttpSession>
{
    boost::beast::tcp_stream stream_;

public:
    // Takes ownership of an accepted socket.
    explicit HttpSession(
        tcp::socket&& socket,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer buffer)
        : HttpBase<HttpSession>(
              backend,
              subscriptions,
              balancer,
              dosGuard,
              std::move(buffer))
        , stream_(std::move(socket))
    {
    }
    // Stream accessor required by the HttpBase CRTP contract.
    boost::beast::tcp_stream&
    stream()
    {
        return stream_;
    }
    // Relinquishes the stream (used when upgrading to a websocket).
    boost::beast::tcp_stream
    release_stream()
    {
        return std::move(stream_);
    }
    // Remote peer address as text.
    std::string
    ip()
    {
        auto const& socket = stream_.socket();
        return socket.remote_endpoint().address().to_string();
    }
    // Begin servicing the connection. Dispatching onto the stream's
    // executor keeps all async operations on this session serialized,
    // which makes the session thread-safe by construction.
    void
    run()
    {
        auto self = shared_from_this();
        net::dispatch(stream_.get_executor(), [self]() { self->do_read(); });
    }
    // Gracefully shut down the TCP connection.
    void
    do_close()
    {
        boost::beast::error_code ec;
        stream_.socket().shutdown(tcp::socket::shutdown_send, ec);
        // The connection is closed at this point; errors are ignored.
    }
};
#endif // RIPPLE_REPORTING_HTTP_SESSION_H

192
src/server/PlainWsSession.h Normal file
View File

@@ -0,0 +1,192 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_REPORTING_WS_SESSION_H
#define RIPPLE_REPORTING_WS_SESSION_H
#include <boost/asio/dispatch.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/beast/websocket/ssl.hpp>
#include <etl/ReportingETL.h>
#include <server/Handlers.h>
#include <server/WsBase.h>
#include <server/listener.h>
#include <iostream>
namespace http = boost::beast::http;
namespace net = boost::asio;
namespace ssl = boost::asio::ssl;
namespace websocket = boost::beast::websocket;
using tcp = boost::asio::ip::tcp;
class ReportingETL;
// Echoes back all received WebSocket messages
// A websocket session over a plain (non-TLS) TCP stream. All message
// handling lives in WsSession; this class supplies the stream type.
class PlainWsSession : public WsSession<PlainWsSession>
{
    websocket::stream<boost::beast::tcp_stream> ws_;

public:
    // Takes ownership of an already-accepted socket.
    explicit PlainWsSession(
        boost::asio::ip::tcp::socket&& socket,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer&& buffer)
        : WsSession(
              backend,
              subscriptions,
              balancer,
              dosGuard,
              std::move(buffer))
        , ws_(std::move(socket))
    {
    }
    // Websocket stream accessor required by the WsSession CRTP contract.
    websocket::stream<boost::beast::tcp_stream>&
    ws()
    {
        return ws_;
    }
    // Remote peer address: unwrap websocket -> tcp to reach the socket.
    std::string
    ip()
    {
        auto const& socket = ws_.next_layer().socket();
        return socket.remote_endpoint().address().to_string();
    }
    ~PlainWsSession() = default;
};
// Performs the HTTP -> websocket upgrade on a plain TCP stream, then
// constructs a PlainWsSession that takes over the socket and any bytes
// already buffered.
class WsUpgrader : public std::enable_shared_from_this<WsUpgrader>
{
    boost::beast::tcp_stream http_;
    boost::optional<http::request_parser<http::string_body>> parser_;
    boost::beast::flat_buffer buffer_;
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    std::shared_ptr<ETLLoadBalancer> balancer_;
    DOSGuard& dosGuard_;
    // The upgrade request; populated only by the second constructor.
    http::request<http::string_body> req_;

public:
    // NOTE(review): this overload leaves req_ default-constructed, so
    // on_upgrade() will reject the connection as "is not upgrade" unless
    // a read step is added; presumably callers use the overload below
    // that supplies the already-parsed request — confirm.
    WsUpgrader(
        boost::asio::ip::tcp::socket&& socket,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer&& b)
        : http_(std::move(socket))
        , backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , dosGuard_(dosGuard)
        , buffer_(std::move(b))
    {
    }
    // Takes a stream plus the upgrade request that was already read by
    // the HTTP session handing the connection over.
    WsUpgrader(
        boost::beast::tcp_stream&& stream,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer&& b,
        http::request<http::string_body> req)
        : http_(std::move(stream))
        , backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , dosGuard_(dosGuard)
        , buffer_(std::move(b))
        , req_(std::move(req))
    {
    }
    // Begin the upgrade on the stream's executor.
    void
    run()
    {
        std::cout << "RUNNING" << std::endl;
        // We need to be executing within a strand to perform async operations
        // on the I/O objects in this session. Although not strictly necessary
        // for single-threaded contexts, this example code is written to be
        // thread-safe by default.
        net::dispatch(
            http_.get_executor(),
            boost::beast::bind_front_handler(
                &WsUpgrader::do_upgrade, shared_from_this()));
    }

private:
    void
    do_upgrade()
    {
        std::cout << "doing upgrade" << std::endl;
        // NOTE(review): parser_ is emplaced and configured here but never
        // used to read from the stream in this class — confirm whether a
        // read step is missing or the parser is vestigial.
        parser_.emplace();
        // Apply a reasonable limit to the allowed size
        // of the body in bytes to prevent abuse.
        parser_->body_limit(10000);
        // Set the timeout.
        boost::beast::get_lowest_layer(http_).expires_after(
            std::chrono::seconds(30));
        on_upgrade();
    }
    void
    on_upgrade()
    {
        // See if it is a WebSocket Upgrade
        if (!websocket::is_upgrade(req_))
        {
            std::cout << "is not upgrade" << std::endl;
            return;
        }
        // Disable the timeout.
        // The websocket::stream uses its own timeout settings.
        boost::beast::get_lowest_layer(http_).expires_never();
        std::cout << "making session" << std::endl;
        // The websocket session takes ownership of the raw socket and any
        // leftover buffered bytes, then performs the ws handshake.
        std::make_shared<PlainWsSession>(
            http_.release_socket(),
            backend_,
            subscriptions_,
            balancer_,
            dosGuard_,
            std::move(buffer_))
            ->run(std::move(req_));
    }
};
#endif // RIPPLE_REPORTING_WS_SESSION_H

132
src/server/SslHttpSession.h Normal file
View File

@@ -0,0 +1,132 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2021 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_REPORTING_HTTPS_SESSION_H
#define RIPPLE_REPORTING_HTTPS_SESSION_H
#include <server/HttpBase.h>
namespace http = boost::beast::http;
namespace net = boost::asio;
namespace ssl = boost::asio::ssl;
using tcp = boost::asio::ip::tcp;
// Handles an HTTPS server connection
// Handles an HTTPS server connection. The read/write machinery lives in
// HttpBase; this class supplies the ssl stream type plus the TLS
// handshake/shutdown steps.
class SslHttpSession : public HttpBase<SslHttpSession>,
                       public std::enable_shared_from_this<SslHttpSession>
{
    boost::beast::ssl_stream<boost::beast::tcp_stream> stream_;

public:
    // Take ownership of the socket
    explicit SslHttpSession(
        tcp::socket&& socket,
        ssl::context& ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer buffer)
        : HttpBase<SslHttpSession>(
              backend,
              subscriptions,
              balancer,
              dosGuard,
              std::move(buffer))
        , stream_(std::move(socket), ctx)
    {
    }
    // Stream accessor required by the HttpBase CRTP contract.
    boost::beast::ssl_stream<boost::beast::tcp_stream>&
    stream()
    {
        return stream_;
    }
    // Relinquishes the stream (used when upgrading to a websocket).
    boost::beast::ssl_stream<boost::beast::tcp_stream>
    release_stream()
    {
        return std::move(stream_);
    }
    // Remote peer address: unwrap ssl -> tcp to reach the socket.
    std::string
    ip()
    {
        return stream_.next_layer()
            .socket()
            .remote_endpoint()
            .address()
            .to_string();
    }
    // Start the asynchronous operation
    void
    run()
    {
        auto self = shared_from_this();
        // We need to be executing within a strand to perform async operations
        // on the I/O objects in this session.
        net::dispatch(stream_.get_executor(), [self]() {
            // Set the timeout.
            boost::beast::get_lowest_layer(self->stream())
                .expires_after(std::chrono::seconds(30));
            // Perform the SSL handshake
            // Note, this is the buffered version of the handshake.
            // Any bytes already sitting in buffer_ are consumed as the
            // beginning of the TLS client hello.
            self->stream_.async_handshake(
                ssl::stream_base::server,
                self->buffer_.data(),
                boost::beast::bind_front_handler(
                    &SslHttpSession::on_handshake, self));
        });
    }
    // Handshake done: discard the consumed bytes and start reading HTTP.
    void
    on_handshake(boost::beast::error_code ec, std::size_t bytes_used)
    {
        if (ec)
            return httpFail(ec, "handshake");
        buffer_.consume(bytes_used);
        do_read();
    }
    // Begin a graceful TLS shutdown (with its own 30s timeout).
    void
    do_close()
    {
        // Set the timeout.
        boost::beast::get_lowest_layer(stream_).expires_after(
            std::chrono::seconds(30));
        // Perform the SSL shutdown
        stream_.async_shutdown(boost::beast::bind_front_handler(
            &SslHttpSession::on_shutdown, shared_from_this()));
    }
    void
    on_shutdown(boost::beast::error_code ec)
    {
        if (ec)
            return httpFail(ec, "shutdown");
        // At this point the connection is closed gracefully
    }
};
#endif // RIPPLE_REPORTING_HTTPS_SESSION_H

195
src/server/SslWsSession.h Normal file
View File

@@ -0,0 +1,195 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_REPORTING_SSL_WS_SESSION_H
#define RIPPLE_REPORTING_SSL_WS_SESSION_H
#include <boost/asio/dispatch.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/beast/websocket/ssl.hpp>
#include <etl/ReportingETL.h>
#include <server/Handlers.h>
#include <server/WsBase.h>
namespace http = boost::beast::http;
namespace net = boost::asio;
namespace ssl = boost::asio::ssl;
namespace websocket = boost::beast::websocket;
using tcp = boost::asio::ip::tcp;
class ReportingETL;
// A websocket session over TLS. All read/write logic lives in the CRTP base
// (WsSession); this class only supplies the concrete stream type and the
// peer-address accessor.
class SslWsSession : public WsSession<SslWsSession>
{
    boost::beast::websocket::stream<
        boost::beast::ssl_stream<boost::beast::tcp_stream>>
        ws_;

public:
    // Take ownership of an already-handshaken SSL stream.
    explicit SslWsSession(
        boost::beast::ssl_stream<boost::beast::tcp_stream>&& stream,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer&& b)
        : WsSession(backend, subscriptions, balancer, dosGuard, std::move(b))
        , ws_(std::move(stream))
    {
    }

    // Stream accessor required by the CRTP base.
    boost::beast::websocket::stream<
        boost::beast::ssl_stream<boost::beast::tcp_stream>>&
    ws()
    {
        return ws_;
    }

    // Peer IP address, read from the underlying TCP socket.
    std::string
    ip()
    {
        auto& socket = ws_.next_layer().next_layer().socket();
        return socket.remote_endpoint().address().to_string();
    }
};
// Performs the websocket upgrade handshake on an already-established TLS
// stream and then hands the stream off to an SslWsSession.
class SslWsUpgrader : public std::enable_shared_from_this<SslWsUpgrader>
{
    boost::beast::ssl_stream<boost::beast::tcp_stream> https_;
    boost::optional<http::request_parser<http::string_body>> parser_;
    boost::beast::flat_buffer buffer_;
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    std::shared_ptr<ETLLoadBalancer> balancer_;
    DOSGuard& dosGuard_;
    http::request<http::string_body> req_;

public:
    // NOTE(review): this overload never receives an HTTP request, so
    // on_upgrade() will inspect a default-constructed req_ (never an
    // upgrade) — confirm callers always use the overload taking a request.
    SslWsUpgrader(
        boost::asio::ip::tcp::socket&& socket,
        ssl::context& ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer&& b)
        // Initializers listed in declaration order; the original listed
        // buffer_ after dosGuard_, triggering -Wreorder.
        : https_(std::move(socket), ctx)
        , buffer_(std::move(b))
        , backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , dosGuard_(dosGuard)
    {
    }
    SslWsUpgrader(
        boost::beast::ssl_stream<boost::beast::tcp_stream> stream,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard,
        boost::beast::flat_buffer&& b,
        http::request<http::string_body> req)
        : https_(std::move(stream))
        , buffer_(std::move(b))
        , backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , dosGuard_(dosGuard)
        , req_(std::move(req))
    {
    }
    ~SslWsUpgrader() = default;
    // Start the upgrade on the stream's executor.
    void
    run()
    {
        // Set the timeout.
        boost::beast::get_lowest_layer(https_).expires_after(
            std::chrono::seconds(30));
        net::dispatch(
            https_.get_executor(),
            boost::beast::bind_front_handler(
                &SslWsUpgrader::do_upgrade, shared_from_this()));
    }

private:
    void
    on_handshake(boost::beast::error_code ec, std::size_t bytes_used)
    {
        if (ec)
            return wsFail(ec, "handshake");
        // Consume the portion of the buffer used by the handshake
        buffer_.consume(bytes_used);
        do_upgrade();
    }
    void
    do_upgrade()
    {
        // NOTE(review): parser_ is prepared here but never consulted —
        // on_upgrade() reads req_ directly. Confirm before removing.
        parser_.emplace();
        // Apply a reasonable limit to the allowed size
        // of the body in bytes to prevent abuse.
        parser_->body_limit(10000);
        // Set the timeout.
        boost::beast::get_lowest_layer(https_).expires_after(
            std::chrono::seconds(30));
        on_upgrade();
    }
    void
    on_upgrade()
    {
        // See if it is a WebSocket Upgrade; if not, drop the connection.
        if (!websocket::is_upgrade(req_))
        {
            return;
        }
        // Disable the timeout.
        // The websocket::stream uses its own timeout settings.
        boost::beast::get_lowest_layer(https_).expires_never();
        std::make_shared<SslWsSession>(
            std::move(https_),
            backend_,
            subscriptions_,
            balancer_,
            dosGuard_,
            std::move(buffer_))
            ->run(std::move(req_));
    }
};
#endif // RIPPLE_REPORTING_SSL_WS_SESSION_H

View File

@@ -0,0 +1,134 @@
#include <handlers/RPCHelpers.h>
#include <server/SubscriptionManager.h>
#include <server/WsBase.h>
void
SubscriptionManager::subLedger(std::shared_ptr<WsBase>& session)
{
streamSubscribers_[Ledgers].emplace(std::move(session));
}
void
SubscriptionManager::unsubLedger(std::shared_ptr<WsBase>& session)
{
streamSubscribers_[Ledgers].erase(session);
}
void
SubscriptionManager::pubLedger(
ripple::LedgerInfo const& lgrInfo,
ripple::Fees const& fees,
std::string const& ledgerRange,
std::uint32_t txnCount)
{
boost::json::object pubMsg;
pubMsg["type"] = "ledgerClosed";
pubMsg["ledger_index"] = lgrInfo.seq;
pubMsg["ledger_hash"] = to_string(lgrInfo.hash);
pubMsg["ledger_time"] = lgrInfo.closeTime.time_since_epoch().count();
pubMsg["fee_ref"] = toBoostJson(fees.units.jsonClipped());
pubMsg["fee_base"] = toBoostJson(fees.base.jsonClipped());
pubMsg["reserve_base"] = toBoostJson(fees.accountReserve(0).jsonClipped());
pubMsg["reserve_inc"] = toBoostJson(fees.increment.jsonClipped());
pubMsg["validated_ledgers"] = ledgerRange;
pubMsg["txn_count"] = txnCount;
for (auto const& session : streamSubscribers_[Ledgers])
session->send(boost::json::serialize(pubMsg));
}
void
SubscriptionManager::subTransactions(std::shared_ptr<WsBase>& session)
{
streamSubscribers_[Transactions].emplace(std::move(session));
}
void
SubscriptionManager::unsubTransactions(std::shared_ptr<WsBase>& session)
{
streamSubscribers_[Transactions].erase(session);
}
void
SubscriptionManager::subAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session)
{
accountSubscribers_[account].emplace(std::move(session));
}
void
SubscriptionManager::unsubAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session)
{
accountSubscribers_[account].erase(session);
}
void
SubscriptionManager::pubTransaction(
Backend::TransactionAndMetadata const& blob,
std::uint32_t seq)
{
auto [tx, meta] = deserializeTxPlusMeta(blob, seq);
boost::json::object pubMsg;
pubMsg["transaction"] = toJson(*tx);
pubMsg["meta"] = toJson(*meta);
for (auto const& session : streamSubscribers_[Transactions])
session->send(boost::json::serialize(pubMsg));
auto journal = ripple::debugLog();
auto accounts = meta->getAffectedAccounts(journal);
for (ripple::AccountID const& account : accounts)
for (auto const& session : accountSubscribers_[account])
session->send(boost::json::serialize(pubMsg));
}
void
SubscriptionManager::forwardProposedTransaction(
boost::json::object const& response)
{
for (auto const& session : streamSubscribers_[TransactionsProposed])
session->send(boost::json::serialize(response));
auto transaction = response.at("transaction").as_object();
auto accounts = getAccountsFromTransaction(transaction);
for (ripple::AccountID const& account : accounts)
for (auto const& session : accountProposedSubscribers_[account])
session->send(boost::json::serialize(response));
}
void
SubscriptionManager::subProposedAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session)
{
accountProposedSubscribers_[account].emplace(std::move(session));
}
void
SubscriptionManager::unsubProposedAccount(
ripple::AccountID const& account,
std::shared_ptr<WsBase>& session)
{
accountProposedSubscribers_[account].erase(session);
}
void
SubscriptionManager::subProposedTransactions(std::shared_ptr<WsBase>& session)
{
streamSubscribers_[TransactionsProposed].emplace(std::move(session));
}
void
SubscriptionManager::unsubProposedTransactions(std::shared_ptr<WsBase>& session)
{
streamSubscribers_[TransactionsProposed].erase(session);
}

View File

@@ -0,0 +1,106 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef SUBSCRIPTION_MANAGER_H
#define SUBSCRIPTION_MANAGER_H
#include <backend/BackendInterface.h>
#include <memory>
class WsBase;
// Tracks which websocket sessions are subscribed to each stream (ledgers,
// transactions, transactions_proposed) and to per-account transaction feeds,
// and publishes messages to those sessions.
// NOTE(review): subscribers are held by shared_ptr and removed only via the
// unsub* calls — confirm sessions unsubscribe themselves on disconnect.
class SubscriptionManager
{
    // The set of sessions subscribed to a single feed.
    using subscriptions = std::set<std::shared_ptr<WsBase>>;
    // Indices into streamSubscribers_. finalEntry is the count, not a stream.
    enum SubscriptionType {
        Ledgers,
        Transactions,
        TransactionsProposed,
        finalEntry
    };
    // One subscriber set per stream, indexed by SubscriptionType.
    std::array<subscriptions, finalEntry> streamSubscribers_;
    // Sessions subscribed to validated transactions affecting an account.
    std::unordered_map<ripple::AccountID, subscriptions> accountSubscribers_;
    // Sessions subscribed to proposed transactions affecting an account.
    std::unordered_map<ripple::AccountID, subscriptions>
        accountProposedSubscribers_;
public:
    // Factory; the manager is always held by shared_ptr (sessions keep a
    // weak_ptr back to it).
    static std::shared_ptr<SubscriptionManager>
    make_SubscriptionManager()
    {
        return std::make_shared<SubscriptionManager>();
    }
    // Stream subscribe/unsubscribe. The sub* overloads move from the
    // caller's shared_ptr.
    void
    subLedger(std::shared_ptr<WsBase>& session);
    // Publish a "ledgerClosed" message to ledgers-stream subscribers.
    void
    pubLedger(
        ripple::LedgerInfo const& lgrInfo,
        ripple::Fees const& fees,
        std::string const& ledgerRange,
        std::uint32_t txnCount);
    void
    unsubLedger(std::shared_ptr<WsBase>& session);
    void
    subTransactions(std::shared_ptr<WsBase>& session);
    void
    unsubTransactions(std::shared_ptr<WsBase>& session);
    // Publish a validated transaction to the transactions stream and to
    // subscribers of any affected account.
    void
    pubTransaction(
        Backend::TransactionAndMetadata const& blob,
        std::uint32_t seq);
    void
    subAccount(
        ripple::AccountID const& account,
        std::shared_ptr<WsBase>& session);
    void
    unsubAccount(
        ripple::AccountID const& account,
        std::shared_ptr<WsBase>& session);
    // Relay a proposed transaction received from rippled to stream and
    // per-account subscribers.
    void
    forwardProposedTransaction(boost::json::object const& response);
    void
    subProposedAccount(
        ripple::AccountID const& account,
        std::shared_ptr<WsBase>& session);
    void
    unsubProposedAccount(
        ripple::AccountID const& account,
        std::shared_ptr<WsBase>& session);
    void
    subProposedTransactions(std::shared_ptr<WsBase>& session);
    void
    unsubProposedTransactions(std::shared_ptr<WsBase>& session);
};
#endif // SUBSCRIPTION_MANAGER_H

226
src/server/WsBase.h Normal file
View File

@@ -0,0 +1,226 @@
#ifndef RIPPLE_REPORTING_WS_BASE_SESSION_H
#define RIPPLE_REPORTING_WS_BASE_SESSION_H
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <backend/BackendInterface.h>
#include <etl/ETLSource.h>
#include <iostream>
#include <memory>
#include <server/DOSGuard.h>
#include <server/SubscriptionManager.h>
namespace http = boost::beast::http;
namespace net = boost::asio;
namespace ssl = boost::asio::ssl;
namespace websocket = boost::beast::websocket;
using tcp = boost::asio::ip::tcp;
// Report a websocket-layer failure on stderr.
inline void
wsFail(boost::beast::error_code ec, char const* what)
{
    std::string message{what};
    message.append(": ").append(ec.message()).append("\n");
    std::cerr << message;
}
// Abstract websocket session, independent of the concrete (plain or TLS)
// stream type. SubscriptionManager holds these to publish to clients.
class WsBase
{
public:
    // Send, that enables SubscriptionManager to publish to clients
    virtual void
    send(std::string&& msg) = 0;
    // Virtual destructor: sessions are deleted through WsBase pointers.
    virtual ~WsBase()
    {
    }
};
// Echoes back all received WebSocket messages
template <class Derived>
class WsSession : public WsBase,
public std::enable_shared_from_this<WsSession<Derived>>
{
boost::beast::flat_buffer buffer_;
std::string response_;
std::shared_ptr<BackendInterface> backend_;
std::weak_ptr<SubscriptionManager> subscriptions_;
std::shared_ptr<ETLLoadBalancer> balancer_;
DOSGuard& dosGuard_;
public:
explicit WsSession(
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<ETLLoadBalancer> balancer,
DOSGuard& dosGuard,
boost::beast::flat_buffer&& buffer)
: backend_(backend)
, subscriptions_(subscriptions)
, balancer_(balancer)
, dosGuard_(dosGuard)
, buffer_(std::move(buffer))
{
}
virtual ~WsSession()
{
}
// Access the derived class, this is part of
// the Curiously Recurring Template Pattern idiom.
Derived&
derived()
{
return static_cast<Derived&>(*this);
}
void
send(std::string&& msg)
{
derived().ws().text(derived().ws().got_text());
derived().ws().async_write(
boost::asio::buffer(msg),
boost::beast::bind_front_handler(
&WsSession::on_write, this->shared_from_this()));
}
void
run(http::request<http::string_body> req)
{
std::cout << "Running ws" << std::endl;
// Set suggested timeout settings for the websocket
derived().ws().set_option(websocket::stream_base::timeout::suggested(
boost::beast::role_type::server));
std::cout << "Trying to decorate" << std::endl;
// Set a decorator to change the Server of the handshake
derived().ws().set_option(websocket::stream_base::decorator(
[](websocket::response_type& res) {
res.set(
http::field::server,
std::string(BOOST_BEAST_VERSION_STRING) +
" websocket-server-async");
}));
std::cout << "trying to async accept" << std::endl;
derived().ws().async_accept(
req,
boost::beast::bind_front_handler(
&WsSession::on_accept, this->shared_from_this()));
}
void
on_accept(boost::beast::error_code ec)
{
if (ec)
return wsFail(ec, "accept");
// Read a message
do_read();
}
void
do_read()
{
// Read a message into our buffer
derived().ws().async_read(
buffer_,
boost::beast::bind_front_handler(
&WsSession::on_read, this->shared_from_this()));
}
void
on_read(boost::beast::error_code ec, std::size_t bytes_transferred)
{
boost::ignore_unused(bytes_transferred);
// This indicates that the session was closed
if (ec == boost::beast::websocket::error::closed)
{
std::cout << "session closed" << std::endl;
return;
}
if (ec)
return wsFail(ec, "read");
std::string msg{
static_cast<char const*>(buffer_.data().data()), buffer_.size()};
// BOOST_LOG_TRIVIAL(debug) << __func__ << msg;
boost::json::object response;
auto ip = derived().ip();
BOOST_LOG_TRIVIAL(debug)
<< __func__ << " received request from ip = " << ip;
if (!dosGuard_.isOk(ip))
response["error"] = "Too many requests. Slow down";
else
{
try
{
boost::json::value raw = boost::json::parse(msg);
boost::json::object request = raw.as_object();
BOOST_LOG_TRIVIAL(debug) << " received request : " << request;
try
{
std::shared_ptr<SubscriptionManager> subPtr =
subscriptions_.lock();
if (!subPtr)
return;
auto [res, cost] = buildResponse(
request,
backend_,
subPtr,
balancer_,
this->shared_from_this());
auto start = std::chrono::system_clock::now();
response = std::move(res);
if (!dosGuard_.add(ip, cost))
{
response["warning"] = "Too many requests";
}
auto end = std::chrono::system_clock::now();
BOOST_LOG_TRIVIAL(info)
<< __func__ << " RPC call took "
<< ((end - start).count() / 1000000000.0)
<< " . request = " << request;
}
catch (Backend::DatabaseTimeout const& t)
{
BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout";
response["error"] =
"Database read timeout. Please retry the request";
}
}
catch (std::exception const& e)
{
BOOST_LOG_TRIVIAL(error)
<< __func__ << "caught exception : " << e.what();
response["error"] = "Unknown exception";
}
}
BOOST_LOG_TRIVIAL(trace) << __func__ << response;
response_ = boost::json::serialize(response);
// Echo the message
derived().ws().text(derived().ws().got_text());
derived().ws().async_write(
boost::asio::buffer(response_),
boost::beast::bind_front_handler(
&WsSession::on_write, this->shared_from_this()));
}
void
on_write(boost::beast::error_code ec, std::size_t bytes_transferred)
{
boost::ignore_unused(bytes_transferred);
if (ec)
return wsFail(ec, "write");
// Clear the buffer
buffer_.consume(buffer_.size());
// Do another read
do_read();
}
};
#endif // RIPPLE_REPORTING_WS_BASE_SESSION_H

353
src/server/listener.h Normal file
View File

@@ -0,0 +1,353 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef LISTENER_H
#define LISTENER_H
#include <boost/asio/dispatch.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <server/HttpSession.h>
#include <server/PlainWsSession.h>
#include <server/SslHttpSession.h>
#include <server/SslWsSession.h>
#include <server/SubscriptionManager.h>
#include <iostream>
class SubscriptionManager;
// Sniffs the first bytes of a freshly accepted TCP connection to decide
// whether the client is starting a TLS handshake, then launches the matching
// (SSL or plain) session. Bytes read during detection are handed to the
// session via buffer_.
template <class PlainSession, class SslSession>
class Detector
    : public std::enable_shared_from_this<Detector<PlainSession, SslSession>>
{
    using std::enable_shared_from_this<
        Detector<PlainSession, SslSession>>::shared_from_this;
    boost::beast::tcp_stream stream_;
    // Empty when the server was configured without TLS certificates.
    std::optional<std::reference_wrapper<ssl::context>> ctx_;
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    std::shared_ptr<ETLLoadBalancer> balancer_;
    DOSGuard& dosGuard_;
    boost::beast::flat_buffer buffer_;
public:
    Detector(
        tcp::socket&& socket,
        std::optional<std::reference_wrapper<ssl::context>> ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard)
        : stream_(std::move(socket))
        , ctx_(ctx)
        , backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , dosGuard_(dosGuard)
    {
    }
    // Launch the detector
    void
    run()
    {
        // Set the timeout.
        boost::beast::get_lowest_layer(stream_).expires_after(
            std::chrono::seconds(30));
        // Detect a TLS handshake
        async_detect_ssl(
            stream_,
            buffer_,
            boost::beast::bind_front_handler(
                &Detector::on_detect, shared_from_this()));
    }
    // `result` is true when the buffered bytes look like a TLS ClientHello.
    void
    on_detect(boost::beast::error_code ec, bool result)
    {
        if (ec)
            return httpFail(ec, "detect");
        if (result)
        {
            // TLS requested but no certificates configured: refuse.
            if (!ctx_)
                return httpFail(ec, "ssl not supported by this server");
            // Launch SSL session
            std::make_shared<SslSession>(
                stream_.release_socket(),
                *ctx_,
                backend_,
                subscriptions_,
                balancer_,
                dosGuard_,
                std::move(buffer_))
                ->run();
            return;
        }
        // Launch plain session
        std::make_shared<PlainSession>(
            stream_.release_socket(),
            backend_,
            subscriptions_,
            balancer_,
            dosGuard_,
            std::move(buffer_))
            ->run();
    }
};
// Upgrade an accepted plain-TCP HTTP connection into a websocket session.
void
make_websocket_session(
    boost::beast::tcp_stream stream,
    http::request<http::string_body> req,
    boost::beast::flat_buffer buffer,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<ETLLoadBalancer> balancer,
    DOSGuard& dosGuard)
{
    auto upgrader = std::make_shared<WsUpgrader>(
        std::move(stream),
        backend,
        subscriptions,
        balancer,
        dosGuard,
        std::move(buffer),
        std::move(req));
    upgrader->run();
}
// Upgrade an accepted TLS HTTP connection into a websocket session.
void
make_websocket_session(
    boost::beast::ssl_stream<boost::beast::tcp_stream> stream,
    http::request<http::string_body> req,
    boost::beast::flat_buffer buffer,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<ETLLoadBalancer> balancer,
    DOSGuard& dosGuard)
{
    auto upgrader = std::make_shared<SslWsUpgrader>(
        std::move(stream),
        backend,
        subscriptions,
        balancer,
        dosGuard,
        std::move(buffer),
        std::move(req));
    upgrader->run();
}
// Accepts incoming TCP connections and hands each one to a Detector, which
// decides between PlainSession and SslSession. Acceptor setup errors are
// reported via httpFail and leave the listener inert (run() then accepts on
// an unopened acceptor, which fails immediately).
template <class PlainSession, class SslSession>
class Listener
    : public std::enable_shared_from_this<Listener<PlainSession, SslSession>>
{
    using std::enable_shared_from_this<
        Listener<PlainSession, SslSession>>::shared_from_this;
    net::io_context& ioc_;
    // Empty when the server was configured without TLS certificates.
    std::optional<ssl::context> ctx_;
    tcp::acceptor acceptor_;
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    std::shared_ptr<ETLLoadBalancer> balancer_;
    DOSGuard& dosGuard_;
public:
    Listener(
        net::io_context& ioc,
        std::optional<ssl::context>&& ctx,
        tcp::endpoint endpoint,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<ETLLoadBalancer> balancer,
        DOSGuard& dosGuard)
        : ioc_(ioc)
        , ctx_(std::move(ctx))
        , acceptor_(net::make_strand(ioc))
        , backend_(backend)
        , subscriptions_(subscriptions)
        , balancer_(balancer)
        , dosGuard_(dosGuard)
    {
        boost::beast::error_code ec;
        // Open the acceptor
        acceptor_.open(endpoint.protocol(), ec);
        if (ec)
        {
            httpFail(ec, "open");
            return;
        }
        // Allow address reuse
        acceptor_.set_option(net::socket_base::reuse_address(true), ec);
        if (ec)
        {
            httpFail(ec, "set_option");
            return;
        }
        // Bind to the server address
        acceptor_.bind(endpoint, ec);
        if (ec)
        {
            httpFail(ec, "bind");
            return;
        }
        // Start listening for connections
        acceptor_.listen(net::socket_base::max_listen_connections, ec);
        if (ec)
        {
            httpFail(ec, "listen");
            return;
        }
    }
    // Start accepting incoming connections
    void
    run()
    {
        do_accept();
    }
private:
    void
    do_accept()
    {
        // The new connection gets its own strand
        acceptor_.async_accept(
            net::make_strand(ioc_),
            boost::beast::bind_front_handler(
                &Listener::on_accept, shared_from_this()));
    }
    // One connection accepted (or failed); either way, keep accepting.
    void
    on_accept(boost::beast::error_code ec, tcp::socket socket)
    {
        if (ec)
        {
            httpFail(ec, "listener_accept");
        }
        else
        {
            auto ctxRef = ctx_
                ? std::optional<
                      std::reference_wrapper<ssl::context>>{ctx_.value()}
                : std::nullopt;
            // Create the detector session and run it
            std::make_shared<Detector<PlainSession, SslSession>>(
                std::move(socket),
                ctxRef,
                backend_,
                subscriptions_,
                balancer_,
                dosGuard_)
                ->run();
        }
        // Accept another connection
        do_accept();
    }
};
namespace Server {
// Load a PEM certificate chain and private key from disk and build a
// TLSv1.2 context from them. Returns an empty optional when either file
// cannot be opened.
std::optional<ssl::context>
parse_certs(const char* certFilename, const char* keyFilename)
{
    // Read an entire file into a string; empty optional on open failure.
    auto slurp = [](const char* path) -> std::optional<std::string> {
        std::ifstream file(path, std::ios::in | std::ios::binary);
        if (!file)
            return {};
        std::stringstream data;
        data << file.rdbuf();
        return data.str();
    };

    auto const cert = slurp(certFilename);
    if (!cert)
        return {};
    auto const key = slurp(keyFilename);
    if (!key)
        return {};

    ssl::context ctx{ssl::context::tlsv12};
    ctx.set_options(
        boost::asio::ssl::context::default_workarounds |
        boost::asio::ssl::context::no_sslv2);
    ctx.use_certificate_chain(boost::asio::buffer(cert->data(), cert->size()));
    ctx.use_private_key(
        boost::asio::buffer(key->data(), key->size()),
        boost::asio::ssl::context::file_format::pem);
    return ctx;
}
using WebsocketServer = Listener<WsUpgrader, SslWsUpgrader>;
using HttpServer = Listener<HttpSession, SslHttpSession>;
// Build and start an HTTP(S) listener from the "server" section of the
// config. Returns nullptr when no server is configured. TLS is enabled only
// when both a certificate and a key file are configured (and parse
// successfully; otherwise the server silently runs without TLS).
static std::shared_ptr<HttpServer>
make_HttpServer(
    boost::json::object const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<SubscriptionManager> subscriptions,
    std::shared_ptr<ETLLoadBalancer> balancer,
    DOSGuard& dosGuard)
{
    if (!config.contains("server"))
        return nullptr;
    auto const& serverConfig = config.at("server").as_object();

    std::optional<ssl::context> sslCtx;
    bool const hasCert = serverConfig.contains("ssl_cert_file");
    bool const hasKey = serverConfig.contains("ssl_key_file");
    if (hasCert && hasKey)
    {
        sslCtx = parse_certs(
            serverConfig.at("ssl_cert_file").as_string().c_str(),
            serverConfig.at("ssl_key_file").as_string().c_str());
    }

    auto const address = boost::asio::ip::make_address(
        serverConfig.at("ip").as_string().c_str());
    auto const port =
        static_cast<unsigned short>(serverConfig.at("port").as_int64());
    boost::asio::ip::tcp::endpoint const endpoint{address, port};

    auto server = std::make_shared<HttpServer>(
        ioc,
        std::move(sslCtx),
        endpoint,
        backend,
        subscriptions,
        balancer,
        dosGuard);
    server->run();
    return server;
}
} // namespace Server
#endif // LISTENER_H

178
src/server/main.cpp Normal file
View File

@@ -0,0 +1,178 @@
//
// Copyright (c) 2016-2019 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//
//------------------------------------------------------------------------------
//
// Example: WebSocket server, asynchronous
//
//------------------------------------------------------------------------------
#include <boost/asio/dispatch.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/json.hpp>
#include <boost/log/core.hpp>
#include <boost/log/expressions.hpp>
#include <boost/log/trivial.hpp>
#include <algorithm>
#include <backend/BackendFactory.h>
#include <cstdlib>
#include <etl/ReportingETL.h>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <server/listener.h>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
// Read and parse the JSON config file. Returns an empty optional when the
// file cannot be opened or does not parse as a JSON object.
std::optional<boost::json::object>
parse_config(const char* filename)
{
    try
    {
        std::ifstream in(filename, std::ios::in | std::ios::binary);
        if (in)
        {
            std::stringstream contents;
            contents << in.rdbuf();
            in.close();
            // Do not echo the raw config to stdout (the old behavior): it
            // can contain credentials such as database passwords.
            boost::json::value value = boost::json::parse(contents.str());
            return value.as_object();
        }
    }
    catch (std::exception const& e)
    {
        // Report failures on stderr rather than stdout.
        std::cerr << e.what() << std::endl;
    }
    return {};
}
// Resolve the configured severity name (default "info") to a boost.log
// severity and install it as the global log filter. Unrecognized names fall
// back to info with a warning.
void
initLogLevel(boost::json::object const& config)
{
    std::string levelName = "info";
    if (config.contains("log_level"))
        levelName = std::string{config.at("log_level").as_string().c_str()};

    namespace trivial = boost::log::trivial;
    std::optional<trivial::severity_level> threshold;
    if (boost::iequals(levelName, "trace"))
        threshold = trivial::trace;
    else if (boost::iequals(levelName, "debug"))
        threshold = trivial::debug;
    else if (boost::iequals(levelName, "info"))
        threshold = trivial::info;
    else if (
        boost::iequals(levelName, "warning") ||
        boost::iequals(levelName, "warn"))
        threshold = trivial::warning;
    else if (boost::iequals(levelName, "error"))
        threshold = trivial::error;
    else if (boost::iequals(levelName, "fatal"))
        threshold = trivial::fatal;

    if (!threshold)
    {
        BOOST_LOG_TRIVIAL(warning) << "Unrecognized log level: " << levelName
                                   << ". Setting log level to info";
        threshold = trivial::info;
    }
    boost::log::core::get()->set_filter(trivial::severity >= *threshold);
    BOOST_LOG_TRIVIAL(info) << "Log level = " << levelName;
}
// Run the io_context on `numThreads` threads (the calling thread plus
// numThreads - 1 workers) and block until the context stops.
// BUG FIX: the workers are now joined before returning — the original let
// the std::thread destructors run on joinable threads, which calls
// std::terminate once ioc.run() returns. The loop is also safe for
// numThreads == 0 (the old unsigned countdown would wrap around).
void
start(boost::asio::io_context& ioc, std::uint32_t numThreads)
{
    std::vector<std::thread> workers;
    if (numThreads > 1)
        workers.reserve(numThreads - 1);
    for (std::uint32_t i = 1; i < numThreads; ++i)
        workers.emplace_back([&ioc] { ioc.run(); });
    ioc.run();
    for (auto& worker : workers)
        worker.join();
}
// Entry point: load the config, construct the backend, subscription manager,
// ETL machinery and servers, then run the io_context until shutdown.
int
main(int argc, char* argv[])
{
    // Check command line arguments.
    if (argc != 2)
    {
        std::cerr << "Usage: websocket-server-async "
                     "<config_file> \n"
                  << "Example:\n"
                  << "    websocket-server-async config.json \n";
        return EXIT_FAILURE;
    }
    auto const config = parse_config(argv[1]);
    if (!config)
    {
        std::cerr << "Couldn't parse config. Exiting..." << std::endl;
        return EXIT_FAILURE;
    }
    initLogLevel(*config);
    auto const threads = config->contains("workers")
        ? config->at("workers").as_int64()
        : std::thread::hardware_concurrency();
    if (threads <= 0)
    {
        // (old message said "less than 0", but this also rejects 0)
        BOOST_LOG_TRIVIAL(fatal) << "Number of workers must be at least 1";
        return EXIT_FAILURE;
    }
    BOOST_LOG_TRIVIAL(info) << "Number of workers = " << threads;
    // io context to handle all incoming requests, as well as other things
    // This is not the only io context in the application.
    // The concurrency hint is an int; threads is int64_t, and a braced init
    // would be a narrowing conversion, so cast explicitly.
    boost::asio::io_context ioc{static_cast<int>(threads)};
    // Rate limiter, to prevent abuse
    DOSGuard dosGuard{config.value(), ioc};
    // Interface to the database
    std::shared_ptr<BackendInterface> backend{Backend::make_Backend(*config)};
    // Manages clients subscribed to streams
    std::shared_ptr<SubscriptionManager> subscriptions{
        SubscriptionManager::make_SubscriptionManager()};
    // Tracks which ledgers have been validated by the
    // network
    std::shared_ptr<NetworkValidatedLedgers> ledgers{
        NetworkValidatedLedgers::make_ValidatedLedgers()};
    // Handles the connection to one or more rippled nodes.
    // ETL uses the balancer to extract data.
    // The server uses the balancer to forward RPCs to a rippled node.
    // The balancer itself publishes to streams (transactions_proposed and
    // accounts_proposed)
    auto balancer = ETLLoadBalancer::make_ETLLoadBalancer(
        *config, ioc, backend, subscriptions, ledgers);
    // ETL is responsible for writing and publishing to streams. In read-only
    // mode, ETL only publishes
    auto etl = ReportingETL::make_ReportingETL(
        *config, ioc, backend, subscriptions, balancer, ledgers);
    // The server handles incoming RPCs
    auto httpServer = Server::make_HttpServer(
        *config, ioc, backend, subscriptions, balancer, dosGuard);
    // Blocks until stopped.
    // When stopped, shared_ptrs fall out of scope
    // Calls destructors on all resources, and destructs in order
    start(ioc, static_cast<std::uint32_t>(threads));
    return EXIT_SUCCESS;
}