Commit: Add cassandra
Repository: https://github.com/XRPLF/clio.git
@@ -7,6 +7,7 @@
 # Official repository: https://github.com/boostorg/beast
 #

+set(CMAKE_VERBOSE_MAKEFILE TRUE)
 project(reporting)
 cmake_minimum_required(VERSION 3.17)
 set (CMAKE_CXX_STANDARD 17)
@@ -15,11 +16,16 @@ set(Boost_USE_STATIC_LIBS ON)
 set(Boost_USE_MULTITHREADED ON)
 set(Boost_USE_STATIC_RUNTIME ON)

-FIND_PACKAGE( Boost 1.75 COMPONENTS log log_setup thread system REQUIRED )
+FIND_PACKAGE( Boost 1.75 COMPONENTS filesystem log log_setup thread system REQUIRED )

 add_executable (reporting
     websocket_server_async.cpp
 )
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/deps")
+include(ExternalProject)
+message(${CMAKE_CURRENT_BINARY_DIR})
+message(${CMAKE_MODULE_PATH})
+include(cassandra)



@@ -83,7 +89,7 @@ target_include_directories (grpc_pbufs SYSTEM PUBLIC ${GRPC_GEN_DIR})
 target_link_libraries (grpc_pbufs ${_REFLECTION} ${_PROTOBUF_LIBPROTOBUF} ${_GRPC_GRPCPP})


-target_sources(reporting PRIVATE reporting/ETLSource.cpp)
+target_sources(reporting PRIVATE reporting/ETLSource.cpp reporting/ReportingBackend.cpp)


 message(${Boost_LIBRARIES})
@@ -25,32 +25,33 @@
 #include <boost/log/trivial.hpp>
 #include <reporting/ETLSource.h>

-namespace ripple {

 // Create ETL source without grpc endpoint
 // Fetch ledger and load initial ledger will fail for this source
 // Primarly used in read-only mode, to monitor when ledgers are validated
-ETLSource::ETLSource(std::string ip, std::string wsPort)
-    : ip_(ip)
-    , wsPort_(wsPort)
-    , ws_(std::make_unique<
+ETLSource::ETLSource(
+    boost::json::object const& config,
+    CassandraFlatMapBackend& backend)
+    : ws_(std::make_unique<
          boost::beast::websocket::stream<boost::beast::tcp_stream>>(
          boost::asio::make_strand(ioc_)))
     , resolver_(boost::asio::make_strand(ioc_))
     , timer_(ioc_)
+    , backend_(backend)
 {
-}
-
-ETLSource::ETLSource(std::string ip, std::string wsPort, std::string grpcPort)
-    : ip_(ip)
-    , wsPort_(wsPort)
-    , grpcPort_(grpcPort)
-    , ws_(std::make_unique<
-          boost::beast::websocket::stream<boost::beast::tcp_stream>>(
-          boost::asio::make_strand(ioc_)))
-    , resolver_(boost::asio::make_strand(ioc_))
-    , timer_(ioc_)
-{
+    if (config.contains("ip"))
+    {
+        auto ipJs = config.at("ip").as_string();
+        ip_ = {ipJs.c_str(), ipJs.size()};
+    }
+    if (config.contains("ws_port"))
+    {
+        auto portjs = config.at("ws_port").as_string();
+        wsPort_ = {portjs.c_str(), portjs.size()};
+    }
+    if (config.contains("grpc_port"))
+    {
+        auto portjs = config.at("grpc_port").as_string();
+        grpcPort_ = {portjs.c_str(), portjs.size()};
         try
         {
             boost::asio::ip::tcp::endpoint endpoint{
@@ -58,7 +59,8 @@ ETLSource::ETLSource(std::string ip, std::string wsPort, std::string grpcPort)
             std::stringstream ss;
             ss << endpoint;
             stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
-                grpc::CreateChannel(ss.str(), grpc::InsecureChannelCredentials()));
+                grpc::CreateChannel(
+                    ss.str(), grpc::InsecureChannelCredentials()));
             BOOST_LOG_TRIVIAL(debug) << "Made stub for remote = " << toString();
         }
         catch (std::exception const& e)
@@ -68,6 +70,7 @@ ETLSource::ETLSource(std::string ip, std::string wsPort, std::string grpcPort)
                 << " . Remote = " << toString();
         }
     }
+}

 void
 ETLSource::reconnect(boost::beast::error_code ec)
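Editor's note: the new constructor pulls its connection parameters out of a boost::json::object instead of taking ip/port strings directly. A minimal standalone sketch of that extraction pattern follows; the helper name getString and the sample values are hypothetical, only the keys "ip", "ws_port", and "grpc_port" come from the diff above, and Boost.JSON must be linked (or used header-only via <boost/json/src.hpp>).

// --- sketch (not part of the commit) ---
#include <boost/json.hpp>
#include <iostream>
#include <string>

// Hypothetical helper: return the string stored under `key`, or `fallback`
// when the key is absent, mirroring the contains()/at()/as_string() pattern
// used by the new ETLSource constructor.
static std::string
getString(
    boost::json::object const& config,
    std::string const& key,
    std::string const& fallback = "")
{
    if (!config.contains(key))
        return fallback;
    auto const& js = config.at(key).as_string();
    return {js.c_str(), js.size()};
}

int
main()
{
    boost::json::object config =
        boost::json::parse(
            R"({"ip":"127.0.0.1","ws_port":"6006","grpc_port":"50051"})")
            .as_object();
    std::cout << getString(config, "ip") << ":" << getString(config, "ws_port")
              << " grpc=" << getString(config, "grpc_port") << "\n";
}
// --- end sketch ---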
@@ -388,6 +391,7 @@ public:
     process(
         std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
         grpc::CompletionQueue& cq,
+        CassandraFlatMapBackend& backend,
         bool abort = false)
     {
         std::cout << "Processing calldata" << std::endl;
@@ -429,12 +433,10 @@ public:

         for (auto& obj : *(cur_->mutable_ledger_objects()->mutable_objects()))
         {
-            /*
-            flatMapBackend.store(
+            backend.store(
                 std::move(*obj.mutable_key()),
                 request_.ledger().sequence(),
                 std::move(*obj.mutable_data()));
-            */
         }

         return more ? CallStatus::MORE : CallStatus::DONE;
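Editor's note: the loop above now writes every downloaded ledger object straight into the backend as (key, ledger sequence, blob). The commit's real sink is the asynchronous CassandraFlatMapBackend; the class below is a purely hypothetical in-memory stand-in with the same call shape, included only to illustrate what the ETL loop expects from store().

// --- sketch (not part of the commit) ---
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>

class InMemoryFlatMapBackend  // hypothetical stand-in, not the project's class
{
    // (object key, ledger sequence) -> serialized ledger object
    std::map<std::pair<std::string, std::uint32_t>, std::string> objects_;

public:
    void
    store(std::string&& key, std::uint32_t ledgerSequence, std::string&& blob)
    {
        objects_[{std::move(key), ledgerSequence}] = std::move(blob);
    }

    std::size_t
    size() const
    {
        return objects_.size();
    }
};

int
main()
{
    InMemoryFlatMapBackend backend;
    backend.store("some-key", 60000000, "serialized-object-bytes");
    std::cout << backend.size() << " object(s) stored\n";
}
// --- end sketch ---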
@@ -505,7 +507,7 @@ ETLSource::loadInitialLedger(uint32_t sequence)
         {
             BOOST_LOG_TRIVIAL(debug)
                 << "Marker prefix = " << ptr->getMarkerPrefix();
-            auto result = ptr->process(stub_, cq, abort);
+            auto result = ptr->process(stub_, cq, backend_, abort);
             if (result != AsyncCallData::CallStatus::MORE)
             {
                 numFinished++;
@@ -550,34 +552,18 @@ ETLSource::fetchLedger(uint32_t ledgerSequence, bool getObjects)
     }
     return {status, std::move(response)};
 }
-/*
-ETLLoadBalancer::ETLLoadBalancer(ReportingETL& etl)
-    : etl_(etl)
-    , journal_(etl_.getApplication().journal("ReportingETL::LoadBalancer"))
+ETLLoadBalancer::ETLLoadBalancer(
+    boost::json::array const& config,
+    CassandraFlatMapBackend& backend)
 {
-}
-
-void
-ETLLoadBalancer::add(
-    std::string& host,
-    std::string& websocketPort,
-    std::string& grpcPort)
+    for (auto& entry : config)
     {
-        std::unique_ptr<ETLSource> ptr =
-            std::make_unique<ETLSource>(host, websocketPort, grpcPort, etl_);
-        sources_.push_back(std::move(ptr));
+        std::unique_ptr<ETLSource> source =
+            std::make_unique<ETLSource>(entry.as_object(), backend);
+        sources_.push_back(std::move(source));
         BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - "
                                 << sources_.back()->toString();
     }
-
-void
-ETLLoadBalancer::add(std::string& host, std::string& websocketPort)
-{
-    std::unique_ptr<ETLSource> ptr =
-        std::make_unique<ETLSource>(host, websocketPort, etl_);
-    sources_.push_back(std::move(ptr));
-    BOOST_LOG_TRIVIAL(info) << __func__ << " : added etl source - "
-                            << sources_.back()->toString();
 }

 void
@@ -588,12 +574,9 @@ ETLLoadBalancer::loadInitialLedger(uint32_t sequence)
             bool res = source->loadInitialLedger(sequence);
             if (!res)
             {
-                BOOST_LOG_TRIVIAL(error) << "Failed to download initial
-ledger.
-"
+                BOOST_LOG_TRIVIAL(error) << "Failed to download initial ledger."
                                          << " Sequence = " << sequence
-                                         << " source = " <<
-source->toString();
+                                         << " source = " << source->toString();
             }
             return res;
         },
@@ -634,6 +617,7 @@ ETLLoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects)
     return {};
 }

+/*
 std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
 ETLLoadBalancer::getP2pForwardingStub() const
 {
@@ -691,8 +675,9 @@ ETLSource::getP2pForwardingStub() const
         return org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
             grpc::CreateChannel(
                 beast::IP::Endpoint(
-                    boost::asio::ip::make_address(ip_),
-                    std::stoi(grpcPort_)) .to_string(), grpc::InsecureChannelCredentials()));
+                    boost::asio::ip::make_address(ip_), std::stoi(grpcPort_))
+                    .to_string(),
+                grpc::InsecureChannelCredentials()));
     }
     catch (std::exception const&)
     {
@@ -705,8 +690,7 @@ Json::Value
 ETLSource::forwardToP2p(RPC::JsonContext& context) const
 {
     BOOST_LOG_TRIVIAL(debug) << "Attempting to forward request to tx. "
-                             << "request = " <<
-context.params.toStyledString();
+                             << "request = " << context.params.toStyledString();

     Json::Value response;
     if (!connected_)
@@ -720,7 +704,8 @@ context.params.toStyledString();
     namespace websocket = beast::websocket; // from
 <boost / beast / websocket.hpp> namespace net = boost::asio; // from
 <boost / asio.hpp> using tcp = boost::asio::ip::tcp; // from
-<boost/asio/ip/tcp.hpp> Json::Value& request = context.params; try
+<boost / asio / ip / tcp.hpp> Json::Value& request = context.params;
+    try
     {
         // The io_context is required for all I/O
         net::io_context ioc;
@@ -753,8 +738,8 @@ https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg
                     http::field::forwarded,
                     "for=" + context.consumer.to_string());
             }));
-        BOOST_LOG_TRIVIAL(debug) << "client ip: " <<
-context.consumer.to_string();
+        BOOST_LOG_TRIVIAL(debug)
+            << "client ip: " << context.consumer.to_string();

         BOOST_LOG_TRIVIAL(debug) << "Performing websocket handshake";
         // Perform the websocket handshake
@@ -787,7 +772,7 @@ context.consumer.to_string();
     return response;
 }
 }
+*/
 template <class Func>
 bool
 ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence)
@@ -796,7 +781,7 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence)
     auto sourceIdx = rand() % sources_.size();
     auto numAttempts = 0;

-    while (!etl_.isStopping())
+    while (true)
     {
         auto& source = sources_[sourceIdx];

@@ -836,11 +821,7 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence)
             numAttempts++;
             if (numAttempts % sources_.size() == 0)
             {
-                // If another process loaded the ledger into the database, we
-can
-                // abort trying to fetch the ledger from a transaction
-processing
-                // process
+                /*
                 if (etl_.getApplication().getLedgerMaster().getLedgerBySeq(
                         ledgerSequence))
                 {
@@ -851,6 +832,7 @@ processing
                         << " Sequence = " << ledgerSequence;
                     break;
                 }
+                */
                 BOOST_LOG_TRIVIAL(error)
                     << __func__ << " : "
                     << "Error executing function "
@@ -859,7 +841,7 @@ processing
             std::this_thread::sleep_for(std::chrono::seconds(2));
         }
     }
-    return !etl_.isStopping();
+    return false;
 }

 void
@@ -875,5 +857,3 @@ ETLLoadBalancer::stop()
     for (auto& source : sources_)
         source->stop();
 }
-*/
-} // namespace ripple
@@ -25,14 +25,11 @@
 #include <boost/beast/core.hpp>
 #include <boost/beast/core/string.hpp>
 #include <boost/beast/websocket.hpp>
+#include <reporting/ReportingBackend.h>

 #include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
 #include <grpcpp/grpcpp.h>

-namespace ripple {
-
-class ReportingETL;
-
 /// This class manages a connection to a single ETL source. This is almost
 /// always a p2p node, but really could be another reporting node. This class
 /// subscribes to the ledgers and transactions_proposed streams of the
@@ -88,6 +85,8 @@ class ETLSource
     // used for retrying connections
     boost::asio::steady_timer timer_;

+    CassandraFlatMapBackend& backend_;
+
 public:
     bool
     isConnected() const
@@ -112,10 +111,9 @@ public:
     /// Create ETL source without gRPC endpoint
     /// Fetch ledger and load initial ledger will fail for this source
     /// Primarly used in read-only mode, to monitor when ledgers are validated
-    ETLSource(std::string ip, std::string wsPort);
-
-    /// Create ETL source with gRPC endpoint
-    ETLSource(std::string ip, std::string wsPort, std::string grpcPort);
+    ETLSource(
+        boost::json::object const& config,
+        CassandraFlatMapBackend& backend);

     /// @param sequence ledger sequence to check for
     /// @return true if this source has the desired ledger
@@ -269,8 +267,6 @@ public:
     getP2pForwardingStub() const;
     */
 };
-/*
-*
 /// This class is used to manage connections to transaction processing processes
 /// This class spawns a listener for each etl source, which listens to messages
 /// on the ledgers stream (to keep track of which ledgers have been validated by
@@ -280,29 +276,14 @@ public:
 class ETLLoadBalancer
 {
 private:
-    ReportingETL& etl_;
+    // ReportingETL& etl_;

-    beast::Journal journal_;
-
     std::vector<std::unique_ptr<ETLSource>> sources_;

 public:
-    ETLLoadBalancer(ReportingETL& etl);
-
-    /// Add an ETL source
-    /// @param host host or ip of ETL source
-    /// @param websocketPort port where ETL source accepts websocket connections
-    /// @param grpcPort port where ETL source accepts gRPC requests
-    void
-    add(std::string& host, std::string& websocketPort, std::string& grpcPort);
-
-    /// Add an ETL source without gRPC support. This source will send messages
-    /// on the ledgers and transactions_proposed streams, but will not be able
-    /// to handle the gRPC requests that are used for ETL
-    /// @param host host or ip of ETL source
-    /// @param websocketPort port where ETL source accepts websocket connections
-    void
-    add(std::string& host, std::string& websocketPort);
+    ETLLoadBalancer(
+        boost::json::array const& config,
+        CassandraFlatMapBackend& backend);

     /// Load the initial ledger, writing data to the queue
     /// @param sequence sequence of ledger to download
@@ -336,47 +317,47 @@ public:
     /// to clients).
     /// @param in ETLSource in question
     /// @return true if messages should be forwarded
-    bool
-    shouldPropagateTxnStream(ETLSource* in) const
-    {
-        for (auto& src : sources_)
-        {
-            assert(src);
-            // We pick the first ETLSource encountered that is connected
-            if (src->isConnected())
-            {
-                if (src.get() == in)
-                    return true;
-                else
-                    return false;
-            }
-        }
+    // bool
+    // shouldPropagateTxnStream(ETLSource* in) const
+    // {
+    //     for (auto& src : sources_)
+    //     {
+    //         assert(src);
+    //         // We pick the first ETLSource encountered that is connected
+    //         if (src->isConnected())
+    //         {
+    //             if (src.get() == in)
+    //                 return true;
+    //             else
+    //                 return false;
+    //         }
+    //     }
+    //
+    //     // If no sources connected, then this stream has not been
+    //     forwarded. return true;
+    // }

-        // If no sources connected, then this stream has not been forwarded.
-        return true;
-    }
-
-    Json::Value
-    toJson() const
-    {
-        Json::Value ret(Json::arrayValue);
-        for (auto& src : sources_)
-        {
-            ret.append(src->toJson());
-        }
-        return ret;
-    }
-
-    /// Randomly select a p2p node to forward a gRPC request to
-    /// @return gRPC stub to forward requests to p2p node
-    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
-    getP2pForwardingStub() const;
-
-    /// Forward a JSON RPC request to a randomly selected p2p node
-    /// @param context context of the request
-    /// @return response received from p2p node
-    Json::Value
-    forwardToP2p(RPC::JsonContext& context) const;
+    // Json::Value
+    // toJson() const
+    // {
+    //     Json::Value ret(Json::arrayValue);
+    //     for (auto& src : sources_)
+    //     {
+    //         ret.append(src->toJson());
+    //     }
+    //     return ret;
+    // }
+    //
+    // /// Randomly select a p2p node to forward a gRPC request to
+    // /// @return gRPC stub to forward requests to p2p node
+    // std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
+    // getP2pForwardingStub() const;
+    //
+    // /// Forward a JSON RPC request to a randomly selected p2p node
+    // /// @param context context of the request
+    // /// @return response received from p2p node
+    // Json::Value
+    // forwardToP2p(RPC::JsonContext& context) const;

 private:
     /// f is a function that takes an ETLSource as an argument and returns a
@@ -393,6 +374,4 @@ private:
     bool
     execute(Func f, uint32_t ledgerSequence);
 };
-*/
-} // namespace ripple
 #endif
reporting/ReportingBackend.cpp (new file, 147 lines)
@@ -0,0 +1,147 @@
+#include <reporting/ReportingBackend.h>
+// Process the result of an asynchronous write. Retry on error
+// @param fut cassandra future associated with the write
+// @param cbData struct that holds the request parameters
+void
+flatMapWriteCallback(CassFuture* fut, void* cbData)
+{
+    CassandraFlatMapBackend::WriteCallbackData& requestParams =
+        *static_cast<CassandraFlatMapBackend::WriteCallbackData*>(cbData);
+    CassandraFlatMapBackend& backend = *requestParams.backend;
+    auto rc = cass_future_error_code(fut);
+    if (rc != CASS_OK)
+    {
+        BOOST_LOG_TRIVIAL(error)
+            << "ERROR!!! Cassandra insert error: " << rc << ", "
+            << cass_error_desc(rc) << ", retrying ";
+        // exponential backoff with a max wait of 2^10 ms (about 1 second)
+        auto wait = std::chrono::milliseconds(
+            lround(std::pow(2, std::min(10u, requestParams.currentRetries))));
+        ++requestParams.currentRetries;
+        std::shared_ptr<boost::asio::steady_timer> timer =
+            std::make_shared<boost::asio::steady_timer>(
+                backend.ioContext_, std::chrono::steady_clock::now() + wait);
+        timer->async_wait([timer, &requestParams, &backend](
+                              const boost::system::error_code& error) {
+            backend.write(requestParams, true);
+        });
+    }
+    else
+    {
+        --(backend.numRequestsOutstanding_);
+
+        backend.throttleCv_.notify_all();
+        if (backend.numRequestsOutstanding_ == 0)
+            backend.syncCv_.notify_all();
+        delete &requestParams;
+    }
+}
+void
+flatMapWriteTransactionCallback(CassFuture* fut, void* cbData)
+{
+    CassandraFlatMapBackend::WriteTransactionCallbackData& requestParams =
+        *static_cast<CassandraFlatMapBackend::WriteTransactionCallbackData*>(
+            cbData);
+    CassandraFlatMapBackend& backend = *requestParams.backend;
+    auto rc = cass_future_error_code(fut);
+    if (rc != CASS_OK)
+    {
+        BOOST_LOG_TRIVIAL(error)
+            << "ERROR!!! Cassandra insert error: " << rc << ", "
+            << cass_error_desc(rc) << ", retrying ";
+        // exponential backoff with a max wait of 2^10 ms (about 1 second)
+        auto wait = std::chrono::milliseconds(
+            lround(std::pow(2, std::min(10u, requestParams.currentRetries))));
+        ++requestParams.currentRetries;
+        std::shared_ptr<boost::asio::steady_timer> timer =
+            std::make_shared<boost::asio::steady_timer>(
+                backend.ioContext_, std::chrono::steady_clock::now() + wait);
+        timer->async_wait([timer, &requestParams, &backend](
+                              const boost::system::error_code& error) {
+            backend.writeTransaction(requestParams, true);
+        });
+    }
+    else
+    {
+        --(backend.numRequestsOutstanding_);
+
+        backend.throttleCv_.notify_all();
+        if (backend.numRequestsOutstanding_ == 0)
+            backend.syncCv_.notify_all();
+        delete &requestParams;
+    }
+}
+
+// Process the result of an asynchronous read. Retry on error
+// @param fut cassandra future associated with the read
+// @param cbData struct that holds the request parameters
+void
+flatMapReadCallback(CassFuture* fut, void* cbData)
+{
+    CassandraFlatMapBackend::ReadCallbackData& requestParams =
+        *static_cast<CassandraFlatMapBackend::ReadCallbackData*>(cbData);
+
+    CassError rc = cass_future_error_code(fut);
+
+    if (rc != CASS_OK)
+    {
+        BOOST_LOG_TRIVIAL(warning) << "Cassandra fetch error : " << rc << " : "
+                                   << cass_error_desc(rc) << " - retrying";
+        // Retry right away. The only time the cluster should ever be overloaded
+        // is when the very first ledger is being written in full (millions of
+        // writes at once), during which no reads should be occurring. If reads
+        // are timing out, the code/architecture should be modified to handle
+        // greater read load, as opposed to just exponential backoff
+        requestParams.backend.read(requestParams);
+    }
+    else
+    {
+        auto finish = [&requestParams]() {
+            size_t batchSize = requestParams.batchSize;
+            if (++(requestParams.numFinished) == batchSize)
+                requestParams.cv.notify_all();
+        };
+        CassResult const* res = cass_future_get_result(fut);
+
+        CassRow const* row = cass_result_first_row(res);
+        if (!row)
+        {
+            cass_result_free(res);
+            BOOST_LOG_TRIVIAL(error) << "Cassandra fetch get row error : " << rc
+                                     << ", " << cass_error_desc(rc);
+            finish();
+            return;
+        }
+        cass_byte_t const* buf;
+        std::size_t bufSize;
+        rc = cass_value_get_bytes(cass_row_get_column(row, 0), &buf, &bufSize);
+        if (rc != CASS_OK)
+        {
+            cass_result_free(res);
+            BOOST_LOG_TRIVIAL(error)
+                << "Cassandra fetch get bytes error : " << rc << ", "
+                << cass_error_desc(rc);
+            finish();
+            return;
+        }
+
+        std::vector<unsigned char> txn{buf, buf + bufSize};
+        cass_byte_t const* buf2;
+        std::size_t buf2Size;
+        rc =
+            cass_value_get_bytes(cass_row_get_column(row, 1), &buf2, &buf2Size);
+        if (rc != CASS_OK)
+        {
+            cass_result_free(res);
+            BOOST_LOG_TRIVIAL(error)
+                << "Cassandra fetch get bytes error : " << rc << ", "
+                << cass_error_desc(rc);
+            finish();
+            return;
+        }
+        std::vector<unsigned char> meta{buf2, buf2 + buf2Size};
+        requestParams.result = std::make_pair(std::move(txn), std::move(meta));
+        cass_result_free(res);
+        finish();
+    }
+}
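Editor's note: the two write callbacks above share one retry policy: on a Cassandra error, wait 2^min(retries, 10) milliseconds on an asio timer and then re-issue the write. The condensed, self-contained sketch below shows only that scheduling pattern; retryWithBackoff and the std::function parameter are illustrative stand-ins for the backend's write()/writeTransaction() calls, not the project's API.

// --- sketch (not part of the commit) ---
#include <boost/asio.hpp>
#include <chrono>
#include <cmath>
#include <functional>
#include <iostream>
#include <memory>

// Schedule `retry` to run after a capped power-of-two delay, keeping the
// timer alive by capturing its shared_ptr in the completion handler.
void
retryWithBackoff(
    boost::asio::io_context& ioc,
    unsigned currentRetries,
    std::function<void()> retry)
{
    // exponential backoff with a max wait of 2^10 ms (about 1 second)
    auto wait = std::chrono::milliseconds(
        std::lround(std::pow(2, std::min(10u, currentRetries))));
    auto timer = std::make_shared<boost::asio::steady_timer>(
        ioc, std::chrono::steady_clock::now() + wait);
    timer->async_wait(
        [timer, retry](boost::system::error_code const&) { retry(); });
}

int
main()
{
    boost::asio::io_context ioc;
    retryWithBackoff(ioc, 3, [] { std::cout << "retrying write\n"; });
    ioc.run();  // handler fires after roughly 8 ms
}
// --- end sketch ---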
reporting/ReportingBackend.h (new file, 1156 lines)
(File diff suppressed because it is too large.)
@@ -24,6 +24,8 @@
 #include <functional>
 #include <iostream>
 #include <memory>
+#include <reporting/ETLSource.h>
+#include <reporting/ReportingBackend.h>
 #include <sstream>
 #include <string>
 #include <thread>
@@ -267,6 +269,26 @@ main(int argc, char* argv[])
     auto const port = static_cast<unsigned short>(std::atoi(argv[2]));
     auto const threads = std::max<int>(1, std::atoi(argv[3]));
     auto const config = parse_config(argv[4]);
+    if (!config)
+    {
+        std::cerr << "couldnt parse config. Exiting..." << std::endl;
+        return EXIT_FAILURE;
+    }
+    auto cassConfig =
+        (*config).at("database").as_object().at("cassandra").as_object();
+    std::cout << cassConfig << std::endl;
+
+    CassandraFlatMapBackend backend{cassConfig};
+    backend.open();
+    boost::json::array sources = (*config).at("etl_sources").as_array();
+    if (!sources.size())
+    {
+        std::cerr << "no etl sources listed in config. exiting..." << std::endl;
+        return EXIT_FAILURE;
+    }
+    ETLSource source{sources[0].as_object(), backend};
+    source.start();
+    // source.loadInitialLedger(60000000);

     // The io_context is required for all I/O
     net::io_context ioc{threads};
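Editor's note: the new main() expects a config document with a "database"."cassandra" object and an "etl_sources" array whose entries carry the "ip"/"ws_port"/"grpc_port" fields read by ETLSource. A minimal sketch of that layout and the same access pattern follows; the sample values are hypothetical, and the contents of the cassandra sub-object are backend-specific and not shown in this diff, so they are left empty here.

// --- sketch (not part of the commit) ---
#include <boost/json.hpp>
#include <boost/json/src.hpp>  // header-only Boost.JSON
#include <iostream>

int
main()
{
    auto config = boost::json::parse(R"({
        "database": { "cassandra": { } },
        "etl_sources": [
            { "ip": "127.0.0.1", "ws_port": "6006", "grpc_port": "50051" }
        ]
    })").as_object();

    // Same access pattern as the new code in main():
    auto cassConfig =
        config.at("database").as_object().at("cassandra").as_object();
    auto sources = config.at("etl_sources").as_array();
    std::cout << cassConfig << " " << sources.size() << " etl source(s)\n";
}
// --- end sketch ---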