Add database timeout exception. Clear out incomplete Cassandra data. Add ledger_entry RPC.
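In short: Cassandra reads that time out now raise DatabaseTimeout and surface an error to the client instead of being retried server-side, the backend truncates partially written tables on startup when no complete ledger is found, and a ledger_entry RPC is wired through the command map, handlers, and test script. As a quick sketch of exercising the new RPC (host, port, and the object key below are placeholders; the request shape follows the ledger_entry coroutine this commit adds to test.py):

import asyncio, json
import websockets

async def fetch_ledger_entry():
    # Placeholder host/port and object key; mirrors the ledger_entry
    # coroutine added to test.py in this commit.
    async with websockets.connect('ws://127.0.0.1:8080') as ws:
        await ws.send(json.dumps({
            "command": "ledger_entry",
            "index": "00" * 32,        # hypothetical 256-bit object key (hex)
            "ledger_index": 1000,      # hypothetical ledger sequence
            "binary": True}))
        print(json.loads(await ws.recv()))

asyncio.get_event_loop().run_until_complete(fetch_ledger_entry())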
@@ -68,7 +68,8 @@ target_sources(reporting PRIVATE
     handlers/LedgerData.cpp
     handlers/BookOffers.cpp
     handlers/LedgerRange.cpp
-    handlers/Ledger.cpp)
+    handlers/Ledger.cpp
+    handlers/LedgerEntry.cpp)


 message(${Boost_LIBRARIES})
@@ -22,7 +22,6 @@
 #include <boost/json.hpp>
 #include <handlers/RPCHelpers.h>
 #include <reporting/BackendInterface.h>
-#include <reporting/Pg.h>

 // {
 //   account: <ident>,
@@ -445,6 +445,22 @@ CassandraBackend::open()

     cass_cluster_set_connect_timeout(cluster, 10000);

+    auto executeSimpleStatement = [this](std::string const& query) {
+        CassStatement* statement = makeStatement(query.c_str(), 0);
+        CassFuture* fut = cass_session_execute(session_.get(), statement);
+        CassError rc = cass_future_error_code(fut);
+        cass_future_free(fut);
+        cass_statement_free(statement);
+        if (rc != CASS_OK && rc != CASS_ERROR_SERVER_INVALID_QUERY)
+        {
+            std::stringstream ss;
+            ss << "nodestore: Error executing simple statement: " << rc << ", "
+               << cass_error_desc(rc) << " - " << query;
+            BOOST_LOG_TRIVIAL(error) << ss.str();
+            return false;
+        }
+        return true;
+    };
     CassStatement* statement;
     CassFuture* fut;
     bool setupSessionAndTable = false;
@@ -467,23 +483,6 @@ CassandraBackend::open()
             continue;
         }
-
-        auto executeSimpleStatement = [this](std::string const& query) {
-            CassStatement* statement = makeStatement(query.c_str(), 0);
-            CassFuture* fut = cass_session_execute(session_.get(), statement);
-            CassError rc = cass_future_error_code(fut);
-            cass_future_free(fut);
-            cass_statement_free(statement);
-            if (rc != CASS_OK && rc != CASS_ERROR_SERVER_INVALID_QUERY)
-            {
-                std::stringstream ss;
-                ss << "nodestore: Error executing simple statement: " << rc
-                   << ", " << cass_error_desc(rc) << " - " << query;
-                BOOST_LOG_TRIVIAL(error) << ss.str();
-                return false;
-            }
-            return true;
-        };
-
         std::stringstream query;
         query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "objects"
               << " ( key blob, sequence bigint, object blob, PRIMARY "
@@ -497,6 +496,18 @@ CassandraBackend::open()
               << " LIMIT 1";
         if (!executeSimpleStatement(query.str()))
             continue;

+        query = {};
+        query << "CREATE INDEX ON " << tablePrefix << "objects(sequence)";
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
+        query = {};
+        query << "SELECT * FROM " << tablePrefix << "objects WHERE sequence=1"
+              << " LIMIT 1";
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
         query = {};
         query
             << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "transactions"
@@ -505,17 +516,24 @@ CassandraBackend::open()
         if (!executeSimpleStatement(query.str()))
             continue;

-        query = {};
-        query << "CREATE INDEX ON " << tablePrefix
-              << "transactions(ledger_sequence)";
-        if (!executeSimpleStatement(query.str()))
-            continue;
-
         query = {};
         query << "SELECT * FROM " << tablePrefix << "transactions"
               << " LIMIT 1";
         if (!executeSimpleStatement(query.str()))
             continue;

+        query = {};
+        query << "CREATE INDEX ON " << tablePrefix
+              << "transactions(ledger_sequence)";
+        if (!executeSimpleStatement(query.str()))
+            continue;
+        query = {};
+        query << "SELECT * FROM " << tablePrefix
+              << "transactions WHERE ledger_sequence = 1"
+              << " LIMIT 1";
+        if (!executeSimpleStatement(query.str()))
+            continue;
+
         query = {};
         query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "keys"
               << " ( key blob, created bigint, deleted bigint, PRIMARY KEY "
@@ -763,6 +781,43 @@ CassandraBackend::open()
         setupPreparedStatements = true;
     }

+    while (true)
+    {
+        std::this_thread::sleep_for(std::chrono::seconds(1));
+        if (!fetchLatestLedgerSequence())
+        {
+            std::stringstream query;
+            query << "TRUNCATE TABLE " << tablePrefix << "ledger_range";
+            if (!executeSimpleStatement(query.str()))
+                continue;
+            query = {};
+            query << "TRUNCATE TABLE " << tablePrefix << "ledgers";
+            if (!executeSimpleStatement(query.str()))
+                continue;
+            query = {};
+            query << "TRUNCATE TABLE " << tablePrefix << "ledger_hashes";
+            if (!executeSimpleStatement(query.str()))
+                continue;
+            query = {};
+            query << "TRUNCATE TABLE " << tablePrefix << "objects";
+            if (!executeSimpleStatement(query.str()))
+                continue;
+            query = {};
+            query << "TRUNCATE TABLE " << tablePrefix << "transactions";
+            if (!executeSimpleStatement(query.str()))
+                continue;
+            query = {};
+            query << "TRUNCATE TABLE " << tablePrefix << "account_tx";
+            if (!executeSimpleStatement(query.str()))
+                continue;
+            query = {};
+            query << "TRUNCATE TABLE " << tablePrefix << "books";
+            if (!executeSimpleStatement(query.str()))
+                continue;
+        }
+        break;
+    }
+
     if (config_.contains("max_requests_outstanding"))
     {
         maxRequestsOutstanding = config_["max_requests_outstanding"].as_int64();
@@ -461,11 +461,23 @@ public:
         cass_result_free(result_);
     }
 };
+inline bool
+isTimeout(CassError rc)
+{
+    if (rc == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or
+        rc == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
+        rc == CASS_ERROR_SERVER_UNAVAILABLE or
+        rc == CASS_ERROR_SERVER_OVERLOADED or
+        rc == CASS_ERROR_SERVER_READ_TIMEOUT)
+        return true;
+    return false;
+}
 template <class T, class F>
 class CassandraAsyncResult
 {
     T& requestParams_;
     CassandraResult result_;
+    bool timedOut_ = false;

 public:
     CassandraAsyncResult(T& requestParams, CassFuture* fut, F retry)
@@ -474,7 +486,14 @@ public:
         CassError rc = cass_future_error_code(fut);
         if (rc != CASS_OK)
         {
-            retry(requestParams_);
+            // TODO - should we ever be retrying requests? These are reads,
+            // and they usually only fail when the db is under heavy load. Seems
+            // best to just return an error to the client and have the client
+            // try again
+            if (isTimeout(rc))
+                timedOut_ = true;
+            else
+                retry(requestParams_);
         }
         else
         {
@@ -484,7 +503,7 @@ public:

     ~CassandraAsyncResult()
     {
-        if (!!result_)
+        if (!!result_ or timedOut_)
         {
             BOOST_LOG_TRIVIAL(trace) << "finished a request";
             size_t batchSize = requestParams_.batchSize;
@@ -498,6 +517,12 @@ public:
     {
         return result_;
     }
+
+    bool
+    timedOut()
+    {
+        return timedOut_;
+    }
 };

 class CassandraBackend : public BackendInterface
@@ -1093,6 +1118,11 @@ public:
         cv.wait(lck, [&numFinished, &numHashes]() {
             return numFinished == numHashes;
         });
+        for (auto const& res : results)
+        {
+            if (res.transaction.size() == 0)
+                throw DatabaseTimeout();
+        }

         BOOST_LOG_TRIVIAL(debug)
             << "Fetched " << numHashes << " transactions from Cassandra";
@@ -1169,6 +1199,11 @@ public:
         std::unique_lock<std::mutex> lck(mtx);
         cv.wait(
             lck, [&numFinished, &numKeys]() { return numFinished == numKeys; });
+        for (auto const& res : results)
+        {
+            if (res.size() == 0)
+                throw DatabaseTimeout();
+        }

         BOOST_LOG_TRIVIAL(trace)
             << "Fetched " << numKeys << " records from Cassandra";
@@ -1564,6 +1599,7 @@ public:
             ss << ", retrying";
             ss << ": " << cass_error_desc(rc);
             BOOST_LOG_TRIVIAL(warning) << ss.str();
+            std::this_thread::sleep_for(std::chrono::milliseconds(5));
         }
     } while (rc != CASS_OK);
     cass_future_free(fut);
@@ -1585,6 +1621,7 @@ public:
             ss << ", retrying";
             ss << ": " << cass_error_desc(rc);
             BOOST_LOG_TRIVIAL(warning) << ss.str();
+            std::this_thread::sleep_for(std::chrono::milliseconds(5));
         }
     } while (rc != CASS_OK);
     CassResult const* res = cass_future_get_result(fut);
@@ -1628,6 +1665,12 @@ public:
         ss << ": " << cass_error_desc(rc);
         BOOST_LOG_TRIVIAL(warning) << ss.str();
     }
+    if (isTimeout(rc))
+    {
+        cass_future_free(fut);
+        throw DatabaseTimeout();
+    }
+
     if (rc == CASS_ERROR_SERVER_INVALID_QUERY)
     {
         throw std::runtime_error("invalid query");
@@ -3,7 +3,7 @@
 namespace Backend {

 PostgresBackend::PostgresBackend(boost::json::object const& config)
-    : pgPool_(make_PgPool(config))
+    : pgPool_(make_PgPool(config)), writeConnection_(pgPool_)
 {
 }
 void
@@ -12,13 +12,29 @@ PostgresBackend::writeLedger(
     std::string&& ledgerHeader,
     bool isFirst) const
 {
-    ledgerHeader_ = ledgerInfo;
+    auto cmd = boost::format(
+        R"(INSERT INTO ledgers
+           VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");
+
+    auto ledgerInsert = boost::str(
+        cmd % ledgerInfo.seq % ripple::strHex(ledgerInfo.hash) %
+        ripple::strHex(ledgerInfo.parentHash) % ledgerInfo.drops.drops() %
+        ledgerInfo.closeTime.time_since_epoch().count() %
+        ledgerInfo.parentCloseTime.time_since_epoch().count() %
+        ledgerInfo.closeTimeResolution.count() % ledgerInfo.closeFlags %
+        ripple::strHex(ledgerInfo.accountHash) %
+        ripple::strHex(ledgerInfo.txHash));
+
+    auto res = writeConnection_(ledgerInsert.data());
+    abortWrite_ = !res;
 }

 void
 PostgresBackend::writeAccountTransactions(
     std::vector<AccountTransactionsData>&& data) const
 {
+    if (abortWrite_)
+        return;
     PgQuery pg(pgPool_);
     for (auto const& record : data)
     {
@@ -42,6 +58,8 @@ PostgresBackend::writeLedgerObject(
     bool isDeleted,
     std::optional<ripple::uint256>&& book) const
 {
+    if (abortWrite_)
+        return;
     objectsBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
                    << std::to_string(seq) << '\t' << "\\\\x"
                    << ripple::strHex(blob) << '\n';
@@ -50,8 +68,7 @@ PostgresBackend::writeLedgerObject(
     // insert after 1 million records
     if (numRowsInObjectsBuffer_ % 1000000 == 0)
     {
-        PgQuery pgQuery(pgPool_);
-        pgQuery.bulkInsert("objects", objectsBuffer_.str());
+        writeConnection_.bulkInsert("objects", objectsBuffer_.str());
         objectsBuffer_ = {};
     }

@@ -70,6 +87,8 @@ PostgresBackend::writeTransaction(
     std::string&& transaction,
     std::string&& metadata) const
 {
+    if (abortWrite_)
+        return;
     transactionsBuffer_ << "\\\\x" << ripple::strHex(hash) << '\t'
                         << std::to_string(seq) << '\t' << "\\\\x"
                         << ripple::strHex(transaction) << '\t' << "\\\\x"
@@ -226,7 +245,9 @@ PostgresBackend::fetchLedgerObject(
     auto res = pgQuery(sql.str().data());
     if (checkResult(res, 1))
     {
-        return res.asUnHexedBlob(0, 0);
+        auto blob = res.asUnHexedBlob(0, 0);
+        if (blob.size())
+            return blob;
     }

     return {};
@@ -286,7 +307,7 @@ PostgresBackend::fetchAllTransactionHashesInLedger(
     sql << "SELECT hash FROM transactions WHERE "
         << "ledger_seq = " << std::to_string(ledgerSequence);
     auto res = pgQuery(sql.str().data());
-    if (size_t numRows = checkResult(res, 3))
+    if (size_t numRows = checkResult(res, 1))
     {
         std::vector<ripple::uint256> hashes;
         for (size_t i = 0; i < numRows; ++i)
@@ -510,43 +531,30 @@ void
 PostgresBackend::startWrites() const
 {
     numRowsInObjectsBuffer_ = 0;
-}
-
-bool
-PostgresBackend::finishWrites() const
-{
-    PgQuery pg(pgPool_);
-    auto res = pg("BEGIN");
+    abortWrite_ = false;
+    auto res = writeConnection_("BEGIN");
     if (!res || res.status() != PGRES_COMMAND_OK)
     {
         std::stringstream msg;
         msg << "Postgres error creating transaction: " << res.msg();
         throw std::runtime_error(msg.str());
     }
-    auto cmd = boost::format(
-        R"(INSERT INTO ledgers
-           VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");
-
-    auto ledgerInsert = boost::str(
-        cmd % ledgerHeader_.seq % ripple::strHex(ledgerHeader_.hash) %
-        ripple::strHex(ledgerHeader_.parentHash) % ledgerHeader_.drops.drops() %
-        ledgerHeader_.closeTime.time_since_epoch().count() %
-        ledgerHeader_.parentCloseTime.time_since_epoch().count() %
-        ledgerHeader_.closeTimeResolution.count() % ledgerHeader_.closeFlags %
-        ripple::strHex(ledgerHeader_.accountHash) %
-        ripple::strHex(ledgerHeader_.txHash));
-
-    res = pg(ledgerInsert.data());
-    if (res)
+}
+
+bool
+PostgresBackend::finishWrites() const
+{
+    if (!abortWrite_)
     {
-        pg.bulkInsert("transactions", transactionsBuffer_.str());
-        pg.bulkInsert("books", booksBuffer_.str());
-        pg.bulkInsert("account_transactions", accountTxBuffer_.str());
+        writeConnection_.bulkInsert("transactions", transactionsBuffer_.str());
+        writeConnection_.bulkInsert("books", booksBuffer_.str());
+        writeConnection_.bulkInsert(
+            "account_transactions", accountTxBuffer_.str());
         std::string objectsStr = objectsBuffer_.str();
         if (objectsStr.size())
-            pg.bulkInsert("objects", objectsStr);
+            writeConnection_.bulkInsert("objects", objectsStr);
     }
-    res = pg("COMMIT");
+    auto res = writeConnection_("COMMIT");
     if (!res || res.status() != PGRES_COMMAND_OK)
     {
         std::stringstream msg;
@@ -12,10 +12,11 @@ private:
     mutable std::stringstream transactionsBuffer_;
     mutable std::stringstream booksBuffer_;
     mutable std::stringstream accountTxBuffer_;
-    mutable ripple::LedgerInfo ledgerHeader_;
+    std::shared_ptr<PgPool> pgPool_;
+    mutable PgQuery writeConnection_;
+    mutable bool abortWrite_ = false;

 public:
-    std::shared_ptr<PgPool> pgPool_;
     PostgresBackend(boost::json::object const& config);

     std::optional<uint32_t>
test.py
@@ -211,6 +211,16 @@ async def tx(ip, port, tx_hash, binary):
     except websockets.exceptions.connectionclosederror as e:
         print(e)

+async def ledger_entry(ip, port, index, ledger, binary):
+    address = 'ws://' + str(ip) + ':' + str(port)
+    try:
+        async with websockets.connect(address) as ws:
+            await ws.send(json.dumps({"command":"ledger_entry","index":index,"binary":bool(binary),"ledger_index":int(ledger)}))
+            res = json.loads(await ws.recv())
+            print(json.dumps(res,indent=4,sort_keys=True))
+    except websockets.exceptions.connectionclosederror as e:
+        print(e)
+

 async def ledger_data(ip, port, ledger, limit, binary):
     address = 'ws://' + str(ip) + ':' + str(port)
@@ -219,9 +229,30 @@ async def ledger_data(ip, port, ledger, limit, binary):
             await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":bool(binary)}))
             res = json.loads(await ws.recv())
             print(json.dumps(res,indent=4,sort_keys=True))
+            objects = []
+            blobs = []
+            keys = []
+            if "result" in res:
+                objects = res["result"]["state"]
+            else:
+                objects = res["objects"]
+            for x in objects:
+                blobs.append(x["data"])
+                keys.append(x["index"])
+            return (keys,blobs)
     except websockets.exceptions.connectionclosederror as e:
         print(e)

+def writeLedgerData(data,filename):
+    print(len(data[0]))
+    with open(filename,'w') as f:
+        data[0].sort()
+        data[1].sort()
+        for k,v in zip(data[0],data[1]):
+            f.write(k)
+            f.write('\n')
+            f.write(v)
+            f.write('\n')

 async def ledger_data_full(ip, port, ledger, binary, limit):
     address = 'ws://' + str(ip) + ':' + str(port)
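For context, a minimal sketch of driving the two helpers added above outside the script's argument parser (host, port, ledger sequence, limit, and output path are hypothetical):

import asyncio

# ledger_data now returns a (keys, blobs) tuple per the change above;
# writeLedgerData sorts both lists and writes them line by line.
res = asyncio.get_event_loop().run_until_complete(
    ledger_data('127.0.0.1', '8080', 1000, 200, True))
writeLedgerData(res, 'ledger_data.txt')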
@@ -239,7 +270,7 @@ async def ledger_data_full(ip, port, ledger, binary, limit):

                else:

-                    await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"cursor":marker, "binary":bool(binary), "limit":int(limit)}))
+                    await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"cursor":marker, "marker":marker,"binary":bool(binary), "limit":int(limit)}))
                res = json.loads(await ws.recv())

                if "error" in res:
@@ -372,7 +403,7 @@ async def ledger_range(ip, port):
        print(e)

 parser = argparse.ArgumentParser(description='test script for xrpl-reporting')
-parser.add_argument('action', choices=["account_info", "tx", "account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range"])
+parser.add_argument('action', choices=["account_info", "tx", "account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range","ledger_entry"])
 parser.add_argument('--ip', default='127.0.0.1')
 parser.add_argument('--port', default='8080')
 parser.add_argument('--hash')
@@ -391,6 +422,8 @@ parser.add_argument('--expand',default=False)
 parser.add_argument('--transactions',default=False)
 parser.add_argument('--minLedger',default=-1)
 parser.add_argument('--maxLedger',default=-1)
+parser.add_argument('--filename')
+parser.add_argument('--index')



@@ -408,6 +441,9 @@ def run(args):
         res2 = asyncio.get_event_loop().run_until_complete(
             account_info(args.p2pIp, args.p2pPort, args.account, args.ledger, args.binary))
         print(compareAccountInfo(res1,res2))
+    elif args.action == "ledger_entry":
+        asyncio.get_event_loop().run_until_complete(
+            ledger_entry(args.ip, args.port, args.index, args.ledger, args.binary))
     elif args.action == "tx":
         if args.hash is None:
             args.hash = getHashes(asyncio.get_event_loop().run_until_complete(ledger(args.ip,args.port,args.ledger,False,True,False)))[0]
@@ -430,11 +466,16 @@ def run(args):

         print(compareAccountTx(res,res2))
     elif args.action == "ledger_data":
-        asyncio.get_event_loop().run_until_complete(
+        res = asyncio.get_event_loop().run_until_complete(
             ledger_data(args.ip, args.port, args.ledger, args.limit, args.binary))
+        if args.verify:
+            writeLedgerData(res,args.filename)
     elif args.action == "ledger_data_full":
-        asyncio.get_event_loop().run_until_complete(
+        res = asyncio.get_event_loop().run_until_complete(
             ledger_data_full(args.ip, args.port, args.ledger, args.binary, args.limit))
+        if args.verify:
+            writeLedgerData(res,args.filename)
+
     elif args.action == "ledger":

         res = asyncio.get_event_loop().run_until_complete(
@@ -42,13 +42,15 @@ enum RPCCommand {
     account_info,
     ledger_data,
     book_offers,
-    ledger_range
+    ledger_range,
+    ledger_entry
 };
 std::unordered_map<std::string, RPCCommand> commandMap{
     {"tx", tx},
     {"account_tx", account_tx},
     {"ledger", ledger},
     {"ledger_range", ledger_range},
+    {"ledger_entry", ledger_entry},
     {"account_info", account_info},
     {"ledger_data", ledger_data},
     {"book_offers", book_offers}};
@@ -68,6 +70,10 @@ doLedgerData(
     boost::json::object const& request,
     BackendInterface const& backend);
 boost::json::object
+doLedgerEntry(
+    boost::json::object const& request,
+    BackendInterface const& backend);
+boost::json::object
 doBookOffers(
     boost::json::object const& request,
     BackendInterface const& backend);
@@ -97,6 +103,9 @@ buildResponse(
         case ledger:
             return doLedger(request, backend);
             break;
+        case ledger_entry:
+            return doLedgerEntry(request, backend);
+            break;
         case ledger_range:
             return doLedgerRange(request, backend);
             break;
@@ -207,19 +216,27 @@ public:
         std::string msg{
             static_cast<char const*>(buffer_.data().data()), buffer_.size()};
         // BOOST_LOG_TRIVIAL(debug) << __func__ << msg;
-        boost::json::value raw = boost::json::parse(msg);
-        // BOOST_LOG_TRIVIAL(debug) << __func__ << " parsed";
-        boost::json::object request = raw.as_object();
-        BOOST_LOG_TRIVIAL(debug) << " received request : " << request;
         boost::json::object response;
         try
         {
-            response = buildResponse(request, backend_);
+            boost::json::value raw = boost::json::parse(msg);
+            boost::json::object request = raw.as_object();
+            BOOST_LOG_TRIVIAL(debug) << " received request : " << request;
+            try
+            {
+                response = buildResponse(request, backend_);
+            }
+            catch (Backend::DatabaseTimeout const& t)
+            {
+                BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout";
+                response["error"] =
+                    "Database read timeout. Please retry the request";
+            }
         }
-        catch (Backend::DatabaseTimeout const& t)
+        catch (std::exception const& e)
         {
-            response["error"] =
-                "Database read timeout. Please retry the request";
+            BOOST_LOG_TRIVIAL(error)
+                << __func__ << "caught exception : " << e.what();
         }
         BOOST_LOG_TRIVIAL(trace) << __func__ << response;
         response_ = boost::json::serialize(response);
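Since read timeouts now surface to the client as an error string rather than being retried server-side (see the TODO in CassandraAsyncResult above), clients are expected to retry. A sketch in the style of test.py; the helper name and retry count are illustrative, not part of this commit:

import asyncio, json
import websockets

async def request_with_retry(ip, port, req, attempts=3):
    # Resend the request while the server reports the new
    # "Database read timeout. Please retry the request" error.
    address = 'ws://' + str(ip) + ':' + str(port)
    async with websockets.connect(address) as ws:
        res = None
        for _ in range(attempts):
            await ws.send(json.dumps(req))
            res = json.loads(await ws.recv())
            if res.get("error") != "Database read timeout. Please retry the request":
                break
        return res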