Bug fixes in both backends. Add ledger_range RPC. Improve test scripts.

CJ Cobb
2021-03-05 16:46:55 -05:00
parent 7a6dfe5967
commit e3a121e571
13 changed files with 179 additions and 51 deletions

View File

@@ -67,6 +67,7 @@ target_sources(reporting PRIVATE
 handlers/AccountTx.cpp
 handlers/LedgerData.cpp
 handlers/BookOffers.cpp
+handlers/LedgerRange.cpp
 handlers/Ledger.cpp)

View File

@@ -169,7 +169,10 @@ doAccountTx(boost::json::object const& request, BackendInterface const& backend)
         cursor = {*ledgerSequence, *transactionIndex};
     }
-    constexpr uint32_t limit = 500;
+    uint32_t limit = 200;
+    if (request.contains("limit") and
+        request.at("limit").kind() == boost::json::kind::int64)
+        limit = request.at("limit").as_int64();
     boost::json::array txns;
     auto [blobs, retCursor] =
         backend.fetchAccountTransactions(*account, limit, cursor);
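For reference, a hedged sketch (values are illustrative, not from this commit) of an account_tx request that overrides the new default limit of 200; the handler above reads it with request.at("limit").as_int64() only when the value is stored as an int64:

    #include <boost/json.hpp>

    // Link against Boost.JSON to build this sketch.
    int main()
    {
        boost::json::object request;
        request["command"] = "account_tx";
        request["account"] = "rLC64xxNif3GiY9FQnbaM4kcE6VvDhwRod";  // default account used by test.py
        request["limit"] = 50;  // stored as int64, so the kind() check above accepts it
    }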

View File

@@ -293,7 +293,7 @@ doBookOffers(
         return response;
     }
-    std::uint32_t limit = 500;
+    std::uint32_t limit = 200;
     if (request.contains("limit") and
         request.at("limit").kind() == boost::json::kind::int64)
         limit = request.at("limit").as_int64();

View File

@@ -70,7 +70,7 @@ doLedgerData(
     std::vector<Backend::LedgerObject>& results = page.objects;
     std::optional<ripple::uint256> const& returnedCursor = page.cursor;
     BOOST_LOG_TRIVIAL(debug)
-        << "doUpperBound returned " << results.size() << " results";
+        << __func__ << " number of results = " << results.size();
     for (auto const& [key, object] : results)
     {
         ripple::STLedgerEntry sle{

handlers/LedgerRange.cpp (new file, 22 lines)
View File

@@ -0,0 +1,22 @@
+#include <handlers/RPCHelpers.h>
+#include <reporting/BackendInterface.h>
+boost::json::object
+doLedgerRange(
+    boost::json::object const& request,
+    BackendInterface const& backend)
+{
+    boost::json::object response;
+    auto range = backend.fetchLedgerRange();
+    if (!range)
+    {
+        response["error"] = "No data";
+    }
+    else
+    {
+        response["ledger_index_min"] = range->minSequence;
+        response["ledger_index_max"] = range->maxSequence;
+    }
+    return response;
+}
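As a rough illustration (the sequence numbers are made up), the success path of the new handler yields a two-field JSON object:

    #include <boost/json.hpp>
    #include <iostream>

    // Link against Boost.JSON to build this sketch.
    int main()
    {
        boost::json::object response;
        response["ledger_index_min"] = 32570;     // illustrative values only
        response["ledger_index_max"] = 62000000;
        std::cout << boost::json::serialize(response) << "\n";
        // prints {"ledger_index_min":32570,"ledger_index_max":62000000}
    }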

View File

@@ -229,6 +229,7 @@ std::vector<TransactionAndMetadata>
 CassandraBackend::fetchAllTransactionsInLedger(uint32_t ledgerSequence) const
 {
     CassandraStatement statement{selectAllTransactionsInLedger_};
+    statement.bindInt(ledgerSequence);
     CassandraResult result = executeSyncRead(statement);
     if (!result)
     {
@@ -351,7 +352,7 @@ CassandraBackend::open()
         throw std::runtime_error(ss.str());
     }
-    cass_cluster_set_request_timeout(cluster, 2000);
+    cass_cluster_set_request_timeout(cluster, 10000);
     rc = cass_cluster_set_queue_size_io(
         cluster,

View File

@@ -204,6 +204,27 @@ public:
         curBindingIndex_++;
     }
+    void
+    bindUInt(uint32_t value)
+    {
+        if (!statement_)
+            throw std::runtime_error(
+                "CassandraStatement::bindUInt - statement_ is null");
+        BOOST_LOG_TRIVIAL(info)
+            << std::to_string(curBindingIndex_) << " " << std::to_string(value);
+        CassError rc =
+            cass_statement_bind_int32(statement_, curBindingIndex_, value);
+        if (rc != CASS_OK)
+        {
+            std::stringstream ss;
+            ss << "Error binding uint to statement: " << rc << ", "
+               << cass_error_desc(rc);
+            BOOST_LOG_TRIVIAL(error) << __func__ << " : " << ss.str();
+            throw std::runtime_error(ss.str());
+        }
+        curBindingIndex_++;
+    }
     void
     bindInt(uint32_t value)
     {
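For context, a minimal sketch of the DataStax C/C++ driver call that bindUInt wraps; the query text and parameter index here are assumptions, not taken from this codebase:

    #include <cassandra.h>
    #include <cstdio>

    int main()
    {
        // Hypothetical statement: bind a page-size LIMIT as a 32-bit int at index 1.
        CassStatement* stmt = cass_statement_new(
            "SELECT hash FROM account_tx WHERE account = ? LIMIT ?", 2);
        CassError rc = cass_statement_bind_int32(stmt, 1, 200);
        if (rc != CASS_OK)
            std::printf("bind failed: %s\n", cass_error_desc(rc));
        cass_statement_free(stmt);
    }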
@@ -278,11 +299,27 @@ class CassandraResult
     size_t curGetIndex_ = 0;
 public:
-    CassandraResult()
+    CassandraResult() : result_(nullptr), row_(nullptr), iter_(nullptr)
     {
     }
+    CassandraResult&
+    operator=(CassandraResult&& other)
+    {
+        result_ = other.result_;
+        row_ = other.row_;
+        iter_ = other.iter_;
+        curGetIndex_ = other.curGetIndex_;
+        other.result_ = nullptr;
+        other.row_ = nullptr;
+        other.iter_ = nullptr;
+        other.curGetIndex_ = 0;
+        return *this;
+    }
     CassandraResult(CassandraResult const& other) = delete;
+    CassandraResult&
+    operator=(CassandraResult const& other) = delete;
     CassandraResult(CassResult const* result) : result_(result)
     {
@@ -351,7 +388,7 @@ public:
     getUInt256()
     {
         if (!row_)
-            throw std::runtime_error("CassandraResult::getBytes - no result");
+            throw std::runtime_error("CassandraResult::uint256 - no result");
         cass_byte_t const* buf;
         std::size_t bufSize;
         CassError rc = cass_value_get_bytes(
@@ -359,7 +396,7 @@ public:
         if (rc != CASS_OK)
         {
             std::stringstream msg;
-            msg << "CassandraResult::getBytes - error getting value: " << rc
+            msg << "CassandraResult::getuint256 - error getting value: " << rc
                 << ", " << cass_error_desc(rc);
             BOOST_LOG_TRIVIAL(error) << msg.str();
             throw std::runtime_error(msg.str());
@@ -420,6 +457,7 @@ public:
     ~CassandraResult()
     {
+        if (result_ != nullptr)
             cass_result_free(result_);
     }
 };
@@ -440,7 +478,7 @@ public:
         }
         else
         {
-            result_ = CassandraResult(cass_future_get_result(fut));
+            result_ = std::move(CassandraResult(cass_future_get_result(fut)));
         }
     }
@@ -531,7 +569,7 @@ private:
     // maximum number of concurrent in flight requests. New requests will wait
     // for earlier requests to finish if this limit is exceeded
-    uint32_t maxRequestsOutstanding = 10000000;
+    uint32_t maxRequestsOutstanding = 10000;
     mutable std::atomic_uint32_t numRequestsOutstanding_ = 0;
     // mutex and condition_variable to limit the number of concurrent in flight
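The comment above describes a back-pressure scheme; here is a rough, self-contained sketch of that pattern with assumed names (not this class's actual members or call sites):

    #include <atomic>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>
    #include <thread>

    std::mutex throttleMutex;
    std::condition_variable throttleCv;
    std::atomic_uint32_t numOutstanding{0};
    constexpr std::uint32_t maxRequestsOutstanding = 10000;

    void submitAsyncWrite()
    {
        // Block new writes once the in-flight count reaches the cap.
        std::unique_lock<std::mutex> lk(throttleMutex);
        throttleCv.wait(lk, [] { return numOutstanding < maxRequestsOutstanding; });
        ++numOutstanding;
        // ... hand the statement to the driver asynchronously ...
    }

    void onWriteComplete()
    {
        // Driver callback: release one slot and wake a waiting writer.
        --numOutstanding;
        throttleCv.notify_one();
    }

    int main()
    {
        submitAsyncWrite();
        std::thread t(onWriteComplete);
        t.join();
    }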
@@ -606,7 +644,7 @@ public:
                 cursor->ledgerSequence, cursor->transactionIndex);
         else
             statement.bindIntTuple(INT32_MAX, INT32_MAX);
-        statement.bindInt(limit);
+        statement.bindUInt(limit);
         CassandraResult result = executeSyncRead(statement);
         if (!result.hasResult())
         {
@@ -618,6 +656,8 @@ public:
         size_t numRows = result.numRows();
         bool returnCursor = numRows == limit;
         std::optional<AccountTransactionsCursor> retCursor;
+        BOOST_LOG_TRIVIAL(info) << "num_rows = " << std::to_string(numRows);
         do
         {
             hashes.push_back(result.getUInt256());
@@ -706,6 +746,8 @@ public:
         ++numRequestsOutstanding_;
         writeLedgerHeader(*headerCb, false);
         writeLedgerHash(*hashCb, false);
+        ledgerSequence_ = ledgerInfo.seq;
+        isFirstLedger_ = isFirst;
     }
     void
     writeLedgerHash(WriteLedgerHashCallbackData& cb, bool isRetry) const
@@ -820,10 +862,10 @@ public:
             return {{result.getBytes(), result.getBytes()}};
     }
     LedgerPage
-    fetchLedgerPage(
+    fetchLedgerPage2(
         std::optional<ripple::uint256> const& cursor,
         std::uint32_t ledgerSequence,
-        std::uint32_t limit) const override
+        std::uint32_t limit) const
     {
         BOOST_LOG_TRIVIAL(trace) << __func__;
         CassandraStatement statement{selectLedgerPageKeys_};
@@ -839,7 +881,7 @@ public:
         statement.bindInt(intCursor);
         statement.bindInt(ledgerSequence);
         statement.bindInt(ledgerSequence);
-        statement.bindInt(limit);
+        statement.bindUInt(limit);
         CassandraResult result = executeSyncRead(statement);
@@ -867,56 +909,80 @@ public:
         return {{}, {}};
     }
     LedgerPage
-    fetchLedgerPage2(
-        std::optional<ripple::uint256> cursor,
+    fetchLedgerPage(
+        std::optional<ripple::uint256> const& cursor,
         std::uint32_t ledgerSequence,
-        std::uint32_t limit) const
+        std::uint32_t limit) const override
     {
         BOOST_LOG_TRIVIAL(trace) << __func__;
+        std::optional<ripple::uint256> currentCursor = cursor;
         std::vector<LedgerObject> objects;
+        uint32_t curLimit = limit;
         while (objects.size() < limit)
         {
             CassandraStatement statement{selectLedgerPage_};
             int64_t intCursor = INT64_MIN;
-            if (cursor)
+            if (currentCursor)
             {
-                auto token = getToken(cursor->data());
+                auto token = getToken(currentCursor->data());
                 if (token)
                     intCursor = *token;
             }
-            BOOST_LOG_TRIVIAL(trace)
+            BOOST_LOG_TRIVIAL(debug)
                 << __func__ << " - cursor = " << std::to_string(intCursor)
                 << " , sequence = " << std::to_string(ledgerSequence)
                 << ", - limit = " << std::to_string(limit);
             statement.bindInt(intCursor);
             statement.bindInt(ledgerSequence);
-            statement.bindInt(limit);
+            statement.bindUInt(curLimit);
             CassandraResult result = executeSyncRead(statement);
-            BOOST_LOG_TRIVIAL(debug) << __func__ << " - got keys";
             if (!!result)
             {
+                BOOST_LOG_TRIVIAL(debug)
+                    << __func__ << " - got keys - size = " << result.numRows();
+                size_t prevSize = objects.size();
                 do
                 {
-                    objects.push_back({result.getUInt256(), result.getBytes()});
+                    ripple::uint256 key = result.getUInt256();
+                    std::vector<unsigned char> object = result.getBytes();
+                    if (object.size())
+                    {
+                        objects.push_back({std::move(key), std::move(object)});
+                    }
                 } while (result.nextRow());
+                size_t prevBatchSize = objects.size() - prevSize;
+                BOOST_LOG_TRIVIAL(debug)
+                    << __func__
+                    << " - added to objects. size = " << objects.size();
+                if (result.numRows() < curLimit)
+                {
+                    currentCursor = {};
+                    break;
+                }
                 if (objects.size() < limit)
                 {
-                    double sparsity = limit / objects.size();
-                    limit = (limit - objects.size()) * sparsity;
+                    BOOST_LOG_TRIVIAL(debug)
+                        << __func__
+                        << " cur limit = " << std::to_string(curLimit)
+                        << " , numRows = " << std::to_string(prevBatchSize);
+                    double sparsity =
+                        (double)(curLimit + 1) / (double)(prevBatchSize + 1);
+                    curLimit = (limit - objects.size()) * sparsity;
                     BOOST_LOG_TRIVIAL(debug)
                         << __func__
                         << " - sparsity = " << std::to_string(sparsity)
-                        << " , limit = " << std::to_string(limit);
+                        << " , curLimit = " << std::to_string(curLimit);
                 }
                 assert(objects.size());
-                cursor = objects[objects.size() - 1].key;
+                currentCursor = objects[objects.size() - 1].key;
             }
         }
         if (objects.size())
-            return {objects, cursor};
+            return {objects, currentCursor};
         return {{}, {}};
     }
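To make the new paging arithmetic concrete, a worked example with made-up numbers: deleted objects come back as empty blobs and are skipped, so a query can return fewer usable objects than requested, and the sparsity ratio scales the next request up to compensate.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        std::uint32_t limit = 200;     // caller's requested page size
        std::uint32_t curLimit = 200;  // what the first query asked for
        std::size_t objectsSize = 50;  // only 50 rows had non-empty blobs
        std::size_t prevBatchSize = 50;

        double sparsity = (double)(curLimit + 1) / (double)(prevBatchSize + 1);
        curLimit = (limit - objectsSize) * sparsity;

        // sparsity ~ 3.94, so the next query asks for about 591 keys to have a
        // good chance of yielding the 150 objects still needed.
        std::printf("sparsity=%.2f next curLimit=%u\n", sparsity, (unsigned)curLimit);
    }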
@@ -939,11 +1005,13 @@ public:
             ripple::uint256 zero = {};
             statement.bindBytes(zero);
         }
-        statement.bindInt(limit);
+        statement.bindUInt(limit);
         CassandraResult result = executeSyncRead(statement);
         BOOST_LOG_TRIVIAL(debug) << __func__ << " - got keys";
         std::vector<ripple::uint256> keys;
+        if (!result)
+            return {{}, {}};
         do
         {
             keys.push_back(result.getUInt256());
@@ -1003,8 +1071,8 @@ public:
     fetchTransactions(std::vector<ripple::uint256> const& hashes) const override
     {
         std::size_t const numHashes = hashes.size();
-        BOOST_LOG_TRIVIAL(trace)
-            << "Fetching " << numHashes << " records from Cassandra";
+        BOOST_LOG_TRIVIAL(debug)
+            << "Fetching " << numHashes << " transactions from Cassandra";
         std::atomic_uint32_t numFinished = 0;
         std::condition_variable cv;
         std::mutex mtx;
@@ -1024,8 +1092,8 @@ public:
             return numFinished == numHashes;
         });
-        BOOST_LOG_TRIVIAL(trace)
-            << "Fetched " << numHashes << " records from Cassandra";
+        BOOST_LOG_TRIVIAL(debug)
+            << "Fetched " << numHashes << " transactions from Cassandra";
         return results;
     }
@@ -1278,6 +1346,7 @@ public:
         statement.bindBytes(account);
         statement.bindIntTuple(
             data.data.ledgerSequence, data.data.transactionIndex);
+        statement.bindBytes(data.data.txHash);
         executeAsyncWrite(
             statement, flatMapWriteAccountTxCallback, data, isRetry);
@@ -1525,6 +1594,10 @@ public:
ss << ": " << cass_error_desc(rc); ss << ": " << cass_error_desc(rc);
BOOST_LOG_TRIVIAL(warning) << ss.str(); BOOST_LOG_TRIVIAL(warning) << ss.str();
} }
if (rc == CASS_ERROR_SERVER_INVALID_QUERY)
{
throw std::runtime_error("invalid query");
}
} while (rc != CASS_OK); } while (rc != CASS_OK);
CassResult const* res = cass_future_get_result(fut); CassResult const* res = cass_future_get_result(fut);
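The new early-exit lives inside a synchronous execute-and-retry loop; this is a hedged sketch of that shape (the function name and surrounding lifetime management are assumptions), showing why a server-side invalid query must abort instead of retrying forever:

    #include <cassandra.h>
    #include <stdexcept>

    // Compiles standalone; actually running it needs a connected CassSession.
    const CassResult*
    executeSyncReadSketch(CassSession* session, const CassStatement* statement)
    {
        CassError rc;
        CassFuture* fut;
        do
        {
            fut = cass_session_execute(session, statement);
            rc = cass_future_error_code(fut);  // waits for the request to finish
            if (rc != CASS_OK)
            {
                cass_future_free(fut);
                // A malformed or unknown query can never succeed on retry.
                if (rc == CASS_ERROR_SERVER_INVALID_QUERY)
                    throw std::runtime_error("invalid query");
            }
        } while (rc != CASS_OK);
        const CassResult* res = cass_future_get_result(fut);
        cass_future_free(fut);  // the result's lifetime is independent of the future
        return res;             // caller frees with cass_result_free
    }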

View File

@@ -137,13 +137,13 @@ public:
     char const*
     c_str(int ntuple = 0, int nfield = 0) const
     {
-        return PQgetvalue(result_.get(), ntuple, nfield) + 2;
+        return PQgetvalue(result_.get(), ntuple, nfield);
     }
     std::vector<unsigned char>
     asUnHexedBlob(int ntuple = 0, int nfield = 0) const
     {
-        std::string_view view{c_str(ntuple, nfield)};
+        std::string_view view{c_str(ntuple, nfield) + 2};
         auto res = ripple::strUnHex(view.size(), view.cbegin(), view.cend());
         if (res)
             return *res;
@@ -154,7 +154,7 @@ public:
     asUInt256(int ntuple = 0, int nfield = 0) const
     {
         ripple::uint256 val;
-        if (!val.parseHex(c_str(ntuple, nfield)))
+        if (!val.parseHex(c_str(ntuple, nfield) + 2))
             throw std::runtime_error("Pg - failed to parse hex into uint256");
         return val;
     }
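The change above moves the '+2' skip out of c_str() and into the hex-consuming callers. A small illustration (example value) of why two characters are skipped: libpq returns bytea columns in hex output format, i.e. the text \x followed by hex digits.

    #include <cassert>
    #include <string>
    #include <string_view>

    int main()
    {
        // What PQgetvalue() hands back for a bytea column in hex output format.
        std::string pgValue = "\\x0aff";            // backslash, 'x', then the hex digits
        std::string_view hex{pgValue.c_str() + 2};  // "0aff" - the part strUnHex/parseHex want
        assert(hex == "0aff");
    }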

View File

@@ -66,14 +66,13 @@ PostgresBackend::writeLedgerObject(
 {
     if (abortWrite_)
         return;
-    static int numRows = 0;
-    numRows++;
     objectsBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
                    << std::to_string(seq) << '\t' << "\\\\x"
                    << ripple::strHex(blob) << '\n';
+    numRowsInObjectsBuffer_++;
     // If the buffer gets too large, the insert fails. Not sure why. So we
     // insert after 1 million records
-    if (numRows % 1000000 == 0)
+    if (numRowsInObjectsBuffer_ % 1000000 == 0)
     {
         PgQuery pgQuery(pgPool_);
         pgQuery.bulkInsert("objects", objectsBuffer_.str());
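As a side note, a hedged illustration with made-up key/blob values of what one buffered row looks like: the doubled backslash written here survives COPY text unescaping as the single \x prefix Postgres needs for bytea hex input.

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    int main()
    {
        std::stringstream objectsBuffer;
        std::string keyHex = "00AB";       // stands in for ripple::strHex(key)
        std::string blobHex = "DEADBEEF";  // stands in for ripple::strHex(blob)
        std::uint32_t seq = 62000000;      // example ledger sequence

        objectsBuffer << "\\\\x" << keyHex << '\t' << std::to_string(seq) << '\t'
                      << "\\\\x" << blobHex << '\n';

        // The buffer now holds: \\x00AB<TAB>62000000<TAB>\\xDEADBEEF
        // which COPY unescapes to \x00AB, 62000000, \xDEADBEEF per column.
        std::cout << objectsBuffer.str();
    }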
@@ -205,6 +204,7 @@ PostgresBackend::fetchLedgerRange() const
         return {};
     std::string res{range.c_str()};
+    BOOST_LOG_TRIVIAL(debug) << "range is = " << res;
     try
     {
         size_t minVal = 0;
@@ -300,7 +300,7 @@ PostgresBackend::fetchLedgerPage(
<< " (SELECT DISTINCT ON (key) * FROM objects" << " (SELECT DISTINCT ON (key) * FROM objects"
<< " WHERE ledger_seq <= " << std::to_string(ledgerSequence); << " WHERE ledger_seq <= " << std::to_string(ledgerSequence);
if (cursor) if (cursor)
sql << " AND key > \'x\\" << ripple::strHex(*cursor) << "\'"; sql << " AND key > \'\\x" << ripple::strHex(*cursor) << "\'";
sql << " ORDER BY key, ledger_seq DESC) sub" sql << " ORDER BY key, ledger_seq DESC) sub"
<< " WHERE object != \'\\x\'" << " WHERE object != \'\\x\'"
<< " LIMIT " << std::to_string(limit); << " LIMIT " << std::to_string(limit);
@@ -496,6 +496,7 @@ PostgresBackend::startWrites() const
msg << "Postgres error creating transaction: " << res.msg(); msg << "Postgres error creating transaction: " << res.msg();
throw std::runtime_error(msg.str()); throw std::runtime_error(msg.str());
} }
numRowsInObjectsBuffer_ = 0;
} }
bool bool
@@ -525,6 +526,7 @@ PostgresBackend::finishWrites() const
     booksBuffer_.clear();
     accountTxBuffer_.str("");
     accountTxBuffer_.clear();
+    numRowsInObjectsBuffer_ = 0;
     return true;
 }

View File

@@ -7,6 +7,7 @@ namespace Backend {
 class PostgresBackend : public BackendInterface
 {
 private:
+    mutable size_t numRowsInObjectsBuffer_ = 0;
     mutable std::stringstream objectsBuffer_;
     mutable std::stringstream transactionsBuffer_;
     mutable std::stringstream booksBuffer_;

View File

@@ -115,7 +115,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence)
     flatMapBackend_->startWrites();
     flatMapBackend_->writeLedger(
-        lgrInfo, std::move(*ledgerData->mutable_ledger_header()));
+        lgrInfo, std::move(*ledgerData->mutable_ledger_header()), true);
     std::vector<AccountTransactionsData> accountTxData =
         insertTransactions(lgrInfo, *ledgerData);

test.py (34 lines changed)
View File

@@ -52,7 +52,7 @@ async def ledger_data(ip, port, ledger, limit):
     address = 'ws://' + str(ip) + ':' + str(port)
     try:
         async with websockets.connect(address) as ws:
-            await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"limit":int(limit),"binary":True}))
+            await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":True}))
             res = json.loads(await ws.recv())
             print(json.dumps(res,indent=4,sort_keys=True))
     except websockets.exceptions.connectionclosederror as e:
@@ -68,14 +68,14 @@ async def ledger_data_full(ip, port, ledger):
                 if marker is None:
                     await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":True}))
                     res = json.loads(await ws.recv())
+                    print(res)
                 else:
-                    await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"marker":marker}))
+                    await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"cursor":marker, "binary":True}))
                     res = json.loads(await ws.recv())
-                if "marker" in res:
-                    marker = int(res["marker"])
+                if "cursor" in res:
+                    marker = res["cursor"]
                     print(marker)
                 elif "result" in res and "marker" in res["result"]:
                     marker = res["result"]["marker"]
@@ -108,6 +108,7 @@ async def book_offers(ip, port, ledger, pay_currency, pay_issuer, get_currency,
         print(e)
 async def ledger(ip, port, ledger):
     address = 'ws://' + str(ip) + ':' + str(port)
     try:
         async with websockets.connect(address) as ws:
@@ -116,19 +117,29 @@ async def ledger(ip, port, ledger):
             print(json.dumps(res,indent=4,sort_keys=True))
     except websockets.exceptions.connectionclosederror as e:
         print(e)
+async def ledger_range(ip, port):
+    address = 'ws://' + str(ip) + ':' + str(port)
+    try:
+        async with websockets.connect(address) as ws:
+            await ws.send(json.dumps({"command":"ledger_range"}))
+            res = json.loads(await ws.recv())
+            print(json.dumps(res,indent=4,sort_keys=True))
+            return res["ledger_index_max"]
+    except websockets.exceptions.connectionclosederror as e:
+        print(e)
 parser = argparse.ArgumentParser(description='test script for xrpl-reporting')
-parser.add_argument('action', choices=["account_info", "tx", "account_tx", "ledger_data", "ledger_data_full", "book_offers","ledger"])
+parser.add_argument('action', choices=["account_info", "tx", "account_tx", "ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range"])
 parser.add_argument('--ip', default='127.0.0.1')
 parser.add_argument('--port', default='8080')
 parser.add_argument('--hash')
 parser.add_argument('--account', default="rLC64xxNif3GiY9FQnbaM4kcE6VvDhwRod")
 parser.add_argument('--ledger')
 parser.add_argument('--limit', default='200')
-parser.add_argument('--taker_pays_issuer')
-parser.add_argument('--taker_pays_currency')
+parser.add_argument('--taker_pays_issuer',default='rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B')
+parser.add_argument('--taker_pays_currency',default='USD')
 parser.add_argument('--taker_gets_issuer')
-parser.add_argument('--taker_gets_currency')
+parser.add_argument('--taker_gets_currency',default='XRP')
@@ -136,6 +147,8 @@ parser.add_argument('--taker_gets_currency')
 args = parser.parse_args()
 def run(args):
+    if(args.ledger is None):
+        args.ledger = asyncio.get_event_loop().run_until_complete(ledger_range(args.ip, args.port));
     asyncio.set_event_loop(asyncio.new_event_loop())
     if args.action == "account_info":
         asyncio.get_event_loop().run_until_complete(
@@ -155,6 +168,9 @@ def run(args):
elif args.action == "ledger": elif args.action == "ledger":
asyncio.get_event_loop().run_until_complete( asyncio.get_event_loop().run_until_complete(
ledger(args.ip, args.port, args.ledger)) ledger(args.ip, args.port, args.ledger))
elif args.action == "ledger_range":
asyncio.get_event_loop().run_until_complete(
ledger_range(args.ip, args.port))
elif args.action == "book_offers": elif args.action == "book_offers":
asyncio.get_event_loop().run_until_complete( asyncio.get_event_loop().run_until_complete(
book_offers(args.ip, args.port, args.ledger, args.taker_pays_currency, args.taker_pays_issuer, args.taker_gets_currency, args.taker_gets_issuer)) book_offers(args.ip, args.port, args.ledger, args.taker_pays_currency, args.taker_pays_issuer, args.taker_gets_currency, args.taker_gets_issuer))

View File

@@ -41,12 +41,14 @@ enum RPCCommand {
     ledger,
     account_info,
     ledger_data,
-    book_offers
+    book_offers,
+    ledger_range
 };
 std::unordered_map<std::string, RPCCommand> commandMap{
     {"tx", tx},
     {"account_tx", account_tx},
     {"ledger", ledger},
+    {"ledger_range", ledger_range},
     {"account_info", account_info},
     {"ledger_data", ledger_data},
     {"book_offers", book_offers}};
@@ -71,6 +73,10 @@ doBookOffers(
     BackendInterface const& backend);
 boost::json::object
 doLedger(boost::json::object const& request, BackendInterface const& backend);
+boost::json::object
+doLedgerRange(
+    boost::json::object const& request,
+    BackendInterface const& backend);
 boost::json::object
 buildResponse(
@@ -91,6 +97,9 @@ buildResponse(
         case ledger:
             return doLedger(request, backend);
             break;
+        case ledger_range:
+            return doLedgerRange(request, backend);
+            break;
         case ledger_data:
             return doLedgerData(request, backend);
             break;
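Putting the new pieces together, a hedged end-to-end sketch; the enum and map below are trimmed-down stand-ins for the ones above, just to show how a bare {"command":"ledger_range"} request resolves to doLedgerRange:

    #include <boost/json.hpp>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Reduced copies of the dispatch pieces above, for illustration only.
    enum RPCCommand { tx, account_tx, ledger, account_info, ledger_data, book_offers, ledger_range };
    std::unordered_map<std::string, RPCCommand> commandMap{
        {"ledger_range", ledger_range}, {"ledger", ledger}};

    int main()
    {
        boost::json::object request;
        request["command"] = "ledger_range";  // the RPC needs no other fields
        std::string cmd{request["command"].as_string().c_str()};
        auto it = commandMap.find(cmd);
        if (it != commandMap.end() && it->second == ledger_range)
            std::cout << "would call doLedgerRange(request, backend)\n";
    }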