Mirror of https://github.com/XRPLF/clio.git, synced 2025-11-25 14:15:53 +00:00

Commit: partial merge needs fixing
@@ -10,7 +10,7 @@
 set(CMAKE_VERBOSE_MAKEFILE TRUE)
 project(reporting)
 cmake_minimum_required(VERSION 3.16)
-set (CMAKE_CXX_STANDARD 17)
+set (CMAKE_CXX_STANDARD 20)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -Wno-narrowing")
 set(Boost_USE_STATIC_LIBS ON)
 set(Boost_USE_MULTITHREADED ON)
@@ -21,10 +21,22 @@ endif ()
 file (TO_CMAKE_PATH "${BOOST_ROOT}" BOOST_ROOT)
 
 FIND_PACKAGE( Boost 1.75 COMPONENTS filesystem log log_setup thread system REQUIRED )
-add_executable (reporting
-    websocket_server_async.cpp
+include(FetchContent)
+FetchContent_Declare(
+    googletest
+    URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip
 )
+FetchContent_MakeAvailable(googletest)
+enable_testing()
+include(GoogleTest)
 
+add_executable (reporting_main
+    server/websocket_server_async.cpp
+)
+add_executable (reporting_tests
+    unittests/main.cpp
+)
+add_library(reporting reporting/BackendInterface.h)
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/deps")
 include(ExternalProject)
 message(${CMAKE_CURRENT_BINARY_DIR})
@@ -59,6 +71,7 @@ target_sources(reporting PRIVATE
     reporting/CassandraBackend.cpp
     reporting/PostgresBackend.cpp
     reporting/BackendIndexer.cpp
+    reporting/BackendInterface.cpp
     reporting/Pg.cpp
     reporting/P2pProxy.cpp
     reporting/DBHelpers.cpp
@@ -74,6 +87,7 @@ target_sources(reporting PRIVATE
     handlers/LedgerRange.cpp
    handlers/Ledger.cpp
    handlers/LedgerEntry.cpp
+<<<<<<< HEAD
    handlers/AccountChannels.cpp
    handlers/AccountLines.cpp
    handlers/AccountCurrencies.cpp
@@ -82,8 +96,17 @@ target_sources(reporting PRIVATE
    handlers/ChannelAuthorize.cpp
    handlers/ChannelVerify.cpp
    handlers/Subscribe.cpp)
+=======
+    handlers/ServerInfo.cpp)
+>>>>>>> dev
 
 
 message(${Boost_LIBRARIES})
 INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR} ${Boost_INCLUDE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
 TARGET_LINK_LIBRARIES(reporting PUBLIC ${Boost_LIBRARIES})
+TARGET_LINK_LIBRARIES(reporting_main PUBLIC reporting)
+TARGET_LINK_LIBRARIES(reporting_tests PUBLIC reporting gtest_main)
 
 
+gtest_discover_tests(reporting_tests)
 
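Note: the hunk above pulls GoogleTest into the build via FetchContent and registers the new reporting_tests target with CTest through gtest_discover_tests. As a hedged sketch only (this diff does not show the contents of unittests/main.cpp, and gtest_main already supplies main()), a minimal test translation unit compatible with this setup would be:

    #include <gtest/gtest.h>

    // Placeholder case; real suites would exercise the reporting backend.
    TEST(ReportingTests, Sanity)
    {
        EXPECT_EQ(2 + 2, 4);
    }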
deps/cassandra.cmake (vendored): 4 changes
@@ -97,7 +97,6 @@ if(NOT cassandra)
-
 file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR)
 endif()
 
 add_library (cassandra STATIC IMPORTED GLOBAL)
 ExternalProject_Add(cassandra_src
     PREFIX ${nih_cache_path}
@@ -148,6 +147,9 @@ if(NOT cassandra)
 else()
     target_link_libraries(cassandra INTERFACE ${zlib})
 endif()
+set(OPENSSL_USE_STATIC_LIBS TRUE)
+find_package(OpenSSL REQUIRED)
+target_link_libraries(cassandra INTERFACE OpenSSL::SSL)
 
 file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR)
 target_link_libraries(reporting PUBLIC cassandra)
@@ -104,7 +104,7 @@ doAccountInfo(
 {
     response["success"] = "fetched successfully!";
     if (!binary)
-        response["object"] = getJson(sle);
+        response["object"] = toJson(sle);
     else
         response["object"] = ripple::strHex(*dbResponse);
     response["db_time"] = time;
@@ -124,7 +124,7 @@ doAccountInfo(
     // support multiple SignerLists on one account.
     auto const sleSigners = ledger->read(keylet::signers(accountID));
     if (sleSigners)
-        jvSignerList.append(sleSigners->getJson(JsonOptions::none));
+        jvSignerList.append(sleSigners->toJson(JsonOptions::none));
 
     result[jss::account_data][jss::signer_lists] =
         std::move(jvSignerList);
@@ -19,102 +19,6 @@
 
 #include <handlers/RPCHelpers.h>
 #include <reporting/BackendInterface.h>
-#include <reporting/Pg.h>
-
-std::vector<std::pair<
-    std::shared_ptr<ripple::STTx const>,
-    std::shared_ptr<ripple::STObject const>>>
-doAccountTxStoredProcedure(
-    ripple::AccountID const& account,
-    std::shared_ptr<PgPool>& pgPool,
-    BackendInterface const& backend)
-{
-    pg_params dbParams;
-
-    char const*& command = dbParams.first;
-    std::vector<std::optional<std::string>>& values = dbParams.second;
-    command =
-        "SELECT account_tx($1::bytea, $2::bool, "
-        "$3::bigint, $4::bigint, $5::bigint, $6::bytea, "
-        "$7::bigint, $8::bool, $9::bigint, $10::bigint)";
-    values.resize(10);
-    values[0] = "\\x" + ripple::strHex(account);
-    values[1] = "true";
-    static std::uint32_t const page_length(200);
-    values[2] = std::to_string(page_length);
-
-    auto res = PgQuery(pgPool)(dbParams);
-    if (!res)
-    {
-        BOOST_LOG_TRIVIAL(error)
-            << __func__ << " : Postgres response is null - account = "
-            << ripple::strHex(account);
-        assert(false);
-        return {};
-    }
-    else if (res.status() != PGRES_TUPLES_OK)
-    {
-        assert(false);
-        return {};
-    }
-
-    if (res.isNull() || res.ntuples() == 0)
-    {
-        BOOST_LOG_TRIVIAL(error)
-            << __func__ << " : No data returned from Postgres : account = "
-            << ripple::strHex(account);
-
-        assert(false);
-        return {};
-    }
-
-    char const* resultStr = res.c_str();
-
-    boost::json::object result = boost::json::parse(resultStr).as_object();
-    if (result.contains("transactions"))
-    {
-        std::vector<ripple::uint256> nodestoreHashes;
-        for (auto& t : result.at("transactions").as_array())
-        {
-            boost::json::object obj = t.as_object();
-            if (obj.contains("ledger_seq") && obj.contains("nodestore_hash"))
-            {
-                std::string nodestoreHashHex =
-                    obj.at("nodestore_hash").as_string().c_str();
-                nodestoreHashHex.erase(0, 2);
-                ripple::uint256 nodestoreHash;
-                if (!nodestoreHash.parseHex(nodestoreHashHex))
-                    assert(false);
-
-                if (nodestoreHash.isNonZero())
-                {
-                    nodestoreHashes.push_back(nodestoreHash);
-                }
-                else
-                {
-                    assert(false);
-                }
-            }
-            else
-            {
-                assert(false);
-            }
-        }
-
-        std::vector<std::pair<
-            std::shared_ptr<ripple::STTx const>,
-            std::shared_ptr<ripple::STObject const>>>
-            results;
-        auto dbResults = backend.fetchTransactions(nodestoreHashes);
-        for (auto const& res : dbResults)
-        {
-            if (res.transaction.size() && res.metadata.size())
-                results.push_back(deserializeTxPlusMeta(res));
-        }
-        return results;
-    }
-    return {};
-}
 
 // {
 // account: account,
@@ -190,7 +94,9 @@ doAccountTx(boost::json::object const& request, BackendInterface const& backend)
     auto [blobs, retCursor] =
         backend.fetchAccountTransactions(*account, limit, cursor);
     auto end = std::chrono::system_clock::now();
-    BOOST_LOG_TRIVIAL(info) << __func__ << " db fetch took " << ((end - start).count() / 1000000000.0) << " num blobs = " << blobs.size();
+    BOOST_LOG_TRIVIAL(info) << __func__ << " db fetch took "
+                            << ((end - start).count() / 1000000000.0)
+                            << " num blobs = " << blobs.size();
     for (auto const& txnPlusMeta : blobs)
     {
         if (txnPlusMeta.ledgerSequence > ledgerSequence)
@@ -204,8 +110,8 @@ doAccountTx(boost::json::object const& request, BackendInterface const& backend)
         if (!binary)
         {
             auto [txn, meta] = deserializeTxPlusMeta(txnPlusMeta);
-            obj["transaction"] = getJson(*txn);
-            obj["metadata"] = getJson(*meta);
+            obj["transaction"] = toJson(*txn);
+            obj["metadata"] = toJson(*meta);
         }
         else
         {
@@ -224,7 +130,8 @@ doAccountTx(boost::json::object const& request, BackendInterface const& backend)
         response["cursor"] = cursorJson;
     }
     auto end2 = std::chrono::system_clock::now();
-    BOOST_LOG_TRIVIAL(info) << __func__ << " serialization took " << ((end2 - end).count() / 1000000000.0);
+    BOOST_LOG_TRIVIAL(info) << __func__ << " serialization took "
+                            << ((end2 - end).count() / 1000000000.0);
     return response;
 }
 
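A note on the timing arithmetic in the log lines just reflowed above: dividing the raw duration tick count by 1000000000.0 assumes nanosecond ticks, which holds for the clocks used here on common Linux standard libraries but is not guaranteed by the C++ standard. A tick-agnostic equivalent, shown only as an illustration:

    auto start = std::chrono::system_clock::now();
    // ... timed work ...
    auto end = std::chrono::system_clock::now();
    double seconds = std::chrono::duration<double>(end - start).count();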
@@ -272,7 +272,7 @@ doBookOffers(
     ripple::SLE offer{it, obj.key};
     ripple::uint256 bookDir = offer.getFieldH256(ripple::sfBookDirectory);
 
-    boost::json::object offerJson = getJson(offer);
+    boost::json::object offerJson = toJson(offer);
     offerJson["quality"] = ripple::amountFromQuality(getQuality(bookDir)).getText();
     jsonOffers.push_back(offerJson);
 }
@@ -1,21 +1,5 @@
 #include <handlers/RPCHelpers.h>
 #include <reporting/BackendInterface.h>
-std::vector<unsigned char>
-ledgerInfoToBlob(ripple::LedgerInfo const& info)
-{
-    ripple::Serializer s;
-    s.add32(info.seq);
-    s.add64(info.drops.drops());
-    s.addBitString(info.parentHash);
-    s.addBitString(info.txHash);
-    s.addBitString(info.accountHash);
-    s.add32(info.parentCloseTime.time_since_epoch().count());
-    s.add32(info.closeTime.time_since_epoch().count());
-    s.add8(info.closeTimeResolution.count());
-    s.add8(info.closeFlags);
-    // s.addBitString(info.hash);
-    return s.peekData();
-}
 
 boost::json::object
 doLedger(boost::json::object const& request, BackendInterface const& backend)
@@ -53,19 +37,7 @@ doLedger(boost::json::object const& request, BackendInterface const& backend)
     }
     else
     {
-        header["ledger_sequence"] = lgrInfo->seq;
-        header["ledger_hash"] = ripple::strHex(lgrInfo->hash);
-        header["txns_hash"] = ripple::strHex(lgrInfo->txHash);
-        header["state_hash"] = ripple::strHex(lgrInfo->accountHash);
-        header["parent_hash"] = ripple::strHex(lgrInfo->parentHash);
-        header["total_coins"] = ripple::to_string(lgrInfo->drops);
-        header["close_flags"] = lgrInfo->closeFlags;
-
-        // Always show fields that contribute to the ledger hash
-        header["parent_close_time"] =
-            lgrInfo->parentCloseTime.time_since_epoch().count();
-        header["close_time"] = lgrInfo->closeTime.time_since_epoch().count();
-        header["close_time_resolution"] = lgrInfo->closeTimeResolution.count();
+        header = toJson(*lgrInfo);
     }
     response["header"] = header;
     if (getTransactions)
@@ -86,8 +58,8 @@ doLedger(boost::json::object const& request, BackendInterface const& backend)
         if (!binary)
         {
             auto [sttx, meta] = deserializeTxPlusMeta(obj);
-            entry["transaction"] = getJson(*sttx);
-            entry["metadata"] = getJson(*meta);
+            entry["transaction"] = toJson(*sttx);
+            entry["metadata"] = toJson(*meta);
         }
         else
         {
@@ -59,7 +59,12 @@ doLedgerData(
     {
         BOOST_LOG_TRIVIAL(debug) << __func__ << " : parsing cursor";
         cursor = ripple::uint256{};
-        cursor->parseHex(request.at("cursor").as_string().c_str());
+        if (!cursor->parseHex(request.at("cursor").as_string().c_str()))
+        {
+            response["error"] = "Invalid cursor";
+            response["request"] = request;
+            return response;
+        }
     }
     bool binary =
         request.contains("binary") ? request.at("binary").as_bool() : false;
@@ -91,7 +96,7 @@ doLedgerData(
         objects.push_back(entry);
     }
     else
-        objects.push_back(getJson(sle));
+        objects.push_back(toJson(sle));
 }
 response["objects"] = objects;
 if (returnedCursor)
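The change above starts checking parseHex's boolean result instead of discarding it (the diff itself confirms the bool return). A minimal illustration of why this matters: with the result ignored, a malformed cursor string would leave the handler paging from a default-constructed (zero) cursor rather than failing fast.

    ripple::uint256 cursor;
    if (!cursor.parseHex("not-64-hex-chars"))
    {
        // reject the request here instead of silently scanning from key 0
    }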
@@ -47,7 +47,7 @@ doLedgerEntry(
 {
     ripple::STLedgerEntry sle{
         ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key};
-    response["object"] = getJson(sle);
+    response["object"] = toJson(sle);
 }
 
 return response;
@@ -69,7 +69,7 @@ deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs, std::uint32_
 }
 
 boost::json::object
-getJson(ripple::STBase const& obj)
+toJson(ripple::STBase const& obj)
 {
     auto start = std::chrono::system_clock::now();
     boost::json::value value = boost::json::parse(
@@ -82,6 +82,7 @@ getJson(ripple::STBase const& obj)
 }
 
 boost::json::object
+<<<<<<< HEAD
 getJson(ripple::TxMeta const& meta)
 {
     auto start = std::chrono::system_clock::now();
@@ -105,6 +106,9 @@ getJson(Json::Value const& value)
 
 boost::json::object
 getJson(ripple::SLE const& sle)
+=======
+toJson(ripple::SLE const& sle)
+>>>>>>> dev
 {
     auto start = std::chrono::system_clock::now();
     boost::json::value value = boost::json::parse(
@@ -115,6 +119,27 @@ getJson(ripple::SLE const& sle)
             .count();
     return value.as_object();
 }
 
+boost::json::object
+toJson(ripple::LedgerInfo const& lgrInfo)
+{
+    boost::json::object header;
+    header["ledger_sequence"] = lgrInfo.seq;
+    header["ledger_hash"] = ripple::strHex(lgrInfo.hash);
+    header["txns_hash"] = ripple::strHex(lgrInfo.txHash);
+    header["state_hash"] = ripple::strHex(lgrInfo.accountHash);
+    header["parent_hash"] = ripple::strHex(lgrInfo.parentHash);
+    header["total_coins"] = ripple::to_string(lgrInfo.drops);
+    header["close_flags"] = lgrInfo.closeFlags;
+
+    // Always show fields that contribute to the ledger hash
+    header["parent_close_time"] =
+        lgrInfo.parentCloseTime.time_since_epoch().count();
+    header["close_time"] = lgrInfo.closeTime.time_since_epoch().count();
+    header["close_time_resolution"] = lgrInfo.closeTimeResolution.count();
+    return header;
+}
+
 std::optional<uint32_t>
 ledgerSequenceFromRequest(
     boost::json::object const& request,
@@ -129,6 +154,7 @@ ledgerSequenceFromRequest(
         return request.at("ledger_index").as_int64();
     }
 }
+<<<<<<< HEAD
 
 std::optional<ripple::uint256>
 traverseOwnedNodes(
@@ -380,4 +406,21 @@ getAccountsFromTransaction(boost::json::object const& transaction)
     }
 
     return accounts;
+=======
+std::vector<unsigned char>
+ledgerInfoToBlob(ripple::LedgerInfo const& info)
+{
+    ripple::Serializer s;
+    s.add32(info.seq);
+    s.add64(info.drops.drops());
+    s.addBitString(info.parentHash);
+    s.addBitString(info.txHash);
+    s.addBitString(info.accountHash);
+    s.add32(info.parentCloseTime.time_since_epoch().count());
+    s.add32(info.closeTime.time_since_epoch().count());
+    s.add8(info.closeTimeResolution.count());
+    s.add8(info.closeFlags);
+    s.addBitString(info.hash);
+    return s.peekData();
+>>>>>>> dev
 }
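The toJson(ripple::LedgerInfo) overload added above centralizes the ledger-header serialization that doLedger previously performed field by field. Usage sketch (the key list is copied directly from the function body in this diff):

    boost::json::object header = toJson(lgrInfo);
    // keys: ledger_sequence, ledger_hash, txns_hash, state_hash, parent_hash,
    // total_coins, close_flags, parent_close_time, close_time,
    // close_time_resolution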
@@ -21,10 +21,13 @@ std::pair<
 deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs, std::uint32_t seq);
 
 boost::json::object
-getJson(ripple::STBase const& obj);
+toJson(ripple::STBase const& obj);
 
 boost::json::object
-getJson(ripple::SLE const& sle);
+toJson(ripple::SLE const& sle);
 
+boost::json::object
+toJson(ripple::LedgerInfo const& info);
+
 boost::json::object
 getJson(ripple::TxMeta const& meta);
@@ -37,6 +40,7 @@ ledgerSequenceFromRequest(
     boost::json::object const& request,
     BackendInterface const& backend);
 
+<<<<<<< HEAD
 std::optional<ripple::uint256>
 traverseOwnedNodes(
     BackendInterface const& backend,
@@ -52,5 +56,9 @@ keypairFromRequst(
 
 std::vector<ripple::AccountID>
 getAccountsFromTransaction(boost::json::object const& transaction);
+=======
+std::vector<unsigned char>
+ledgerInfoToBlob(ripple::LedgerInfo const& info);
+>>>>>>> dev
 
 #endif
handlers/ServerInfo.cpp (new file): 53 lines
@@ -0,0 +1,53 @@
+#include <handlers/RPCHelpers.h>
+#include <reporting/BackendInterface.h>
+boost::json::object
+doServerInfo(
+    boost::json::object const& request,
+    BackendInterface const& backend)
+{
+    boost::json::object response;
+
+    auto rng = backend.fetchLedgerRange();
+    if (!rng)
+    {
+        response["complete_ledgers"] = "empty";
+    }
+    else
+    {
+        std::string completeLedgers = std::to_string(rng->minSequence);
+        if (rng->maxSequence != rng->minSequence)
+            completeLedgers += "-" + std::to_string(rng->maxSequence);
+        response["complete_ledgers"] = completeLedgers;
+    }
+    if (rng)
+    {
+        auto lgrInfo = backend.fetchLedgerBySequence(rng->maxSequence);
+        response["validated_ledger"] = toJson(*lgrInfo);
+    }
+
+    boost::json::array indexes;
+
+    if (rng)
+    {
+        uint32_t cur = rng->minSequence;
+        while (cur <= rng->maxSequence + 1)
+        {
+            auto keyIndex = backend.getKeyIndexOfSeq(cur);
+            assert(keyIndex.has_value());
+            cur = keyIndex->keyIndex;
+            boost::json::object entry;
+            entry["complete"] = backend.isLedgerIndexed(cur);
+            entry["sequence"] = cur;
+            indexes.emplace_back(entry);
+            cur = cur + 1;
+        }
+    }
+    response["indexes"] = indexes;
+    auto indexing = backend.getIndexer().getCurrentlyIndexing();
+    if (indexing)
+        response["indexing"] = *indexing;
+    else
+        response["indexing"] = "none";
+
+    return response;
+}
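For orientation, a hypothetical call site for the new handler (the request object is accepted but not read by the code above):

    boost::json::object request;
    boost::json::object info = doServerInfo(request, backend);
    // info["complete_ledgers"] is a range string such as "2-1000", or "empty";
    // info["indexes"] lists each flag ledger with its "complete" status.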
@@ -63,8 +63,8 @@ doTx(boost::json::object const& request, BackendInterface const& backend)
     if (!binary)
     {
         auto [sttx, meta] = deserializeTxPlusMeta(dbResponse.value());
-        response["transaction"] = getJson(*sttx);
-        response["metadata"] = getJson(*meta);
+        response["transaction"] = toJson(*sttx);
+        response["metadata"] = toJson(*meta);
     }
     else
     {
@@ -10,9 +10,13 @@ namespace Backend {
 std::unique_ptr<BackendInterface>
 make_Backend(boost::json::object const& config)
 {
+<<<<<<< HEAD
     BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface";
 
     boost::json::object const& dbConfig = config.at("database").as_object();
+=======
+    boost::json::object dbConfig = config.at("database").as_object();
+>>>>>>> dev
 
     bool readOnly = false;
     if (config.contains("read_only"))
@@ -24,7 +28,14 @@ make_Backend(boost::json::object const& config)
 
     if (boost::iequals(type, "cassandra"))
     {
+<<<<<<< HEAD
         backend =
+=======
+        if (config.contains("online_delete"))
+            dbConfig.at(type).as_object()["ttl"] =
+                config.at("online_delete").as_int64() * 4;
+        auto backend =
+>>>>>>> dev
             std::make_unique<CassandraBackend>(dbConfig.at(type).as_object());
     }
     else if (boost::iequals(type, "postgres"))
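On the dev side of the conflict above, online_delete (a ledger count) is multiplied by 4 to produce the Cassandra ttl. The factor is not explained anywhere in this diff; a plausible reading, stated here only as an assumption, is an approximate seconds-per-ledger conversion that turns a retention window in ledgers into a TTL in seconds. Worked example under that assumption:

    int64_t onlineDelete = 75000;           // ledgers to retain (illustrative)
    int64_t ttlSeconds = onlineDelete * 4;  // 300000 s, roughly 3.5 days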
@@ -2,236 +2,25 @@
 
 namespace Backend {
 BackendIndexer::BackendIndexer(boost::json::object const& config)
+    : strand_(ioc_)
 {
     if (config.contains("indexer_key_shift"))
         keyShift_ = config.at("indexer_key_shift").as_int64();
-    if (config.contains("indexer_book_shift"))
-        bookShift_ = config.at("indexer_book_shift").as_int64();
     work_.emplace(ioc_);
     ioThread_ = std::thread{[this]() { ioc_.run(); }};
 };
 BackendIndexer::~BackendIndexer()
 {
-    std::unique_lock lck(mutex_);
     work_.reset();
     ioThread_.join();
 }
 
 void
-BackendIndexer::addKey(ripple::uint256 const& key)
+BackendIndexer::addKey(ripple::uint256&& key)
 {
-    std::unique_lock lck(mtx);
-    keys.insert(key);
-    keysCumulative.insert(key);
-}
-void
-BackendIndexer::addKeyAsync(ripple::uint256 const& key)
-{
-    std::unique_lock lck(mtx);
-    keysCumulative.insert(key);
-}
-void
-BackendIndexer::deleteKey(ripple::uint256 const& key)
-{
-    std::unique_lock lck(mtx);
-    keysCumulative.erase(key);
-    if (populatingCacheAsync)
-        deletedKeys.insert(key);
+    keys.insert(std::move(key));
 }
 
-void
-BackendIndexer::addBookOffer(
-    ripple::uint256 const& book,
-    ripple::uint256 const& offerKey)
-{
-    std::unique_lock lck(mtx);
-    books[book].insert(offerKey);
-    booksCumulative[book].insert(offerKey);
-}
-void
-BackendIndexer::addBookOfferAsync(
-    ripple::uint256 const& book,
-    ripple::uint256 const& offerKey)
-{
-    std::unique_lock lck(mtx);
-    booksCumulative[book].insert(offerKey);
-}
-void
-BackendIndexer::deleteBookOffer(
-    ripple::uint256 const& book,
-    ripple::uint256 const& offerKey)
-{
-    std::unique_lock lck(mtx);
-    booksCumulative[book].erase(offerKey);
-    if (populatingCacheAsync)
-        deletedBooks[book].insert(offerKey);
-}
-
-void
-writeKeyFlagLedger(
-    uint32_t ledgerSequence,
-    uint32_t shift,
-    BackendInterface const& backend,
-    std::unordered_set<ripple::uint256> const& keys)
-{
-    uint32_t nextFlag = ((ledgerSequence >> shift << shift) + (1 << shift));
-    ripple::uint256 zero = {};
-    BOOST_LOG_TRIVIAL(info)
-        << __func__
-        << " starting. ledgerSequence = " << std::to_string(ledgerSequence)
-        << " nextFlag = " << std::to_string(nextFlag)
-        << " keys.size() = " << std::to_string(keys.size());
-    while (true)
-    {
-        try
-        {
-            auto [objects, curCursor, warning] =
-                backend.fetchLedgerPage({}, nextFlag, 1);
-            if (!warning)
-            {
-                BOOST_LOG_TRIVIAL(warning)
-                    << __func__ << " flag ledger already written. sequence = "
-                    << std::to_string(ledgerSequence)
-                    << " next flag = " << std::to_string(nextFlag)
-                    << "returning";
-                return;
-            }
-            break;
-        }
-        catch (DatabaseTimeout& t)
-        {
-            ;
-        }
-    }
-    auto start = std::chrono::system_clock::now();
-
-    backend.writeKeys(keys, nextFlag, true);
-    backend.writeKeys({zero}, nextFlag, true);
-    auto end = std::chrono::system_clock::now();
-    BOOST_LOG_TRIVIAL(info)
-        << __func__
-        << " finished. ledgerSequence = " << std::to_string(ledgerSequence)
-        << " nextFlag = " << std::to_string(nextFlag)
-        << " keys.size() = " << std::to_string(keys.size())
-        << std::chrono::duration_cast<std::chrono::seconds>(end - start)
-               .count();
-}
-void
-writeBookFlagLedger(
-    uint32_t ledgerSequence,
-    uint32_t shift,
-    BackendInterface const& backend,
-    std::unordered_map<
-        ripple::uint256,
-        std::unordered_set<ripple::uint256>> const& books)
-{
-    uint32_t nextFlag = ((ledgerSequence >> shift << shift) + (1 << shift));
-    ripple::uint256 zero = {};
-    BOOST_LOG_TRIVIAL(info)
-        << __func__
-        << " starting. ledgerSequence = " << std::to_string(ledgerSequence)
-        << " nextFlag = " << std::to_string(nextFlag)
-        << " books.size() = " << std::to_string(books.size());
-
-    auto start = std::chrono::system_clock::now();
-    backend.writeBooks(books, nextFlag, true);
-    backend.writeBooks({{zero, {zero}}}, nextFlag, true);
-    auto end = std::chrono::system_clock::now();
-
-    BOOST_LOG_TRIVIAL(info)
-        << __func__
-        << " finished. ledgerSequence = " << std::to_string(ledgerSequence)
-        << " nextFlag = " << std::to_string(nextFlag)
-        << " books.size() = " << std::to_string(books.size()) << " time = "
-        << std::chrono::duration_cast<std::chrono::seconds>(end - start)
-               .count();
-}
-
-void
-BackendIndexer::clearCaches()
-{
-    keysCumulative = {};
-    booksCumulative = {};
-}
-
-void
-BackendIndexer::doBooksRepair(
-    BackendInterface const& backend,
-    std::optional<uint32_t> sequence)
-{
-    auto rng = backend.fetchLedgerRangeNoThrow();
-
-    if (!rng)
-        return;
-
-    if (!sequence)
-        sequence = rng->maxSequence;
-
-    if(sequence < rng->minSequence)
-        sequence = rng->minSequence;
-
-    BOOST_LOG_TRIVIAL(info)
-        << __func__ << " sequence = " << std::to_string(*sequence);
-
-    ripple::uint256 zero = {};
-    while (true)
-    {
-        try
-        {
-            auto [objects, cursor, warning] =
-                backend.fetchBookOffers(zero, *sequence, 1);
-            if (!warning)
-            {
-                BOOST_LOG_TRIVIAL(warning)
-                    << __func__ << " flag ledger already written. sequence = "
-                    << std::to_string(*sequence) << "returning";
-                return;
-            }
-            else
-            {
-                uint32_t lower = (*sequence - 1) >> bookShift_ << bookShift_;
-                doBooksRepair(backend, lower);
-            }
-            break;
-        }
-        catch (DatabaseTimeout& t)
-        {
-            ;
-        }
-    }
-    std::optional<ripple::uint256> cursor;
-    while (true)
-    {
-        try
-        {
-            auto [objects, curCursor, warning] =
-                backend.fetchLedgerPage(cursor, *sequence, 2048);
-
-            BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
-            cursor = curCursor;
-            for (auto& obj : objects)
-            {
-                if (isOffer(obj.blob))
-                {
-                    auto book = getBook(obj.blob);
-                    booksRepair[book].insert(obj.key);
-                }
-            }
-            if (!cursor)
-                break;
-        }
-        catch (DatabaseTimeout const& e)
-        {
-            BOOST_LOG_TRIVIAL(warning)
-                << __func__ << " Database timeout fetching keys";
-            std::this_thread::sleep_for(std::chrono::seconds(2));
-        }
-    }
-    writeBookFlagLedger(*sequence, bookShift_, backend, booksRepair);
-    booksRepair = {};
-    BOOST_LOG_TRIVIAL(info)
-        << __func__ << " finished. sequence = " << std::to_string(*sequence);
-}
 void
 BackendIndexer::doKeysRepair(
     BackendInterface const& backend,
@@ -256,36 +45,29 @@ BackendIndexer::doKeysRepair(
     {
         try
         {
-            auto [objects, curCursor, warning] =
-                backend.fetchLedgerPage(cursor, *sequence, 2048);
-            // no cursor means this is the first page
-            if (!cursor)
-            {
-                // if there is no warning, we don't need to do a repair
-                // warning only shows up on the first page
-                if (!warning)
+            if (backend.isLedgerIndexed(*sequence))
             {
                 BOOST_LOG_TRIVIAL(info)
-                    << __func__
+                    << __func__ << " - " << std::to_string(*sequence)
                     << " flag ledger already written. returning";
                 return;
             }
             else
             {
+                BOOST_LOG_TRIVIAL(info)
+                    << __func__ << " - " << std::to_string(*sequence)
+                    << " flag ledger not written. recursing..";
                 uint32_t lower = (*sequence - 1) >> keyShift_ << keyShift_;
                 doKeysRepair(backend, lower);
+                BOOST_LOG_TRIVIAL(info)
+                    << __func__ << " - "
+                    << " sequence = " << std::to_string(*sequence)
+                    << " lower = " << std::to_string(lower)
+                    << " finished recursing. submitting repair ";
+                writeKeyFlagLedger(lower, backend);
+                return;
             }
         }
-
-            BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
-            cursor = curCursor;
-            for (auto& obj : objects)
-            {
-                keysRepair.insert(obj.key);
-            }
-            if (!cursor)
-                break;
-        }
         catch (DatabaseTimeout const& e)
         {
             BOOST_LOG_TRIVIAL(warning)
@@ -293,41 +75,96 @@ BackendIndexer::doKeysRepair(
             std::this_thread::sleep_for(std::chrono::seconds(2));
         }
     }
-    writeKeyFlagLedger(*sequence, keyShift_, backend, keysRepair);
-    keysRepair = {};
     BOOST_LOG_TRIVIAL(info)
         << __func__ << " finished. sequence = " << std::to_string(*sequence);
 }
 
 void
-BackendIndexer::populateCaches(BackendInterface const& backend)
+BackendIndexer::doKeysRepairAsync(
+    BackendInterface const& backend,
+    std::optional<uint32_t> sequence)
 {
-    auto rng = backend.fetchLedgerRangeNoThrow();
-    if (!rng)
-        return;
-    uint32_t sequence = rng->maxSequence;
-    BOOST_LOG_TRIVIAL(info)
-        << __func__ << " sequence = " << std::to_string(sequence);
-    doBooksRepair(backend, sequence);
+    boost::asio::post(strand_, [this, sequence, &backend]() {
         doKeysRepair(backend, sequence);
+    });
+}
+void
+BackendIndexer::writeKeyFlagLedger(
+    uint32_t ledgerSequence,
+    BackendInterface const& backend)
+{
+    auto nextFlag = getKeyIndexOfSeq(ledgerSequence + 1);
+    uint32_t lower = ledgerSequence >> keyShift_ << keyShift_;
+    BOOST_LOG_TRIVIAL(info)
+        << "writeKeyFlagLedger - "
+        << "next flag = " << std::to_string(nextFlag.keyIndex)
+        << "lower = " << std::to_string(lower)
+        << "ledgerSequence = " << std::to_string(ledgerSequence) << " starting";
+    ripple::uint256 zero = {};
     std::optional<ripple::uint256> cursor;
+    size_t numKeys = 0;
+    auto begin = std::chrono::system_clock::now();
     while (true)
     {
         try
         {
+            {
+                BOOST_LOG_TRIVIAL(info)
+                    << "writeKeyFlagLedger - checking for complete...";
+                if (backend.isLedgerIndexed(nextFlag.keyIndex))
+                {
+                    BOOST_LOG_TRIVIAL(warning)
+                        << "writeKeyFlagLedger - "
+                        << "flag ledger already written. flag = "
+                        << std::to_string(nextFlag.keyIndex)
+                        << " , ledger sequence = "
+                        << std::to_string(ledgerSequence);
+                    return;
+                }
+                BOOST_LOG_TRIVIAL(info)
+                    << "writeKeyFlagLedger - is not complete";
+            }
+            indexing_ = nextFlag.keyIndex;
+            auto start = std::chrono::system_clock::now();
             auto [objects, curCursor, warning] =
-                backend.fetchLedgerPage(cursor, sequence, 2048);
-            BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
+                backend.fetchLedgerPage(cursor, lower, 2048);
+            auto mid = std::chrono::system_clock::now();
+            // no cursor means this is the first page
+            if (!cursor)
+            {
+                if (warning)
+                {
+                    BOOST_LOG_TRIVIAL(error)
+                        << "writeKeyFlagLedger - "
+                        << " prev flag ledger not written "
+                        << std::to_string(nextFlag.keyIndex) << " : "
+                        << std::to_string(ledgerSequence);
+                    assert(false);
+                    throw std::runtime_error("Missing prev flag");
+                }
+            }
 
             cursor = curCursor;
+            std::unordered_set<ripple::uint256> keys;
             for (auto& obj : objects)
             {
-                addKeyAsync(obj.key);
-                if (isOffer(obj.blob))
-                {
-                    auto book = getBook(obj.blob);
-                    addBookOfferAsync(book, obj.key);
-                }
+                keys.insert(obj.key);
             }
+            backend.writeKeys(keys, nextFlag, true);
+            auto end = std::chrono::system_clock::now();
+            BOOST_LOG_TRIVIAL(debug)
+                << "writeKeyFlagLedger - " << std::to_string(nextFlag.keyIndex)
+                << " fetched a page "
+                << " cursor = "
+                << (cursor.has_value() ? ripple::strHex(*cursor)
+                                       : std::string{})
+                << " num keys = " << std::to_string(numKeys) << " fetch time = "
+                << std::chrono::duration_cast<std::chrono::milliseconds>(
+                       mid - start)
+                       .count()
+                << " write time = "
+                << std::chrono::duration_cast<std::chrono::milliseconds>(
+                       end - mid)
+                       .count();
             if (!cursor)
                 break;
         }
@@ -338,59 +175,16 @@ BackendIndexer::populateCaches(BackendInterface const& backend)
            std::this_thread::sleep_for(std::chrono::seconds(2));
         }
     }
-    // Do reconcilation. Remove anything from keys or books that shouldn't
-    // be there
-    {
-        std::unique_lock lck(mtx);
-        populatingCacheAsync = false;
-    }
-    for (auto& key : deletedKeys)
-    {
-        deleteKey(key);
-    }
-    for (auto& book : deletedBooks)
-    {
-        for (auto& offer : book.second)
-        {
-            deleteBookOffer(book.first, offer);
-        }
-    }
-    {
-        std::unique_lock lck(mtx);
-        deletedKeys = {};
-        deletedBooks = {};
-        cv_.notify_one();
-    }
+    backend.writeKeys({zero}, nextFlag, true);
+    auto end = std::chrono::system_clock::now();
     BOOST_LOG_TRIVIAL(info)
-        << __func__
-        << " finished. keys.size() = " << std::to_string(keysCumulative.size());
+        << "writeKeyFlagLedger - " << std::to_string(nextFlag.keyIndex)
+        << " finished. "
+        << " num keys = " << std::to_string(numKeys) << " total time = "
+        << std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
+               .count();
+    indexing_ = 0;
 }
-void
-BackendIndexer::populateCachesAsync(BackendInterface const& backend)
-{
-    if (keysCumulative.size() > 0)
-    {
-        BOOST_LOG_TRIVIAL(info)
-            << __func__ << " caches already populated. returning";
-        return;
-    }
-    {
-        std::unique_lock lck(mtx);
-        populatingCacheAsync = true;
-    }
-    BOOST_LOG_TRIVIAL(info) << __func__;
-    boost::asio::post(ioc_, [this, &backend]() { populateCaches(backend); });
-}
-
-void
-BackendIndexer::waitForCaches()
-{
-    std::unique_lock lck(mtx);
-    cv_.wait(lck, [this]() {
-        return !populatingCacheAsync && deletedKeys.size() == 0;
-    });
-}
-
 void
 BackendIndexer::writeKeyFlagLedgerAsync(
     uint32_t ledgerSequence,
@@ -400,28 +194,8 @@ BackendIndexer::writeKeyFlagLedgerAsync(
         << __func__
         << " starting. sequence = " << std::to_string(ledgerSequence);
 
-    waitForCaches();
-    auto keysCopy = keysCumulative;
-    boost::asio::post(ioc_, [=, this, &backend]() {
-        writeKeyFlagLedger(ledgerSequence, keyShift_, backend, keysCopy);
-    });
-    BOOST_LOG_TRIVIAL(info)
-        << __func__
-        << " finished. sequence = " << std::to_string(ledgerSequence);
-}
-void
-BackendIndexer::writeBookFlagLedgerAsync(
-    uint32_t ledgerSequence,
-    BackendInterface const& backend)
-{
-    BOOST_LOG_TRIVIAL(info)
-        << __func__
-        << " starting. sequence = " << std::to_string(ledgerSequence);
-
-    waitForCaches();
-    auto booksCopy = booksCumulative;
-    boost::asio::post(ioc_, [=, this, &backend]() {
-        writeBookFlagLedger(ledgerSequence, bookShift_, backend, booksCopy);
+    boost::asio::post(strand_, [this, ledgerSequence, &backend]() {
+        writeKeyFlagLedger(ledgerSequence, backend);
     });
     BOOST_LOG_TRIVIAL(info)
         << __func__
@@ -431,34 +205,37 @@ BackendIndexer::writeBookFlagLedgerAsync(
 void
 BackendIndexer::finish(uint32_t ledgerSequence, BackendInterface const& backend)
 {
-    BOOST_LOG_TRIVIAL(info)
+    BOOST_LOG_TRIVIAL(debug)
         << __func__
        << " starting. sequence = " << std::to_string(ledgerSequence);
     bool isFirst = false;
-    uint32_t keyIndex = getKeyIndexOfSeq(ledgerSequence);
-    uint32_t bookIndex = getBookIndexOfSeq(ledgerSequence);
+    auto keyIndex = getKeyIndexOfSeq(ledgerSequence);
+    if (isFirst_)
+    {
     auto rng = backend.fetchLedgerRangeNoThrow();
-    if (!rng || rng->minSequence == ledgerSequence)
+    if (rng && rng->minSequence != ledgerSequence)
+        isFirst_ = false;
+    else
     {
-        isFirst = true;
-        keyIndex = bookIndex = ledgerSequence;
+        keyIndex = KeyIndex{ledgerSequence};
+    }
     }
-    backend.writeKeys(keys, keyIndex);
-    backend.writeBooks(books, bookIndex);
-    if (isFirst)
-    {
-        ripple::uint256 zero = {};
-        backend.writeBooks({{zero, {zero}}}, ledgerSequence);
-        backend.writeKeys({zero}, ledgerSequence);
-        writeBookFlagLedgerAsync(ledgerSequence, backend);
-        writeKeyFlagLedgerAsync(ledgerSequence, backend);
-
+    backend.writeKeys(keys, keyIndex);
+    if (isFirst_)
+    {
+        // write completion record
+        ripple::uint256 zero = {};
+        backend.writeKeys({zero}, keyIndex);
+        // write next flag sychronously
+        keyIndex = getKeyIndexOfSeq(ledgerSequence + 1);
+        backend.writeKeys(keys, keyIndex);
+        backend.writeKeys({zero}, keyIndex);
     }
+    isFirst_ = false;
     keys = {};
-    books = {};
-    BOOST_LOG_TRIVIAL(info)
+    BOOST_LOG_TRIVIAL(debug)
         << __func__
         << " finished. sequence = " << std::to_string(ledgerSequence);
 
 }
 } // namespace Backend
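Several routines above round a ledger sequence to flag-ledger boundaries with shift arithmetic: seq >> shift << shift clears the low bits, giving the previous boundary, and adding 1 << shift gives the next one. Worked example with illustrative values:

    uint32_t shift = 16;                        // e.g. indexer_key_shift
    uint32_t seq = 100000;
    uint32_t lower = seq >> shift << shift;     // 65536
    uint32_t nextFlag = lower + (1u << shift);  // 131072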
reporting/BackendInterface.cpp (new file): 335 lines
@@ -0,0 +1,335 @@
|
|||||||
|
#include <ripple/protocol/Indexes.h>
|
||||||
|
#include <ripple/protocol/STLedgerEntry.h>
|
||||||
|
#include <reporting/BackendInterface.h>
|
||||||
|
namespace Backend {
|
||||||
|
bool
|
||||||
|
BackendInterface::finishWrites(uint32_t ledgerSequence) const
|
||||||
|
{
|
||||||
|
indexer_.finish(ledgerSequence, *this);
|
||||||
|
auto commitRes = doFinishWrites();
|
||||||
|
if (commitRes)
|
||||||
|
{
|
||||||
|
if (isFirst_)
|
||||||
|
indexer_.doKeysRepairAsync(*this, ledgerSequence);
|
||||||
|
if (indexer_.isKeyFlagLedger(ledgerSequence))
|
||||||
|
indexer_.writeKeyFlagLedgerAsync(ledgerSequence, *this);
|
||||||
|
isFirst_ = false;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// if commitRes is false, we are relinquishing control of ETL. We
|
||||||
|
// reset isFirst_ to true so that way if we later regain control of
|
||||||
|
// ETL, we trigger the index repair
|
||||||
|
isFirst_ = true;
|
||||||
|
}
|
||||||
|
return commitRes;
|
||||||
|
}
|
||||||
|
bool
|
||||||
|
BackendInterface::isLedgerIndexed(std::uint32_t ledgerSequence) const
|
||||||
|
{
|
||||||
|
auto keyIndex = getKeyIndexOfSeq(ledgerSequence);
|
||||||
|
if (keyIndex)
|
||||||
|
{
|
||||||
|
auto page = doFetchLedgerPage({}, ledgerSequence, 1);
|
||||||
|
return !page.warning.has_value();
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
void
|
||||||
|
BackendInterface::writeLedgerObject(
|
||||||
|
std::string&& key,
|
||||||
|
uint32_t seq,
|
||||||
|
std::string&& blob,
|
||||||
|
bool isCreated,
|
||||||
|
bool isDeleted,
|
||||||
|
std::optional<ripple::uint256>&& book) const
|
||||||
|
{
|
||||||
|
ripple::uint256 key256 = ripple::uint256::fromVoid(key.data());
|
||||||
|
indexer_.addKey(std::move(key256));
|
||||||
|
doWriteLedgerObject(
|
||||||
|
std::move(key),
|
||||||
|
seq,
|
||||||
|
std::move(blob),
|
||||||
|
isCreated,
|
||||||
|
isDeleted,
|
||||||
|
std::move(book));
|
||||||
|
}
|
||||||
|
std::optional<LedgerRange>
|
||||||
|
BackendInterface::fetchLedgerRangeNoThrow() const
|
||||||
|
{
|
||||||
|
BOOST_LOG_TRIVIAL(warning) << __func__;
|
||||||
|
while (true)
|
||||||
|
{
|
||||||
|
try
|
||||||
|
{
|
||||||
|
return fetchLedgerRange();
|
||||||
|
}
|
||||||
|
catch (DatabaseTimeout& t)
|
||||||
|
{
|
||||||
|
;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
std::optional<KeyIndex>
|
||||||
|
BackendInterface::getKeyIndexOfSeq(uint32_t seq) const
|
||||||
|
{
|
||||||
|
if (indexer_.isKeyFlagLedger(seq))
|
||||||
|
return KeyIndex{seq};
|
||||||
|
auto rng = fetchLedgerRange();
|
||||||
|
if (!rng)
|
||||||
|
return {};
|
||||||
|
if (rng->minSequence == seq)
|
||||||
|
return KeyIndex{seq};
|
||||||
|
return indexer_.getKeyIndexOfSeq(seq);
|
||||||
|
}
|
||||||
|
BookOffersPage
|
||||||
|
BackendInterface::fetchBookOffers(
|
||||||
|
ripple::uint256 const& book,
|
||||||
|
uint32_t ledgerSequence,
|
||||||
|
std::uint32_t limit,
|
||||||
|
std::optional<ripple::uint256> const& cursor) const
|
||||||
|
{
|
||||||
|
// TODO try to speed this up. This can take a few seconds. The goal is to
|
||||||
|
// get it down to a few hundred milliseconds.
|
||||||
|
BookOffersPage page;
|
||||||
|
const ripple::uint256 bookEnd = ripple::getQualityNext(book);
|
||||||
|
ripple::uint256 uTipIndex = book;
|
||||||
|
bool done = false;
|
||||||
|
std::vector<ripple::uint256> keys;
|
||||||
|
auto getMillis = [](auto diff) {
|
||||||
|
return std::chrono::duration_cast<std::chrono::milliseconds>(diff)
|
||||||
|
.count();
|
||||||
|
};
|
||||||
|
auto begin = std::chrono::system_clock::now();
|
||||||
|
uint32_t numSucc = 0;
|
||||||
|
uint32_t numPages = 0;
|
||||||
|
long succMillis = 0;
|
||||||
|
long pageMillis = 0;
|
||||||
|
while (keys.size() < limit)
|
||||||
|
{
|
||||||
|
auto mid1 = std::chrono::system_clock::now();
|
||||||
|
auto offerDir = fetchSuccessor(uTipIndex, ledgerSequence);
|
||||||
|
auto mid2 = std::chrono::system_clock::now();
|
||||||
|
numSucc++;
|
||||||
|
succMillis += getMillis(mid2 - mid1);
|
||||||
|
if (!offerDir || offerDir->key > bookEnd)
|
||||||
|
{
|
||||||
|
BOOST_LOG_TRIVIAL(debug) << __func__ << " - offerDir.has_value() "
|
||||||
|
<< offerDir.has_value() << " breaking";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
while (keys.size() < limit)
|
||||||
|
{
|
||||||
|
++numPages;
|
||||||
|
uTipIndex = offerDir->key;
|
||||||
|
ripple::STLedgerEntry sle{
|
||||||
|
ripple::SerialIter{
|
||||||
|
offerDir->blob.data(), offerDir->blob.size()},
|
||||||
|
offerDir->key};
|
||||||
|
auto indexes = sle.getFieldV256(ripple::sfIndexes);
|
||||||
|
keys.insert(keys.end(), indexes.begin(), indexes.end());
|
||||||
|
// TODO we probably don't have to wait here. We can probably fetch
|
||||||
|
// these objects in another thread, and move on to another page of
|
||||||
|
// the book directory, or another directory. We also could just
|
||||||
|
// accumulate all of the keys before fetching the offers
|
||||||
|
auto next = sle.getFieldU64(ripple::sfIndexNext);
|
||||||
|
if (!next)
|
||||||
|
{
|
||||||
|
BOOST_LOG_TRIVIAL(debug)
|
||||||
|
<< __func__ << " next is empty. breaking";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
auto nextKey = ripple::keylet::page(uTipIndex, next);
|
||||||
|
auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence);
|
||||||
|
assert(nextDir);
|
||||||
|
offerDir->blob = *nextDir;
|
||||||
|
offerDir->key = nextKey.key;
|
||||||
|
}
|
||||||
|
auto mid3 = std::chrono::system_clock::now();
|
||||||
|
pageMillis += getMillis(mid3 - mid2);
|
||||||
|
}
|
||||||
|
auto mid = std::chrono::system_clock::now();
|
||||||
|
auto objs = fetchLedgerObjects(keys, ledgerSequence);
|
||||||
|
for (size_t i = 0; i < keys.size(); ++i)
|
||||||
|
{
|
||||||
|
BOOST_LOG_TRIVIAL(trace)
|
||||||
|
<< __func__ << " key = " << ripple::strHex(keys[i])
|
||||||
|
<< " blob = " << ripple::strHex(objs[i]);
|
||||||
|
assert(objs[i].size());
|
||||||
|
page.offers.push_back({keys[i], objs[i]});
|
||||||
|
}
|
||||||
|
auto end = std::chrono::system_clock::now();
|
||||||
|
BOOST_LOG_TRIVIAL(info)
|
||||||
|
<< __func__ << " "
|
||||||
|
<< "Fetching " << std::to_string(keys.size()) << " keys took "
|
||||||
|
<< std::to_string(getMillis(mid - begin))
|
||||||
|
<< " milliseconds. Fetching next dir took "
|
||||||
|
<< std::to_string(succMillis) << " milliseonds. Fetched next dir "
|
||||||
|
<< std::to_string(numSucc) << " times"
|
||||||
|
<< " Fetching next page of dir took " << std::to_string(pageMillis)
|
||||||
|
<< ". num pages = " << std::to_string(numPages)
|
||||||
|
<< " milliseconds. Fetching all objects took "
|
||||||
|
<< std::to_string(getMillis(end - mid))
|
||||||
|
<< " milliseconds. total time = "
|
||||||
|
<< std::to_string(getMillis(end - begin)) << " milliseconds";
|
||||||
|
|
||||||
|
return page;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::optional<LedgerObject>
|
||||||
|
BackendInterface::fetchSuccessor(ripple::uint256 key, uint32_t ledgerSequence)
|
||||||
|
const
|
||||||
|
{
|
||||||
|
auto start = std::chrono::system_clock::now();
|
||||||
|
auto page = fetchLedgerPage({++key}, ledgerSequence, 1, 512);
|
||||||
|
auto end = std::chrono::system_clock::now();
|
||||||
|
|
||||||
|
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
|
||||||
|
.count();
|
||||||
|
BOOST_LOG_TRIVIAL(debug)
|
||||||
|
<< __func__ << " took " << std::to_string(ms) << " milliseconds";
|
||||||
|
if (page.objects.size())
|
||||||
|
return page.objects[0];
|
||||||
|
return {};
|
||||||
|
}
+LedgerPage
+BackendInterface::fetchLedgerPage(
+    std::optional<ripple::uint256> const& cursor,
+    std::uint32_t ledgerSequence,
+    std::uint32_t limit,
+    std::uint32_t limitHint) const
+{
+    assert(limit != 0);
+    bool incomplete = !isLedgerIndexed(ledgerSequence);
+    // really low limits almost always miss
+    uint32_t adjustedLimit = std::max(limitHint, std::max(limit, (uint32_t)4));
+    LedgerPage page;
+    page.cursor = cursor;
+    do
+    {
+        adjustedLimit = adjustedLimit >= 8192 ? 8192 : adjustedLimit * 2;
+        auto start = std::chrono::system_clock::now();
+        auto partial =
+            doFetchLedgerPage(page.cursor, ledgerSequence, adjustedLimit);
+        auto end = std::chrono::system_clock::now();
+        BOOST_LOG_TRIVIAL(debug)
+            << __func__ << " " << std::to_string(ledgerSequence) << " "
+            << std::to_string(adjustedLimit) << " "
+            << ripple::strHex(*page.cursor) << " - time = "
+            << std::to_string(
+                   std::chrono::duration_cast<std::chrono::milliseconds>(
+                       end - start)
+                       .count());
+        page.objects.insert(
+            page.objects.end(), partial.objects.begin(), partial.objects.end());
+        page.cursor = partial.cursor;
+    } while (page.objects.size() < limit && page.cursor);
+    if (incomplete)
+    {
+        auto rng = fetchLedgerRange();
+        if (!rng)
+            return page;
+        if (rng->minSequence == ledgerSequence)
+        {
+            BOOST_LOG_TRIVIAL(fatal)
+                << __func__
+                << " Database is populated but first flag ledger is "
+                   "incomplete. This should never happen";
+            assert(false);
+            throw std::runtime_error("Missing base flag ledger");
+        }
+        uint32_t lowerSequence = (ledgerSequence - 1) >> indexer_.getKeyShift()
+                                                     << indexer_.getKeyShift();
+        if (lowerSequence < rng->minSequence)
+            lowerSequence = rng->minSequence;
+        BOOST_LOG_TRIVIAL(debug)
+            << __func__
+            << " recursing. ledgerSequence = " << std::to_string(ledgerSequence)
+            << " , lowerSequence = " << std::to_string(lowerSequence);
+        auto lowerPage = fetchLedgerPage(cursor, lowerSequence, limit);
+        std::vector<ripple::uint256> keys;
+        std::transform(
+            std::move_iterator(lowerPage.objects.begin()),
+            std::move_iterator(lowerPage.objects.end()),
+            std::back_inserter(keys),
+            [](auto&& elt) { return std::move(elt.key); });
+        auto objs = fetchLedgerObjects(keys, ledgerSequence);
+        for (size_t i = 0; i < keys.size(); ++i)
+        {
+            auto& obj = objs[i];
+            auto& key = keys[i];
+            if (obj.size())
+                page.objects.push_back({std::move(key), std::move(obj)});
+        }
+        std::sort(page.objects.begin(), page.objects.end(), [](auto a, auto b) {
+            return a.key < b.key;
+        });
+        page.warning = "Data may be incomplete";
+    }
+    if (page.objects.size() >= limit)
+    {
+        page.objects.resize(limit);
+        page.cursor = page.objects.back().key;
+    }
+    return page;
+}
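
The loop above keeps doubling the backend fetch size (capped at 8192) until it has collected `limit` objects or the cursor runs out. A self-contained sketch of that adaptive-limit paging pattern, with a vector standing in for the backend (illustrative only; not Clio's types):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Stand-in backend page: up to `limit` items plus a cursor for the next call.
struct Page
{
    std::vector<int> items;
    std::optional<std::size_t> cursor;  // empty when the data is exhausted
};

Page
doFetch(std::vector<int> const& data, std::size_t cursor, std::uint32_t limit)
{
    Page p;
    auto end = std::min(data.size(), cursor + limit);
    p.items.assign(data.begin() + cursor, data.begin() + end);
    if (end < data.size())
        p.cursor = end;
    return p;
}

std::vector<int>
fetchPage(std::vector<int> const& data, std::uint32_t limit)
{
    std::vector<int> out;
    std::optional<std::size_t> cursor = 0;
    std::uint32_t adjusted = std::max(limit, 4u);
    do
    {
        adjusted = adjusted >= 8192 ? 8192 : adjusted * 2;  // grow each round
        auto partial = doFetch(data, *cursor, adjusted);
        out.insert(out.end(), partial.items.begin(), partial.items.end());
        cursor = partial.cursor;
    } while (out.size() < limit && cursor);
    if (out.size() > limit)
        out.resize(limit);  // trim the overshoot, as fetchLedgerPage does
    return out;
}

int main()
{
    std::vector<int> data(100);
    std::cout << fetchPage(data, 50).size() << "\n";  // prints 50
}
```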
+
+void
+BackendInterface::checkFlagLedgers() const
+{
+    auto rng = fetchLedgerRangeNoThrow();
+    if (rng)
+    {
+        bool prevComplete = true;
+        uint32_t cur = rng->minSequence;
+        size_t numIncomplete = 0;
+        while (cur <= rng->maxSequence + 1)
+        {
+            auto keyIndex = getKeyIndexOfSeq(cur);
+            assert(keyIndex.has_value());
+            cur = keyIndex->keyIndex;
+
+            if (!isLedgerIndexed(cur))
+            {
+                BOOST_LOG_TRIVIAL(warning)
+                    << __func__ << " - flag ledger "
+                    << std::to_string(keyIndex->keyIndex) << " is incomplete";
+                ++numIncomplete;
+                prevComplete = false;
+            }
+            else
+            {
+                if (!prevComplete)
+                {
+                    BOOST_LOG_TRIVIAL(fatal)
+                        << __func__ << " - flag ledger "
+                        << std::to_string(keyIndex->keyIndex)
+                        << " is incomplete but the next is complete. This "
+                           "should never happen";
+                    assert(false);
+                    throw std::runtime_error("missing prev flag ledger");
+                }
+                prevComplete = true;
+                BOOST_LOG_TRIVIAL(info)
+                    << __func__ << " - flag ledger "
+                    << std::to_string(keyIndex->keyIndex) << " is complete";
+            }
+            cur = cur + 1;
+        }
+        if (numIncomplete > 1)
+        {
+            BOOST_LOG_TRIVIAL(warning)
+                << __func__ << " " << std::to_string(numIncomplete)
+                << " incomplete flag ledgers. "
+                   "This can happen, but is unlikely. Check indexer_key_shift "
+                   "in config";
+        }
+        else
+        {
+            BOOST_LOG_TRIVIAL(info)
+                << __func__ << " number of incomplete flag ledgers = "
+                << std::to_string(numIncomplete);
+        }
+    }
+}
+} // namespace Backend
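
checkFlagLedgers walks the range one flag ledger at a time; a flag (key) ledger is any sequence divisible by 2^keyShift. A tiny worked example of the shift arithmetic used by getKeyIndexOfSeq, assuming the indexer's default keyShift of 20 (stand-alone, simplified from the member functions):

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>

// A flag (key) ledger is any sequence divisible by 2^shift.
bool isKeyFlagLedger(std::uint32_t seq, std::uint32_t shift)
{
    return (seq % (1u << shift)) == 0;
}

// Round seq up to the next flag ledger (or return seq if it already is one).
std::uint32_t keyIndexOfSeq(std::uint32_t seq, std::uint32_t shift)
{
    if (isKeyFlagLedger(seq, shift))
        return seq;
    return (seq >> shift << shift) + (1u << shift);
}

int main()
{
    std::uint32_t const shift = 20;               // default keyShift_
    std::cout << keyIndexOfSeq(5'000'000, shift)  // 5242880 == 5 * 2^20
              << "\n";
    assert(isKeyFlagLedger(keyIndexOfSeq(5'000'000, shift), shift));
}
```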
@@ -39,6 +39,8 @@ struct TransactionAndMetadata
     Blob transaction;
     Blob metadata;
     uint32_t ledgerSequence;
+    bool
+    operator==(const TransactionAndMetadata&) const = default;
 };

 struct AccountTransactionsCursor
@@ -53,6 +55,19 @@ struct LedgerRange
     uint32_t maxSequence;
 };

+// The below two structs exist to prevent developers from accidentally mixing up
+// the two indexes.
+struct BookIndex
+{
+    uint32_t bookIndex;
+    explicit BookIndex(uint32_t v) : bookIndex(v){};
+};
+struct KeyIndex
+{
+    uint32_t keyIndex;
+    explicit KeyIndex(uint32_t v) : keyIndex(v){};
+};
+
 class DatabaseTimeout : public std::exception
 {
     const char*
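
BookIndex and KeyIndex are a classic strong-typedef trick: because the constructors are explicit and the two types are distinct, a raw uint32_t or a BookIndex can no longer be passed where a KeyIndex is expected. A compile-time demonstration (stand-alone; the `writeKeys` stub is illustrative, not the real signature):

```cpp
#include <cstdint>

struct BookIndex
{
    uint32_t bookIndex;
    explicit BookIndex(uint32_t v) : bookIndex(v){};
};
struct KeyIndex
{
    uint32_t keyIndex;
    explicit KeyIndex(uint32_t v) : keyIndex(v){};
};

void writeKeys(KeyIndex const& index) {}  // accepts a key index only

int main()
{
    writeKeys(KeyIndex{42});      // OK: intent is explicit at the call site
    // writeKeys(42);             // error: constructor is explicit
    // writeKeys(BookIndex{42});  // error: BookIndex is a different type
}
```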
@@ -65,60 +80,32 @@ class BackendInterface;
 class BackendIndexer
 {
     boost::asio::io_context ioc_;
+    boost::asio::io_context::strand strand_;
     std::mutex mutex_;
     std::optional<boost::asio::io_context::work> work_;
     std::thread ioThread_;
-    uint32_t keyShift_ = 20;
-    uint32_t bookShift_ = 10;
-    std::unordered_set<ripple::uint256> keys;
-    std::unordered_set<ripple::uint256> keysCumulative;
-    std::unordered_map<ripple::uint256, std::unordered_set<ripple::uint256>>
-        books;
-    std::unordered_map<ripple::uint256, std::unordered_set<ripple::uint256>>
-        booksCumulative;
-    bool populatingCacheAsync = false;
-    // These are only used when the cache is being populated asynchronously
-    std::unordered_set<ripple::uint256> deletedKeys;
-    std::unordered_map<ripple::uint256, std::unordered_set<ripple::uint256>>
-        deletedBooks;
-    std::unordered_set<ripple::uint256> keysRepair;
-    std::unordered_map<ripple::uint256, std::unordered_set<ripple::uint256>>
-        booksRepair;
-    std::mutex mtx;
-    std::condition_variable cv_;
-
+    std::atomic_uint32_t indexing_ = 0;
+
+    uint32_t keyShift_ = 20;
+    std::unordered_set<ripple::uint256> keys;
+
+    mutable bool isFirst_ = true;
     void
-    addKeyAsync(ripple::uint256 const& key);
+    doKeysRepair(
+        BackendInterface const& backend,
+        std::optional<uint32_t> sequence);
     void
-    addBookOfferAsync(
-        ripple::uint256 const& book,
-        ripple::uint256 const& offerKey);
+    writeKeyFlagLedger(
+        uint32_t ledgerSequence,
+        BackendInterface const& backend);

 public:
     BackendIndexer(boost::json::object const& config);
     ~BackendIndexer();

     void
-    populateCachesAsync(BackendInterface const& backend);
-    void
-    populateCaches(BackendInterface const& backend);
-    void
-    clearCaches();
-    // Blocking, possibly for minutes
-    void
-    waitForCaches();
-
-    void
-    addKey(ripple::uint256 const& key);
-    void
-    deleteKey(ripple::uint256 const& key);
-    void
-    addBookOffer(ripple::uint256 const& book, ripple::uint256 const& offerKey);
-
-    void
-    deleteBookOffer(
-        ripple::uint256 const& book,
-        ripple::uint256 const& offerKey);
-
+    addKey(ripple::uint256&& key);
     void
     finish(uint32_t ledgerSequence, BackendInterface const& backend);
@@ -127,59 +114,44 @@ public:
         uint32_t ledgerSequence,
         BackendInterface const& backend);
     void
-    writeBookFlagLedgerAsync(
-        uint32_t ledgerSequence,
-        BackendInterface const& backend);
-    void
-    doKeysRepair(
+    doKeysRepairAsync(
         BackendInterface const& backend,
         std::optional<uint32_t> sequence);
-    void
-    doBooksRepair(
-        BackendInterface const& backend,
-        std::optional<uint32_t> sequence);
-    uint32_t
-    getBookShift()
-    {
-        return bookShift_;
-    }
     uint32_t
     getKeyShift()
     {
         return keyShift_;
     }
-    uint32_t
+    std::optional<uint32_t>
+    getCurrentlyIndexing()
+    {
+        uint32_t cur = indexing_.load();
+        if (cur != 0)
+            return cur;
+        return {};
+    }
+    KeyIndex
     getKeyIndexOfSeq(uint32_t seq) const
     {
         if (isKeyFlagLedger(seq))
-            return seq;
+            return KeyIndex{seq};
         auto incr = (1 << keyShift_);
-        return (seq >> keyShift_ << keyShift_) + incr;
+        KeyIndex index{(seq >> keyShift_ << keyShift_) + incr};
+        assert(isKeyFlagLedger(index.keyIndex));
+        return index;
     }
     bool
     isKeyFlagLedger(uint32_t ledgerSequence) const
     {
         return (ledgerSequence % (1 << keyShift_)) == 0;
     }
-    uint32_t
-    getBookIndexOfSeq(uint32_t seq) const
-    {
-        if (isBookFlagLedger(seq))
-            return seq;
-        auto incr = (1 << bookShift_);
-        return (seq >> bookShift_ << bookShift_) + incr;
-    }
-    bool
-    isBookFlagLedger(uint32_t ledgerSequence) const
-    {
-        return (ledgerSequence % (1 << bookShift_)) == 0;
-    }
 };

 class BackendInterface
 {
 protected:
     mutable BackendIndexer indexer_;
+    mutable bool isFirst_ = true;

 public:
     // read methods
|
|||||||
return indexer_;
|
return indexer_;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<uint32_t>
|
void
|
||||||
getKeyIndexOfSeq(uint32_t seq) const
|
checkFlagLedgers() const;
|
||||||
{
|
|
||||||
if (indexer_.isKeyFlagLedger(seq))
|
std::optional<KeyIndex>
|
||||||
return seq;
|
getKeyIndexOfSeq(uint32_t seq) const;
|
||||||
auto rng = fetchLedgerRange();
|
|
||||||
if (!rng)
|
|
||||||
return {};
|
|
||||||
if (rng->minSequence == seq)
|
|
||||||
return seq;
|
|
||||||
return indexer_.getKeyIndexOfSeq(seq);
|
|
||||||
}
|
|
||||||
std::optional<uint32_t>
|
|
||||||
getBookIndexOfSeq(uint32_t seq) const
|
|
||||||
{
|
|
||||||
if (indexer_.isBookFlagLedger(seq))
|
|
||||||
return seq;
|
|
||||||
auto rng = fetchLedgerRange();
|
|
||||||
if (!rng)
|
|
||||||
return {};
|
|
||||||
if (rng->minSequence == seq)
|
|
||||||
return seq;
|
|
||||||
return indexer_.getBookIndexOfSeq(seq);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
bool
|
||||||
finishWrites(uint32_t ledgerSequence) const
|
finishWrites(uint32_t ledgerSequence) const;
|
||||||
{
|
|
||||||
indexer_.finish(ledgerSequence, *this);
|
|
||||||
auto commitRes = doFinishWrites();
|
|
||||||
if (commitRes)
|
|
||||||
{
|
|
||||||
if (indexer_.isBookFlagLedger(ledgerSequence))
|
|
||||||
indexer_.writeBookFlagLedgerAsync(ledgerSequence, *this);
|
|
||||||
if (indexer_.isKeyFlagLedger(ledgerSequence))
|
|
||||||
indexer_.writeKeyFlagLedgerAsync(ledgerSequence, *this);
|
|
||||||
}
|
|
||||||
return commitRes;
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual std::optional<uint32_t>
|
virtual std::optional<uint32_t>
|
||||||
fetchLatestLedgerSequence() const = 0;
|
fetchLatestLedgerSequence() const = 0;
|
||||||
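
finishWrites and the NoThrow range fetch move out of the header into BackendInterface.cpp; the NoThrow variant (deleted inline below) simply retries forever when the database times out. A generic, stand-alone sketch of that retry wrapper (simplified; Clio's version lives in the source file):

```cpp
#include <iostream>
#include <optional>
#include <stdexcept>

struct DatabaseTimeout : std::exception
{
    const char* what() const noexcept override { return "database timeout"; }
};

// Retry fn() until it stops throwing DatabaseTimeout.
template <class Fn>
auto retryOnTimeout(Fn&& fn)
{
    while (true)
    {
        try
        {
            return fn();
        }
        catch (DatabaseTimeout const&)
        {
            // swallow and retry; callers wanting the error call fn() directly
        }
    }
}

int main()
{
    int attempts = 0;
    auto range = retryOnTimeout([&]() -> std::optional<int> {
        if (++attempts < 3)
            throw DatabaseTimeout{};
        return 42;
    });
    std::cout << "succeeded after " << attempts << " attempts: " << *range
              << "\n";
}
```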
@@ -243,20 +184,7 @@ public:
     fetchLedgerRange() const = 0;

     std::optional<LedgerRange>
-    fetchLedgerRangeNoThrow() const
-    {
-        while (true)
-        {
-            try
-            {
-                return fetchLedgerRange();
-            }
-            catch (DatabaseTimeout& t)
-            {
-                ;
-            }
-        }
-    }
-
+    fetchLedgerRangeNoThrow() const;
     virtual std::optional<Blob>
     fetchLedgerObject(ripple::uint256 const& key, uint32_t sequence) const = 0;
@@ -271,19 +199,32 @@ public:
     virtual std::vector<ripple::uint256>
     fetchAllTransactionHashesInLedger(uint32_t ledgerSequence) const = 0;

-    virtual LedgerPage
+    LedgerPage
     fetchLedgerPage(
+        std::optional<ripple::uint256> const& cursor,
+        std::uint32_t ledgerSequence,
+        std::uint32_t limit,
+        std::uint32_t limitHint = 0) const;
+
+    bool
+    isLedgerIndexed(std::uint32_t ledgerSequence) const;
+
+    std::optional<LedgerObject>
+    fetchSuccessor(ripple::uint256 key, uint32_t ledgerSequence) const;
+
+    virtual LedgerPage
+    doFetchLedgerPage(
         std::optional<ripple::uint256> const& cursor,
         std::uint32_t ledgerSequence,
         std::uint32_t limit) const = 0;

     // TODO add warning for incomplete data
-    virtual BookOffersPage
+    BookOffersPage
     fetchBookOffers(
         ripple::uint256 const& book,
         uint32_t ledgerSequence,
         std::uint32_t limit,
-        std::optional<ripple::uint256> const& cursor = {}) const = 0;
+        std::optional<ripple::uint256> const& cursor = {}) const;

     virtual std::vector<TransactionAndMetadata>
     fetchTransactions(std::vector<ripple::uint256> const& hashes) const = 0;
@@ -316,28 +257,8 @@ public:
         std::string&& blob,
         bool isCreated,
         bool isDeleted,
-        std::optional<ripple::uint256>&& book) const
-    {
-        ripple::uint256 key256 = ripple::uint256::fromVoid(key.data());
-        if (isCreated)
-            indexer_.addKey(key256);
-        if (isDeleted)
-            indexer_.deleteKey(key256);
-        if (book)
-        {
-            if (isCreated)
-                indexer_.addBookOffer(*book, key256);
-            if (isDeleted)
-                indexer_.deleteBookOffer(*book, key256);
-        }
-        doWriteLedgerObject(
-            std::move(key),
-            seq,
-            std::move(blob),
-            isCreated,
-            isDeleted,
-            std::move(book));
-    }
+        std::optional<ripple::uint256>&& book) const;
     virtual void
     doWriteLedgerObject(
         std::string&& key,
@@ -377,18 +298,11 @@ public:
     doFinishWrites() const = 0;

     virtual bool
-    doOnlineDelete(uint32_t minLedgerToKeep) const = 0;
+    doOnlineDelete(uint32_t numLedgersToKeep) const = 0;
     virtual bool
     writeKeys(
         std::unordered_set<ripple::uint256> const& keys,
-        uint32_t ledgerSequence,
-        bool isAsync = false) const = 0;
-    virtual bool
-    writeBooks(
-        std::unordered_map<
-            ripple::uint256,
-            std::unordered_set<ripple::uint256>> const& books,
-        uint32_t ledgerSequence,
+        KeyIndex const& index,
         bool isAsync = false) const = 0;

     virtual ~BackendInterface()
@@ -394,7 +394,7 @@ CassandraBackend::fetchLedgerDiff(uint32_t ledgerSequence) const
     return objects;
 }
 LedgerPage
-CassandraBackend::fetchLedgerPage(
+CassandraBackend::doFetchLedgerPage(
     std::optional<ripple::uint256> const& cursor,
     std::uint32_t ledgerSequence,
     std::uint32_t limit) const
@@ -405,12 +405,12 @@ CassandraBackend::fetchLedgerPage(
     LedgerPage page;
     BOOST_LOG_TRIVIAL(debug)
         << __func__ << " ledgerSequence = " << std::to_string(ledgerSequence)
-        << " index = " << std::to_string(*index);
+        << " index = " << std::to_string(index->keyIndex);
     if (cursor)
         BOOST_LOG_TRIVIAL(debug)
             << __func__ << " - Cursor = " << ripple::strHex(*cursor);
     CassandraStatement statement{selectKeys_};
-    statement.bindInt(*index);
+    statement.bindInt(index->keyIndex);
     if (cursor)
         statement.bindBytes(*cursor);
     else
@@ -422,7 +422,7 @@ CassandraBackend::fetchLedgerPage(
     CassandraResult result = executeSyncRead(statement);
     if (!!result)
     {
-        BOOST_LOG_TRIVIAL(trace)
+        BOOST_LOG_TRIVIAL(debug)
             << __func__ << " - got keys - size = " << result.numRows();
         std::vector<ripple::uint256> keys;

@@ -430,17 +430,17 @@ CassandraBackend::fetchLedgerPage(
         {
             keys.push_back(result.getUInt256());
         } while (result.nextRow());
-        if (keys.size() && keys.size() == limit)
+        if (keys.size() && keys.size() >= limit)
         {
             page.cursor = keys.back();
-            keys.pop_back();
+            ++(*page.cursor);
         }
         auto objects = fetchLedgerObjects(keys, ledgerSequence);
         if (objects.size() != keys.size())
             throw std::runtime_error("Mismatch in size of objects and keys");

         if (cursor)
-            BOOST_LOG_TRIVIAL(trace)
+            BOOST_LOG_TRIVIAL(debug)
                 << __func__ << " Cursor = " << ripple::strHex(*page.cursor);

         for (size_t i = 0; i < objects.size(); ++i)
@@ -494,129 +494,6 @@ CassandraBackend::fetchLedgerObjects(
         << "Fetched " << numKeys << " records from Cassandra";
     return results;
 }
-BookOffersPage
-CassandraBackend::fetchBookOffers(
-    ripple::uint256 const& book,
-    uint32_t ledgerSequence,
-    std::uint32_t limit,
-    std::optional<ripple::uint256> const& cursor) const
-{
-    auto rng = fetchLedgerRange();
-    auto limitTuningFactor = 50;
-
-    if (!rng)
-        return {{}, {}};
-
-    auto readBooks =
-        [this, &book, &limit, &limitTuningFactor](std::uint32_t sequence)
-        -> std::pair<bool, std::vector<std::pair<std::uint64_t, ripple::uint256>>>
-    {
-        CassandraStatement completeQuery{completeBook_};
-        completeQuery.bindInt(sequence);
-        CassandraResult completeResult = executeSyncRead(completeQuery);
-        bool complete = completeResult.hasResult();
-
-        CassandraStatement statement{selectBook_};
-        std::vector<std::pair<std::uint64_t, ripple::uint256>> keys = {};
-
-        statement.bindBytes(book.data(), 24);
-        statement.bindInt(sequence);
-
-        BOOST_LOG_TRIVIAL(info)
-            << __func__ << " upper = " << std::to_string(sequence)
-            << " book = " << ripple::strHex(std::string((char*)book.data(), 24));
-
-        ripple::uint256 zero = beast::zero;
-        statement.bindBytes(zero.data(), 8);
-        statement.bindBytes(zero);
-
-        statement.bindUInt(limit * limitTuningFactor);
-
-        auto start = std::chrono::system_clock::now();
-        CassandraResult result = executeSyncRead(statement);
-        auto end = std::chrono::system_clock::now();
-        auto duration = ((end - start).count()) / 1000000000.0;
-
-        BOOST_LOG_TRIVIAL(info) << "Book directory fetch took "
-                                << std::to_string(duration) << " seconds.";
-
-        BOOST_LOG_TRIVIAL(debug) << __func__ << " - got keys";
-        if (!result)
-        {
-            return {false, {{}, {}}};
-        }
-
-        do
-        {
-            auto [quality, index] = result.getBytesTuple();
-            std::uint64_t q = 0;
-            memcpy(&q, quality.data(), 8);
-            keys.push_back({q, ripple::uint256::fromVoid(index.data())});
-        } while (result.nextRow());
-
-        return {complete, keys};
-    };
-
-    auto upper = indexer_.getBookIndexOfSeq(ledgerSequence);
-    auto [complete, quality_keys] = readBooks(upper);
-
-    BOOST_LOG_TRIVIAL(debug)
-        << __func__ << " - populated keys. num keys = " << quality_keys.size();
-
-    std::optional<std::string> warning = {};
-    if (!complete)
-    {
-        warning = "Data may be incomplete";
-        BOOST_LOG_TRIVIAL(info) << "May be incomplete. Fetching other page";
-
-        auto bookShift = indexer_.getBookShift();
-        std::uint32_t lower = upper - (1 << bookShift);
-        auto originalKeys = std::move(quality_keys);
-        auto [lowerComplete, otherKeys] = readBooks(lower);
-
-        assert(lowerComplete);
-
-        std::vector<std::pair<std::uint64_t, ripple::uint256>> merged_keys;
-        merged_keys.reserve(originalKeys.size() + otherKeys.size());
-        std::merge(originalKeys.begin(), originalKeys.end(),
-                   otherKeys.begin(), otherKeys.end(),
-                   std::back_inserter(merged_keys),
-                   [](auto pair1, auto pair2)
-                   {
-                       return pair1.first < pair2.first;
-                   });
-    }
-
-    std::vector<ripple::uint256> merged(quality_keys.size());
-    std::transform(quality_keys.begin(), quality_keys.end(),
-                   std::back_inserter(merged),
-                   [](auto pair) { return pair.second; });
-
-    auto uniqEnd = std::unique(merged.begin(), merged.end());
-    std::vector<ripple::uint256> keys{merged.begin(), uniqEnd};
-
-    std::cout << keys.size() << std::endl;
-
-    auto start = std::chrono::system_clock::now();
-    std::vector<Blob> objs = fetchLedgerObjects(keys, ledgerSequence);
-    auto end = std::chrono::system_clock::now();
-    auto duration = ((end - start).count()) / 1000000000.0;
-
-    BOOST_LOG_TRIVIAL(info) << "Book object fetch took "
-                            << std::to_string(duration) << " seconds.";
-
-    std::vector<LedgerObject> results;
-    for (size_t i = 0; i < objs.size(); ++i)
-    {
-        if (objs[i].size() != 0)
-            results.push_back({keys[i], objs[i]});
-    }
-
-    return {results, {}, warning};
-}
 struct WriteBookCallbackData
 {
     CassandraBackend const& backend;
@@ -723,6 +600,87 @@ struct WriteKeyCallbackData
     {
     }
 };
+struct OnlineDeleteCallbackData
+{
+    CassandraBackend const& backend;
+    ripple::uint256 key;
+    uint32_t ledgerSequence;
+    std::vector<unsigned char> object;
+    std::condition_variable& cv;
+    std::atomic_uint32_t& numOutstanding;
+    std::mutex& mtx;
+    uint32_t currentRetries = 0;
+    OnlineDeleteCallbackData(
+        CassandraBackend const& backend,
+        ripple::uint256&& key,
+        uint32_t ledgerSequence,
+        std::vector<unsigned char>&& object,
+        std::condition_variable& cv,
+        std::mutex& mtx,
+        std::atomic_uint32_t& numOutstanding)
+        : backend(backend)
+        , key(std::move(key))
+        , ledgerSequence(ledgerSequence)
+        , object(std::move(object))
+        , cv(cv)
+        , mtx(mtx)
+        , numOutstanding(numOutstanding)
+    {
+    }
+};
+void
+onlineDeleteCallback(CassFuture* fut, void* cbData);
+void
+onlineDelete(OnlineDeleteCallbackData& cb)
+{
+    {
+        CassandraStatement statement{
+            cb.backend.getInsertObjectPreparedStatement()};
+        statement.bindBytes(cb.key);
+        statement.bindInt(cb.ledgerSequence);
+        statement.bindBytes(cb.object);
+
+        cb.backend.executeAsyncWrite(statement, onlineDeleteCallback, cb, true);
+    }
+}
+void
+onlineDeleteCallback(CassFuture* fut, void* cbData)
+{
+    OnlineDeleteCallbackData& requestParams =
+        *static_cast<OnlineDeleteCallbackData*>(cbData);

+    CassandraBackend const& backend = requestParams.backend;
+    auto rc = cass_future_error_code(fut);
+    if (rc != CASS_OK)
+    {
+        // exponential backoff with a max wait of 2^10 ms (about 1 second)
+        auto wait = std::chrono::milliseconds(
+            lround(std::pow(2, std::min(10u, requestParams.currentRetries))));
+        BOOST_LOG_TRIVIAL(error)
+            << "ERROR!!! Cassandra insert book error: " << rc << ", "
+            << cass_error_desc(rc) << ", retrying in " << wait.count()
+            << " milliseconds";
+        ++requestParams.currentRetries;
+        std::shared_ptr<boost::asio::steady_timer> timer =
+            std::make_shared<boost::asio::steady_timer>(
+                backend.getIOContext(),
+                std::chrono::steady_clock::now() + wait);
+        timer->async_wait(
+            [timer, &requestParams](const boost::system::error_code& error) {
+                onlineDelete(requestParams);
+            });
+    }
+    else
+    {
+        BOOST_LOG_TRIVIAL(trace) << __func__ << " Successfully inserted a book";
+        {
+            std::lock_guard lck(requestParams.mtx);
+            --requestParams.numOutstanding;
+            requestParams.cv.notify_one();
+        }
+    }
+}
 void
 writeKeyCallback(CassFuture* fut, void* cbData);
 void
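
The callback above retries failed writes with exponential backoff, capped at 2^10 ms. A tiny stand-alone sketch of just the wait computation (same formula; no Cassandra or asio required):

```cpp
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <iostream>

// Wait 2^retries milliseconds, but never more than 2^10 (about a second).
std::chrono::milliseconds
backoff(std::uint32_t retries)
{
    return std::chrono::milliseconds(
        std::lround(std::pow(2, std::min(10u, retries))));
}

int main()
{
    for (std::uint32_t r : {0u, 3u, 10u, 20u})
        std::cout << "retry " << r << " -> " << backoff(r).count() << " ms\n";
    // retry 0 -> 1 ms ... retry 20 -> 1024 ms (capped)
}
```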
@@ -775,14 +733,9 @@ writeKeyCallback(CassFuture* fut, void* cbData)
 bool
 CassandraBackend::writeKeys(
     std::unordered_set<ripple::uint256> const& keys,
-    uint32_t ledgerSequence,
+    KeyIndex const& index,
     bool isAsync) const
 {
-    BOOST_LOG_TRIVIAL(info)
-        << __func__ << " Ledger = " << std::to_string(ledgerSequence)
-        << " . num keys = " << std::to_string(keys.size())
-        << " . concurrentLimit = "
-        << std::to_string(indexerMaxRequestsOutstanding);
     std::atomic_uint32_t numRemaining = keys.size();
     std::condition_variable cv;
     std::mutex mtx;
@@ -790,11 +743,16 @@ CassandraBackend::writeKeys(
     cbs.reserve(keys.size());
     uint32_t concurrentLimit =
         isAsync ? indexerMaxRequestsOutstanding : keys.size();
+    BOOST_LOG_TRIVIAL(debug)
+        << __func__ << " Ledger = " << std::to_string(index.keyIndex)
+        << " . num keys = " << std::to_string(keys.size())
+        << " . concurrentLimit = "
+        << std::to_string(indexerMaxRequestsOutstanding);
     uint32_t numSubmitted = 0;
     for (auto& key : keys)
     {
         cbs.push_back(std::make_shared<WriteKeyCallbackData>(
-            *this, key, ledgerSequence, cv, mtx, numRemaining));
+            *this, key, index.keyIndex, cv, mtx, numRemaining));
         writeKey(*cbs.back());
         ++numSubmitted;
         BOOST_LOG_TRIVIAL(trace) << __func__ << "Submitted a write request";
@@ -812,7 +770,7 @@ CassandraBackend::writeKeys(
                 concurrentLimit;
         });
         if (numSubmitted % 100000 == 0)
-            BOOST_LOG_TRIVIAL(info)
+            BOOST_LOG_TRIVIAL(debug)
                 << __func__ << " Submitted " << std::to_string(numSubmitted)
                 << " write requests. Completed "
                 << (keys.size() - numRemaining);
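
writeKeys throttles itself by waiting on a condition variable until the number of in-flight writes drops below concurrentLimit. A self-contained sketch of that pattern, using plain threads in place of Cassandra callbacks (illustrative only):

```cpp
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

int main()
{
    std::atomic<std::uint32_t> numOutstanding = 0;
    std::condition_variable cv;
    std::mutex mtx;
    std::uint32_t const concurrentLimit = 4;
    std::vector<std::thread> workers;

    for (int i = 0; i < 16; ++i)
    {
        ++numOutstanding;
        // "submit" an async write; its completion decrements and notifies
        workers.emplace_back([&] {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            std::lock_guard lck(mtx);
            --numOutstanding;
            cv.notify_one();
        });
        // block until there is room for another in-flight request
        std::unique_lock lck(mtx);
        cv.wait(lck, [&] { return numOutstanding < concurrentLimit; });
    }
    for (auto& w : workers)
        w.join();
    std::cout << "all writes finished\n";
}
```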
@@ -823,57 +781,6 @@ CassandraBackend::writeKeys(
     return true;
 }
-
-bool
-CassandraBackend::writeBooks(
-    std::unordered_map<
-        ripple::uint256,
-        std::unordered_set<ripple::uint256>> const& books,
-    uint32_t ledgerSequence,
-    bool isAsync) const
-{
-    BOOST_LOG_TRIVIAL(info)
-        << __func__ << " Ledger = " << std::to_string(ledgerSequence)
-        << " . num books = " << std::to_string(books.size());
-    std::condition_variable cv;
-    std::mutex mtx;
-    std::vector<std::shared_ptr<WriteBookCallbackData>> cbs;
-    uint32_t concurrentLimit =
-        isAsync ? indexerMaxRequestsOutstanding : maxRequestsOutstanding;
-    std::atomic_uint32_t numOutstanding = 0;
-    size_t count = 0;
-    auto start = std::chrono::system_clock::now();
-    for (auto& book : books)
-    {
-        for (auto& offer : book.second)
-        {
-            ++numOutstanding;
-            ++count;
-            cbs.push_back(std::make_shared<WriteBookCallbackData>(
-                *this,
-                book.first,
-                offer,
-                ledgerSequence,
-                cv,
-                mtx,
-                numOutstanding));
-            writeBook(*cbs.back());
-            BOOST_LOG_TRIVIAL(trace) << __func__ << "Submitted a write request";
-            std::unique_lock<std::mutex> lck(mtx);
-            BOOST_LOG_TRIVIAL(trace) << __func__ << "Got the mutex";
-            cv.wait(lck, [&numOutstanding, concurrentLimit]() {
-                return numOutstanding < concurrentLimit;
-            });
-        }
-    }
-    BOOST_LOG_TRIVIAL(info) << __func__
-                            << "Submitted all book writes. Waiting for them to "
-                               "finish. num submitted = "
-                            << std::to_string(count);
-    std::unique_lock<std::mutex> lck(mtx);
-    cv.wait(lck, [&numOutstanding]() { return numOutstanding == 0; });
-    BOOST_LOG_TRIVIAL(info) << __func__ << "Finished writing books";
-    return true;
-}
 bool
 CassandraBackend::isIndexed(uint32_t ledgerSequence) const
 {
@@ -1100,10 +1007,79 @@ CassandraBackend::runIndexer(uint32_t ledgerSequence) const
     */
 }
 bool
-CassandraBackend::doOnlineDelete(uint32_t minLedgerToKeep) const
+CassandraBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
 {
-    throw std::runtime_error("doOnlineDelete : unimplemented");
+    // calculate TTL
+    // ledgers close roughly every 4 seconds. We double the TTL so that way
+    // there is a window of time to update the database, to prevent unchanging
+    // records from being deleted.
+    auto rng = fetchLedgerRangeNoThrow();
+    if (!rng)
         return false;
+    uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
+    if (minLedger <= rng->minSequence)
+        return false;
+    std::condition_variable cv;
+    std::mutex mtx;
+    std::vector<std::shared_ptr<OnlineDeleteCallbackData>> cbs;
+    uint32_t concurrentLimit = 10;
+    std::atomic_uint32_t numOutstanding = 0;
+
+    // iterate through latest ledger, updating TTL
+    std::optional<ripple::uint256> cursor;
+    while (true)
+    {
+        try
+        {
+            auto [objects, curCursor, warning] =
+                fetchLedgerPage(cursor, minLedger, 256);
+            if (warning)
+            {
+                BOOST_LOG_TRIVIAL(warning)
+                    << __func__
+                    << " online delete running but flag ledger is not complete";
+                std::this_thread::sleep_for(std::chrono::seconds(10));
+                continue;
+            }
+
+            for (auto& obj : objects)
+            {
+                ++numOutstanding;
+                cbs.push_back(std::make_shared<OnlineDeleteCallbackData>(
+                    *this,
+                    std::move(obj.key),
+                    minLedger,
+                    std::move(obj.blob),
+                    cv,
+                    mtx,
+                    numOutstanding));
+
+                onlineDelete(*cbs.back());
+                std::unique_lock<std::mutex> lck(mtx);
+                BOOST_LOG_TRIVIAL(trace) << __func__ << "Got the mutex";
+                cv.wait(lck, [&numOutstanding, concurrentLimit]() {
+                    return numOutstanding < concurrentLimit;
+                });
+            }
+            BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
+            cursor = curCursor;
+            if (!cursor)
+                break;
+        }
+        catch (DatabaseTimeout const& e)
+        {
+            BOOST_LOG_TRIVIAL(warning)
+                << __func__ << " Database timeout fetching keys";
+            std::this_thread::sleep_for(std::chrono::seconds(2));
+        }
+    }
+    std::unique_lock<std::mutex> lck(mtx);
+    cv.wait(lck, [&numOutstanding]() { return numOutstanding == 0; });
+    CassandraStatement statement{deleteLedgerRange_};
+    statement.bindInt(minLedger);
+    executeSyncWrite(statement);
+    // update ledger_range
+    return true;
 }

 void
@@ -1117,6 +1093,11 @@ CassandraBackend::open(bool readOnly)
         }
         return {""};
     };
+    auto getInt = [this](std::string const& field) -> std::optional<int> {
+        if (config_.contains(field) && config_.at(field).is_int64())
+            return config_[field].as_int64();
+        return {};
+    };
     if (open_)
     {
         assert(false);
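
getInt collapses the two-step "is the field present and an integer?" check into one optional-returning lookup. A stand-alone usage sketch with Boost.JSON, the library the config object comes from (link with -lboost_json; values here are made up):

```cpp
#include <boost/json.hpp>
#include <iostream>
#include <optional>
#include <string>

std::optional<int>
getInt(boost::json::object const& config, std::string const& field)
{
    if (config.contains(field) && config.at(field).is_int64())
        return config.at(field).as_int64();
    return {};
}

int main()
{
    boost::json::object config{{"port", 9042}, {"keyspace", "clio"}};
    if (auto port = getInt(config, "port"))
        std::cout << "port = " << *port << "\n";  // present: 9042
    std::cout << getInt(config, "threads").has_value() << "\n";  // absent: 0
}
```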
@@ -1169,14 +1150,14 @@ CassandraBackend::open(bool readOnly)
         throw std::runtime_error(ss.str());
     }

-    int port = config_.contains("port") ? config_["port"].as_int64() : 0;
+    auto port = getInt("port");
     if (port)
     {
-        rc = cass_cluster_set_port(cluster, port);
+        rc = cass_cluster_set_port(cluster, *port);
         if (rc != CASS_OK)
         {
             std::stringstream ss;
-            ss << "nodestore: Error setting Cassandra port: " << port
+            ss << "nodestore: Error setting Cassandra port: " << *port
                << ", result: " << rc << ", " << cass_error_desc(rc);

             throw std::runtime_error(ss.str());
@@ -1204,8 +1185,7 @@ CassandraBackend::open(bool readOnly)
         cass_cluster_set_credentials(
             cluster, username.c_str(), getString("password").c_str());
     }
-    int threads = config_.contains("threads")
-        ? config_["threads"].as_int64()
+    int threads = getInt("threads") ? *getInt("threads")
         : std::thread::hardware_concurrency();

     rc = cass_cluster_set_num_threads_io(cluster, threads);
@@ -1216,6 +1196,8 @@ CassandraBackend::open(bool readOnly)
            << ", result: " << rc << ", " << cass_error_desc(rc);
         throw std::runtime_error(ss.str());
     }
+    if (getInt("max_requests_outstanding"))
+        maxRequestsOutstanding = *getInt("max_requests_outstanding");
+
     cass_cluster_set_request_timeout(cluster, 10000);

@@ -1272,10 +1254,13 @@ CassandraBackend::open(bool readOnly)
     std::string keyspace = getString("keyspace");
     if (keyspace.empty())
     {
-        throw std::runtime_error(
-            "nodestore: Missing keyspace in Cassandra config");
+        BOOST_LOG_TRIVIAL(warning)
+            << "No keyspace specified. Using keyspace oceand";
+        keyspace = "oceand";
     }

+    int rf = getInt("replication_factor") ? *getInt("replication_factor") : 3;
+
     std::string tablePrefix = getString("table_prefix");
     if (tablePrefix.empty())
     {
@@ -1284,6 +1269,19 @@ CassandraBackend::open(bool readOnly)

     cass_cluster_set_connect_timeout(cluster, 10000);

+    int ttl = getInt("ttl") ? *getInt("ttl") * 2 : 0;
+    int keysTtl = (ttl != 0 ? pow(2, indexer_.getKeyShift()) * 4 * 2 : 0);
+    int incr = keysTtl;
+    while (keysTtl < ttl)
+    {
+        keysTtl += incr;
+    }
+    int booksTtl = 0;
+    BOOST_LOG_TRIVIAL(info)
+        << __func__ << " setting ttl to " << std::to_string(ttl)
+        << " , books ttl to " << std::to_string(booksTtl) << " , keys ttl to "
+        << std::to_string(keysTtl);
+
     auto executeSimpleStatement = [this](std::string const& query) {
         CassStatement* statement = makeStatement(query.c_str(), 0);
         CassFuture* fut = cass_session_execute(session_.get(), statement);
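
The keys-table TTL starts at 2^keyShift * 4 * 2 seconds (one flag-ledger interval at roughly 4 s per ledger, doubled for safety) and is then raised in whole multiples of itself until it covers the configured object TTL. A worked example of that arithmetic, assuming the default keyShift of 20 and a hypothetical "ttl" config value:

```cpp
#include <cmath>
#include <iostream>

int main()
{
    int configuredTtl = 20'000'000;               // hypothetical config value (s)
    int ttl = configuredTtl * 2;                  // doubled: refresh window
    int keyShift = 20;                            // default indexer key shift
    int keysTtl = std::pow(2, keyShift) * 4 * 2;  // 8'388'608 s per interval
    int incr = keysTtl;
    while (keysTtl < ttl)                         // round up to a multiple >= ttl
        keysTtl += incr;
    std::cout << "ttl = " << ttl << ", keysTtl = " << keysTtl << "\n";
    // ttl = 40000000, keysTtl = 41943040 (5 * 8388608)
}
```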
@@ -1317,8 +1315,36 @@ CassandraBackend::open(bool readOnly)
         {
             std::stringstream ss;
             ss << "nodestore: Error connecting Cassandra session keyspace: "
+               << rc << ", " << cass_error_desc(rc)
+               << ", trying to create it ourselves";
+            BOOST_LOG_TRIVIAL(error) << ss.str();
+            // if the keyspace doesn't exist, try to create it
+            session_.reset(cass_session_new());
+            fut = cass_session_connect(session_.get(), cluster);
+            rc = cass_future_error_code(fut);
+            cass_future_free(fut);
+            if (rc != CASS_OK)
+            {
+                std::stringstream ss;
+                ss << "nodestore: Error connecting Cassandra session at all: "
                << rc << ", " << cass_error_desc(rc);
             BOOST_LOG_TRIVIAL(error) << ss.str();
+            }
+            else
+            {
+                std::stringstream query;
+                query << "CREATE KEYSPACE IF NOT EXISTS " << keyspace
+                      << " WITH replication = {'class': 'SimpleStrategy', "
+                         "'replication_factor': '"
+                      << std::to_string(rf) << "'} AND durable_writes = true";
+                if (!executeSimpleStatement(query.str()))
+                    continue;
+                query = {};
+                query << "USE " << keyspace;
+                if (!executeSimpleStatement(query.str()))
+                    continue;
+            }
+
             continue;
         }

@@ -1326,7 +1352,8 @@ CassandraBackend::open(bool readOnly)
         query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "objects"
               << " ( key blob, sequence bigint, object blob, PRIMARY "
                  "KEY(key, "
-                 "sequence)) WITH CLUSTERING ORDER BY (sequence DESC)";
+                 "sequence)) WITH CLUSTERING ORDER BY (sequence DESC) AND"
+              << " default_time_to_live = " << std::to_string(ttl);
         if (!executeSimpleStatement(query.str()))
             continue;

@@ -1337,6 +1364,7 @@ CassandraBackend::open(bool readOnly)
             continue;

         query.str("");
+<<<<<<< HEAD
         query << "CREATE INDEX ON " << tablePrefix << "objects(sequence)";
         if (!executeSimpleStatement(query.str()))
             continue;
@@ -1352,6 +1380,13 @@ CassandraBackend::open(bool readOnly)
               << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "transactions"
               << " ( hash blob PRIMARY KEY, ledger_sequence bigint, transaction "
                  "blob, metadata blob)";
+=======
+        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "transactions"
+              << " ( hash blob PRIMARY KEY, ledger_sequence bigint, "
+                 "transaction "
+                 "blob, metadata blob)"
+              << " WITH default_time_to_live = " << std::to_string(ttl);
+>>>>>>> dev
         if (!executeSimpleStatement(query.str()))
             continue;

@@ -1376,7 +1411,9 @@ CassandraBackend::open(bool readOnly)
         query.str("");
         query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "keys"
               << " ( sequence bigint, key blob, PRIMARY KEY "
-                 "(sequence, key))";
+                 "(sequence, key))"
+                 " WITH default_time_to_live = "
+              << std::to_string(keysTtl);
         if (!executeSimpleStatement(query.str()))
             continue;

@@ -1386,24 +1423,14 @@ CassandraBackend::open(bool readOnly)
         if (!executeSimpleStatement(query.str()))
             continue;
         query.str("");
-        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "books"
-              << " ( book blob, sequence bigint, quality_key tuple<blob, blob>, PRIMARY KEY "
-                 "((book, sequence), quality_key)) WITH CLUSTERING ORDER BY (quality_key "
-                 "ASC)";
-        if (!executeSimpleStatement(query.str()))
-            continue;
-        query.str("");
-        query << "SELECT * FROM " << tablePrefix << "books"
-              << " LIMIT 1";
-        if (!executeSimpleStatement(query.str()))
-            continue;
-        query.str("");
         query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "account_tx"
               << " ( account blob, seq_idx tuple<bigint, bigint>, "
                  " hash blob, "
                  "PRIMARY KEY "
                  "(account, seq_idx)) WITH "
-                 "CLUSTERING ORDER BY (seq_idx desc)";
+                 "CLUSTERING ORDER BY (seq_idx desc)"
+              << " AND default_time_to_live = " << std::to_string(ttl);
+
         if (!executeSimpleStatement(query.str()))
             continue;

@@ -1415,7 +1442,8 @@ CassandraBackend::open(bool readOnly)

         query.str("");
         query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledgers"
-              << " ( sequence bigint PRIMARY KEY, header blob )";
+              << " ( sequence bigint PRIMARY KEY, header blob )"
+              << " WITH default_time_to_live = " << std::to_string(ttl);
         if (!executeSimpleStatement(query.str()))
             continue;

@@ -1427,7 +1455,8 @@ CassandraBackend::open(bool readOnly)

         query.str("");
         query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledger_hashes"
-              << " (hash blob PRIMARY KEY, sequence bigint)";
+              << " (hash blob PRIMARY KEY, sequence bigint)"
+              << " WITH default_time_to_live = " << std::to_string(ttl);
         if (!executeSimpleStatement(query.str()))
             continue;

@@ -1478,12 +1507,15 @@ CassandraBackend::open(bool readOnly)
             continue;

         query.str("");
+<<<<<<< HEAD
         query << "INSERT INTO " << tablePrefix << "books"
               << " (book, sequence, quality_key) VALUES (?, ?, (?, ?))";
         if (!insertBook2_.prepareStatement(query, session_.get()))
             continue;

         query.str("");
+=======
+>>>>>>> dev
         query << "SELECT key FROM " << tablePrefix << "keys"
               << " WHERE sequence = ? AND key >= ? ORDER BY key ASC LIMIT ?";
         if (!selectKeys_.prepareStatement(query, session_.get()))
@@ -1541,24 +1573,6 @@ CassandraBackend::open(bool readOnly)
         if (!getToken_.prepareStatement(query, session_.get()))
             continue;

-        query.str("");
-        query << "SELECT quality_key FROM " << tablePrefix << "books "
-              << " WHERE book = ? AND sequence = ?"
-              << " AND quality_key >= (?, ?)"
-                 " ORDER BY quality_key ASC "
-                 " LIMIT ?";
-        if (!selectBook_.prepareStatement(query, session_.get()))
-            continue;
-
-        query.str("");
-        query << "SELECT * FROM " << tablePrefix << "books "
-              << "WHERE book = "
-              << "0x000000000000000000000000000000000000000000000000"
-              << " AND sequence = ?";
-        if (!completeBook_.prepareStatement(query, session_.get()))
-            continue;
-
-
         query.str("");
         query << " INSERT INTO " << tablePrefix << "account_tx"
               << " (account, seq_idx, hash) "
@@ -1591,6 +1605,11 @@ CassandraBackend::open(bool readOnly)
                  "(?,null)";
         if (!updateLedgerRange_.prepareStatement(query, session_.get()))
             continue;
+        query = {};
+        query << " update " << tablePrefix << "ledger_range"
+              << " set sequence = ? where is_latest = false";
+        if (!deleteLedgerRange_.prepareStatement(query, session_.get()))
+            continue;

         query.str("");
         query << " select header from " << tablePrefix
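
Note the two different reset idioms in open(): most statements use query.str(""), while the new block uses query = {}. Both clear the buffer, but only move-assignment also resets the stream's state flags. A small stand-alone comparison:

```cpp
#include <iostream>
#include <sstream>

int main()
{
    std::stringstream query;
    query << "SELECT 1";

    query.str("");  // clears the buffer; error/eof flags survive
    query << "SELECT 2";
    std::cout << query.str() << "\n";  // SELECT 2

    query = {};  // move-assign a fresh stream: buffer AND flags are reset
    query << "SELECT 3";
    std::cout << query.str() << "\n";  // SELECT 3
}
```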
@@ -1610,15 +1629,17 @@ CassandraBackend::open(bool readOnly)
               << " is_latest IN (true, false)";
         if (!selectLedgerRange_.prepareStatement(query, session_.get()))
             continue;
+        /*
         query.str("");
         query << " SELECT key,object FROM " << tablePrefix
               << "objects WHERE sequence = ?";
         if (!selectLedgerDiff_.prepareStatement(query, session_.get()))
             continue;
+        */
         setupPreparedStatements = true;
     }

+<<<<<<< HEAD
     if (config_.contains("max_requests_outstanding"))
     {
         maxRequestsOutstanding = config_["max_requests_outstanding"].as_int64();
@@ -1629,6 +1650,8 @@ CassandraBackend::open(bool readOnly)
             config_["indexer_max_requests_outstanding"].as_int64();
     }

+=======
+>>>>>>> dev
     work_.emplace(ioContext_);
     ioThread_ = std::thread{[this]() { ioContext_.run(); }};
     open_ = true;
@@ -166,6 +166,11 @@ public:
         bindBytes(data.data(), data.size());
     }
     void
+    bindBytes(std::vector<unsigned char> const& data)
+    {
+        bindBytes(data.data(), data.size());
+    }
+    void
     bindBytes(ripple::AccountID const& data)
     {
         bindBytes(data.data(), data.size());
@@ -649,6 +654,7 @@ private:
     CassandraPreparedStatement insertLedgerHeader_;
     CassandraPreparedStatement insertLedgerHash_;
     CassandraPreparedStatement updateLedgerRange_;
+    CassandraPreparedStatement deleteLedgerRange_;
     CassandraPreparedStatement updateLedgerHeader_;
     CassandraPreparedStatement selectLedgerBySeq_;
     CassandraPreparedStatement selectLatestLedger_;
@@ -735,6 +741,11 @@ public:
     {
         return insertBook2_;
     }
+    CassandraPreparedStatement const&
+    getInsertObjectPreparedStatement() const
+    {
+        return insertObject_;
+    }
+
     CassandraPreparedStatement const&
     getSelectLedgerDiffPreparedStatement() const
@@ -830,14 +841,6 @@ public:
     {
         // wait for all other writes to finish
         sync();
-        auto rng = fetchLedgerRangeNoThrow();
-        if (rng && rng->maxSequence >= ledgerSequence_)
-        {
-            BOOST_LOG_TRIVIAL(warning)
-                << __func__ << " Ledger " << std::to_string(ledgerSequence_)
-                << " already written. Returning";
-            return false;
-        }
         // write range
         if (isFirstLedger_)
         {
@@ -954,7 +957,7 @@ public:
         CassandraResult result = executeSyncRead(statement);
         if (!result)
         {
-            BOOST_LOG_TRIVIAL(error) << __func__ << " - no rows";
+            BOOST_LOG_TRIVIAL(debug) << __func__ << " - no rows";
             return {};
         }
         return result.getBytes();
@@ -994,7 +997,7 @@ public:
         return {{result.getBytes(), result.getBytes(), result.getUInt32()}};
     }
     LedgerPage
-    fetchLedgerPage(
+    doFetchLedgerPage(
         std::optional<ripple::uint256> const& cursor,
         std::uint32_t ledgerSequence,
         std::uint32_t limit) const override;
@@ -1014,21 +1017,8 @@ public:
     bool
     writeKeys(
         std::unordered_set<ripple::uint256> const& keys,
-        uint32_t ledgerSequence,
-        bool isAsync = false) const;
-    bool
-    writeBooks(
-        std::unordered_map<
-            ripple::uint256,
-            std::unordered_set<ripple::uint256>> const& books,
-        uint32_t ledgerSequence,
+        KeyIndex const& index,
         bool isAsync = false) const override;
-    BookOffersPage
-    fetchBookOffers(
-        ripple::uint256 const& book,
-        uint32_t sequence,
-        std::uint32_t limit,
-        std::optional<ripple::uint256> const& cursor) const override;
-
     bool
     canFetchBatch()
@@ -1353,7 +1343,7 @@ public:
         syncCv_.wait(lck, [this]() { return finishedAllRequests(); });
     }
     bool
-    doOnlineDelete(uint32_t minLedgerToKeep) const override;
+    doOnlineDelete(uint32_t numLedgersToKeep) const override;

     boost::asio::io_context&
     getIOContext() const
@@ -44,6 +44,8 @@ struct AccountTransactionsData
         , txHash(txHash)
     {
     }
+
+    AccountTransactionsData() = default;
 };

 template <class T>
@@ -253,10 +253,29 @@ public:
             ", grpc port : " + grpcPort_ + " }";
     }

+<<<<<<< HEAD
     boost::json::value
     toJson() const
     {
         return boost::json::string(toString());
+=======
+    boost::json::object
+    toJson() const
+    {
+        boost::json::object res;
+        res["validated_range"] = getValidatedRange();
+        res["is_connected"] = std::to_string(isConnected());
+        res["ip"] = ip_;
+        res["ws_port"] = wsPort_;
+        res["grpc_port"] = grpcPort_;
+        auto last = getLastMsgTime();
+        if (last.time_since_epoch().count() != 0)
+            res["last_msg_arrival_time"] = std::to_string(
+                std::chrono::duration_cast<std::chrono::milliseconds>(
+                    std::chrono::system_clock::now() - getLastMsgTime())
+                    .count());
+        return res;
+>>>>>>> dev
     }

     /// Download a ledger in full
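Review note: the dev side of this conflict turns ETLSource::toJson() into a structured boost::json::object instead of a flat string. A minimal, self-contained sketch of building and serializing such an object (the field values here are made up):

    #include <boost/json.hpp>
    #include <iostream>

    int
    main()
    {
        boost::json::object src;
        src["ip"] = "127.0.0.1";      // illustrative values only
        src["ws_port"] = "6006";
        src["is_connected"] = "true";
        std::cout << boost::json::serialize(src) << std::endl;
    }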
@@ -377,6 +396,7 @@ public:
    /// to clients).
    /// @param in ETLSource in question
    /// @return true if messages should be forwarded
+<<<<<<< HEAD
    bool
    shouldPropagateTxnStream(ETLSource* in) const
    {
@@ -398,11 +418,35 @@ public:
    }

    boost::json::value
+=======
+    // bool
+    // shouldPropagateTxnStream(ETLSource* in) const
+    // {
+    //     for (auto& src : sources_)
+    //     {
+    //         assert(src);
+    //         // We pick the first ETLSource encountered that is connected
+    //         if (src->isConnected())
+    //         {
+    //             if (src.get() == in)
+    //                 return true;
+    //             else
+    //                 return false;
+    //         }
+    //     }
+    //
+    //     // If no sources connected, then this stream has not been
+    //     forwarded. return true;
+    // }
+
+    boost::json::array
+>>>>>>> dev
    toJson() const
    {
        boost::json::array ret;
        for (auto& src : sources_)
        {
+<<<<<<< HEAD
            ret.push_back(src->toJson());
        }
        return ret;
@@ -418,6 +462,23 @@ public:
    /// @return response received from p2p node
    boost::json::object
    forwardToP2p(boost::json::object const& request) const;
+=======
+            ret.emplace_back(src->toJson());
+        }
+        return ret;
+    }
+    //
+    // /// Randomly select a p2p node to forward a gRPC request to
+    // /// @return gRPC stub to forward requests to p2p node
+    // std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
+    // getP2pForwardingStub() const;
+    //
+    // /// Forward a JSON RPC request to a randomly selected p2p node
+    // /// @param context context of the request
+    // /// @return response received from p2p node
+    // Json::Value
+    // forwardToP2p(RPC::JsonContext& context) const;
+>>>>>>> dev

private:
    /// f is a function that takes an ETLSource as an argument and returns a
@@ -40,6 +40,7 @@
 #include <functional>
 #include <iterator>
 #include <reporting/Pg.h>
+#include <signal.h>
 #include <sstream>
 #include <stdexcept>
 #include <string>
@@ -47,12 +48,11 @@
 #include <thread>
 #include <utility>
 #include <vector>
-#include <signal.h>

 static void
 noticeReceiver(void* arg, PGresult const* res)
 {
-    BOOST_LOG_TRIVIAL(debug) << "server message: " << PQresultErrorMessage(res);
+    BOOST_LOG_TRIVIAL(trace) << "server message: " << PQresultErrorMessage(res);
 }

 //-----------------------------------------------------------------------------
@@ -242,8 +242,10 @@ Pg::bulkInsert(char const* table, std::string const& records)
 {
     // https://www.postgresql.org/docs/12/libpq-copy.html#LIBPQ-COPY-SEND
     assert(conn_.get());
-    static auto copyCmd = boost::format(R"(COPY %s FROM stdin)");
-    auto res = query(boost::str(copyCmd % table).c_str());
+    auto copyCmd = boost::format(R"(COPY %s FROM stdin)");
+    auto formattedCmd = boost::str(copyCmd % table);
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " " << formattedCmd;
+    auto res = query(formattedCmd.c_str());
     if (!res || res.status() != PGRES_COPY_IN)
     {
         std::stringstream ss;
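Review note: bulkInsert drives libpq's COPY protocol; the change above just logs the formatted COPY command before running it. For reference, a standalone sketch of the same protocol using only documented libpq calls (PQexec, PQputCopyData, PQputCopyEnd, PQgetResult); everything except the libpq API is illustrative:

    #include <libpq-fe.h>
    #include <stdexcept>
    #include <string>

    void
    copyInto(PGconn* conn, std::string const& table, std::string const& records)
    {
        std::string cmd = "COPY " + table + " FROM stdin";
        PGresult* res = PQexec(conn, cmd.c_str());
        if (PQresultStatus(res) != PGRES_COPY_IN)
        {
            PQclear(res);
            throw std::runtime_error("COPY did not enter copy-in mode");
        }
        PQclear(res);

        // Feed tab-separated rows, then terminate the stream.
        if (PQputCopyData(conn, records.data(), static_cast<int>(records.size())) != 1 ||
            PQputCopyEnd(conn, nullptr) != 1)
            throw std::runtime_error(PQerrorMessage(conn));

        // Drain the final command result.
        while ((res = PQgetResult(conn)) != nullptr)
        {
            if (PQresultStatus(res) != PGRES_COMMAND_OK)
            {
                std::string msg = PQerrorMessage(conn);
                PQclear(res);
                throw std::runtime_error(msg);
            }
            PQclear(res);
        }
    }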
@@ -284,7 +286,8 @@ Pg::bulkInsert(char const* table, std::string const& records)
     {
         std::stringstream ss;
         ss << "bulkInsert to " << table
-           << ". PQputCopyEnd status not PGRES_COMMAND_OK: " << status;
+           << ". PQputCopyEnd status not PGRES_COMMAND_OK: " << status
+           << " message = " << PQerrorMessage(conn_.get());
         disconnect();
         BOOST_LOG_TRIVIAL(error) << __func__ << " " << records;
         throw std::runtime_error(ss.str());
@@ -347,11 +350,27 @@ PgPool::PgPool(boost::json::object const& config)
     */
     constexpr std::size_t maxFieldSize = 1024;
     constexpr std::size_t maxFields = 1000;
+    std::string conninfo = "postgres://";
+    auto getFieldAsString = [&config](auto field) {
+        if (!config.contains(field))
+            throw std::runtime_error(
+                field + std::string{" missing from postgres config"});
+        if (!config.at(field).is_string())
+            throw std::runtime_error(
+                field + std::string{" in postgres config is not a string"});
+        return std::string{config.at(field).as_string().c_str()};
+    };
+    conninfo += getFieldAsString("username");
+    conninfo += ":";
+    conninfo += getFieldAsString("password");
+    conninfo += "@";
+    conninfo += getFieldAsString("contact_point");
+    conninfo += "/";
+    conninfo += getFieldAsString("database");

     // The connection object must be freed using the libpq API PQfinish() call.
     pg_connection_type conn(
-        PQconnectdb(config.at("conninfo").as_string().c_str()),
-        [](PGconn* conn) { PQfinish(conn); });
+        PQconnectdb(conninfo.c_str()), [](PGconn* conn) { PQfinish(conn); });
     if (!conn)
         throw std::runtime_error("Can't create DB connection.");
     if (PQstatus(conn.get()) != CONNECTION_OK)
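Review note: the constructor now assembles a postgres:// URI from discrete config fields instead of taking a prebuilt "conninfo" string. A minimal sketch of the same assembly, assuming the config keys used in the diff (username, password, contact_point, database):

    #include <boost/json.hpp>
    #include <iostream>
    #include <string>

    std::string
    makeConninfo(boost::json::object const& config)
    {
        auto field = [&config](char const* key) {
            return std::string{config.at(key).as_string().c_str()};
        };
        // postgres://user:pass@host/db
        return "postgres://" + field("username") + ":" + field("password") +
            "@" + field("contact_point") + "/" + field("database");
    }

    int
    main()
    {
        boost::json::object config{
            {"username", "clio"},          // all values illustrative
            {"password", "secret"},
            {"contact_point", "127.0.0.1"},
            {"database", "reporting"}};
        std::cout << makeConninfo(config) << std::endl;
    }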
@@ -601,11 +620,28 @@ PgPool::checkin(std::unique_ptr<Pg>& pg)

 std::shared_ptr<PgPool>
 make_PgPool(boost::json::object const& config)
+{
+    try
     {
         auto ret = std::make_shared<PgPool>(config);
         ret->setup();
         return ret;
     }
+    catch (std::runtime_error& e)
+    {
+        boost::json::object configCopy = config;
+        configCopy["database"] = "postgres";
+        auto ret = std::make_shared<PgPool>(configCopy);
+        ret->setup();
+        PgQuery pgQuery{ret};
+        std::string query = "CREATE DATABASE " +
+            std::string{config.at("database").as_string().c_str()};
+        pgQuery(query.c_str());
+        ret = std::make_shared<PgPool>(config);
+        ret->setup();
+        return ret;
+    }
+}

 //-----------------------------------------------------------------------------

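Review note: the catch block bootstraps a missing database: reconnect to the conventional "postgres" maintenance database, CREATE DATABASE, then retry the original config. The same idea expressed directly against libpq (PgPool/PgQuery are this codebase's wrappers; names below are illustrative):

    #include <libpq-fe.h>
    #include <stdexcept>
    #include <string>

    PGconn*
    connectOrCreate(std::string const& host, std::string const& db)
    {
        std::string uri = "postgres://" + host + "/" + db;
        PGconn* conn = PQconnectdb(uri.c_str());
        if (PQstatus(conn) == CONNECTION_OK)
            return conn;
        PQfinish(conn);

        // Fall back to the maintenance database and create the target.
        PGconn* admin = PQconnectdb(("postgres://" + host + "/postgres").c_str());
        if (PQstatus(admin) != CONNECTION_OK)
            throw std::runtime_error(PQerrorMessage(admin));
        PGresult* res = PQexec(admin, ("CREATE DATABASE " + db).c_str());
        PQclear(res);
        PQfinish(admin);

        // Retry with the original target database.
        conn = PQconnectdb(uri.c_str());
        if (PQstatus(conn) != CONNECTION_OK)
            throw std::runtime_error(PQerrorMessage(conn));
        return conn;
    }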
@@ -750,11 +786,12 @@ CREATE TABLE IF NOT EXISTS ledgers (

 CREATE TABLE IF NOT EXISTS objects (
     key bytea NOT NULL,
-    ledger_seq bigint NOT NULL,
-    object bytea,
-    PRIMARY KEY(key, ledger_seq)
+    ledger_seq bigint NOT NULL REFERENCES ledgers ON DELETE CASCADE,
+    object bytea
 ) PARTITION BY RANGE (ledger_seq);

+CREATE INDEX objects_idx ON objects USING btree(key,ledger_seq);
+
 create table if not exists objects1 partition of objects for values from (0) to (10000000);
 create table if not exists objects2 partition of objects for values from (10000000) to (20000000);
 create table if not exists objects3 partition of objects for values from (20000000) to (30000000);
@@ -772,7 +809,7 @@ CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers
 -- cascade here based on ledger_seq.
 CREATE TABLE IF NOT EXISTS transactions (
     hash bytea NOT NULL,
-    ledger_seq bigint NOT NULL ,
+    ledger_seq bigint NOT NULL REFERENCES ledgers ON DELETE CASCADE,
     transaction bytea NOT NULL,
     metadata bytea NOT NULL
 ) PARTITION BY RANGE(ledger_seq);
@@ -791,7 +828,7 @@ create index if not exists tx_by_lgr_seq on transactions using hash (ledger_seq)
 -- ledger table cascade here based on ledger_seq.
 CREATE TABLE IF NOT EXISTS account_transactions (
     account bytea NOT NULL,
-    ledger_seq bigint NOT NULL ,
+    ledger_seq bigint NOT NULL REFERENCES ledgers ON DELETE CASCADE,
     transaction_index bigint NOT NULL,
     hash bytea NOT NULL,
     PRIMARY KEY (account, ledger_seq, transaction_index, hash)
@@ -804,23 +841,13 @@ create table if not exists account_transactions5 partition of account_transactio
 create table if not exists account_transactions6 partition of account_transactions for values from (50000000) to (60000000);
 create table if not exists account_transactions7 partition of account_transactions for values from (60000000) to (70000000);

--- Table that maps a book to a list of offers in that book. Deletes from the ledger table
--- cascade here based on ledger_seq.
-CREATE TABLE IF NOT EXISTS books (
-    ledger_seq bigint NOT NULL,
-    book bytea NOT NULL,
-    offer_key bytea NOT NULL
-);
-
-CREATE INDEX book_idx ON books using btree(ledger_seq, book, offer_key);

 CREATE TABLE IF NOT EXISTS keys (
     ledger_seq bigint NOT NULL,
-    key bytea NOT NULL
+    key bytea NOT NULL,
+    PRIMARY KEY(ledger_seq, key)
 );

-CREATE INDEX key_idx ON keys USING btree(ledger_seq, key);
-
 -- account_tx() RPC helper. From the rippled reporting process, only the
 -- parameters without defaults are required. For the parameters with
 -- defaults, validation should be done by rippled, such as:
@@ -937,8 +964,6 @@ CREATE OR REPLACE RULE account_transactions_update_protect AS ON UPDATE TO
     account_transactions DO INSTEAD NOTHING;
 CREATE OR REPLACE RULE objects_update_protect AS ON UPDATE TO
     objects DO INSTEAD NOTHING;
-CREATE OR REPLACE RULE books_update_protect AS ON UPDATE TO
-    books DO INSTEAD NOTHING;


 -- Return the earliest ledger sequence intended for range operations
@@ -476,6 +476,10 @@ public:
         pool_->checkin(pg_);
     }

+    // TODO. add sendQuery and getResult, for sending the query and getting the
+    // result asynchronously. This could be useful for sending a bunch of
+    // requests concurrently
+
     /** Execute postgres query with parameters.
      *
      * @param dbParams Database command with parameters.
@@ -324,7 +324,7 @@ PostgresBackend::fetchAllTransactionHashesInLedger(
 }

 LedgerPage
-PostgresBackend::fetchLedgerPage(
+PostgresBackend::doFetchLedgerPage(
     std::optional<ripple::uint256> const& cursor,
     std::uint32_t ledgerSequence,
     std::uint32_t limit) const
@@ -335,9 +335,10 @@ PostgresBackend::fetchLedgerPage(
     PgQuery pgQuery(pgPool_);
     pgQuery("SET statement_timeout TO 10000");
     std::stringstream sql;
-    sql << "SELECT key FROM keys WHERE ledger_seq = " << std::to_string(*index);
+    sql << "SELECT key FROM keys WHERE ledger_seq = "
+        << std::to_string(index->keyIndex);
     if (cursor)
-        sql << " AND key > \'\\x" << ripple::strHex(*cursor) << "\'";
+        sql << " AND key >= \'\\x" << ripple::strHex(*cursor) << "\'";
     sql << " ORDER BY key ASC LIMIT " << std::to_string(limit);
     BOOST_LOG_TRIVIAL(debug) << __func__ << sql.str();
     auto res = pgQuery(sql.str().data());
@@ -350,8 +351,11 @@ PostgresBackend::fetchLedgerPage(
         {
             keys.push_back({res.asUInt256(i, 0)});
         }
-        if (numRows == limit)
+        if (numRows >= limit)
+        {
             returnCursor = keys.back();
+            ++(*returnCursor);
+        }

         auto objs = fetchLedgerObjects(keys, ledgerSequence);
         std::vector<LedgerObject> results;
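Review note: the cursor handling changed because the query above now uses "key >= cursor"; after a full page the next cursor must point one past the last key returned, or that key would repeat. ripple::uint256 supports increment via base_uint, which is what ++(*returnCursor) relies on. The same pattern with a plain integer key, for clarity:

    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <vector>

    struct Page
    {
        std::vector<std::uint64_t> keys;
        std::optional<std::uint64_t> cursor;  // inclusive start of next page
    };

    Page
    fetchPage(std::vector<std::uint64_t> const& sorted, std::uint64_t start, std::size_t limit)
    {
        Page page;
        for (auto k : sorted)
        {
            if (k < start)
                continue;
            if (page.keys.size() == limit)
                break;
            page.keys.push_back(k);
        }
        if (page.keys.size() >= limit)
            page.cursor = page.keys.back() + 1;  // mirrors ++(*returnCursor)
        return page;
    }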
@@ -371,168 +375,6 @@ PostgresBackend::fetchLedgerPage(
     return {};
 }

-BookOffersPage
-PostgresBackend::fetchBookOffers(
-    ripple::uint256 const& book,
-    uint32_t ledgerSequence,
-    std::uint32_t limit,
-    std::optional<ripple::uint256> const& cursor) const
-{
-    auto rng = fetchLedgerRange();
-    auto limitTuningFactor = 50;
-
-    if(!rng)
-        return {{},{}};
-
-    ripple::uint256 bookBase =
-        ripple::keylet::quality({ripple::ltDIR_NODE, book}, 0).key;
-    ripple::uint256 bookEnd = ripple::getQualityNext(bookBase);
-
-    using bookKeyPair = std::pair<ripple::uint256, ripple::uint256>;
-    auto getBooks =
-        [this, &bookBase, &bookEnd, &limit, &limitTuningFactor]
-        (std::uint32_t sequence)
-        -> std::pair<bool, std::vector<bookKeyPair>>
-    {
-        BOOST_LOG_TRIVIAL(info) << __func__ << ": Fetching books between "
-                                << "0x" << ripple::strHex(bookBase) << " and "
-                                << "0x" << ripple::strHex(bookEnd) << "at ledger "
-                                << std::to_string(sequence);
-
-        auto start = std::chrono::system_clock::now();
-
-        std::stringstream sql;
-        sql << "SELECT COUNT(*) FROM books WHERE "
-            << "book = \'\\x" << ripple::strHex(ripple::uint256(beast::zero))
-            << "\' AND ledger_seq = " << std::to_string(sequence);
-
-        bool complete;
-        PgQuery pgQuery(this->pgPool_);
-        auto res = pgQuery(sql.str().data());
-        if (size_t numRows = checkResult(res, 1))
-            complete = res.asInt(0, 0) != 0;
-        else
-            return {false, {}};
-
-        sql.str("");
-        sql << "SELECT book, offer_key FROM books "
-            << "WHERE ledger_seq = " << std::to_string(sequence)
-            << " AND book >= "
-            << "\'\\x" << ripple::strHex(bookBase) << "\' "
-            << "AND book < "
-            << "\'\\x" << ripple::strHex(bookEnd) << "\' "
-            << "ORDER BY book ASC "
-            << "LIMIT " << std::to_string(limit * limitTuningFactor);
-
-        BOOST_LOG_TRIVIAL(debug) << sql.str();
-
-        res = pgQuery(sql.str().data());
-
-        auto end = std::chrono::system_clock::now();
-        auto duration = ((end - start).count()) / 1000000000.0;
-
-        BOOST_LOG_TRIVIAL(info) << "Postgres book key fetch took "
-                                << std::to_string(duration)
-                                << " seconds";
-
-        if (size_t numRows = checkResult(res, 2))
-        {
-            std::vector<bookKeyPair> results(numRows);
-            for (size_t i = 0; i < numRows; ++i)
-            {
-                auto book = res.asUInt256(i, 0);
-                auto key = res.asUInt256(i, 1);
-
-                results.push_back({std::move(book), std::move(key)});
-            }
-
-            return {complete, results};
-        }
-
-        return {complete, {}};
-    };
-
-    auto fetchObjects =
-        [this]
-        (std::vector<bookKeyPair> const& pairs,
-         std::uint32_t sequence,
-         std::uint32_t limit,
-         std::optional<std::string> warning)
-        -> BookOffersPage
-    {
-        std::vector<ripple::uint256> allKeys(pairs.size());
-        for (auto const& pair : pairs)
-            allKeys.push_back(pair.second);
-
-        auto uniqEnd = std::unique(allKeys.begin(), allKeys.end());
-        std::vector<ripple::uint256> keys{allKeys.begin(), uniqEnd};
-
-        auto start = std::chrono::system_clock::now();
-
-        auto ledgerEntries = fetchLedgerObjects(keys, sequence);
-
-        auto end = std::chrono::system_clock::now();
-        auto duration = ((end - start).count()) / 1000000000.0;
-
-        BOOST_LOG_TRIVIAL(info) << "Postgres book objects fetch took "
-                                << std::to_string(duration)
-                                << " seconds. "
-                                << "Fetched "
-                                << std::to_string(ledgerEntries.size())
-                                << " ledger entries";
-
-        std::vector<LedgerObject> objects;
-        for (auto i = 0; i < ledgerEntries.size(); ++i)
-        {
-            if(ledgerEntries[i].size() != 0)
-                objects.push_back(LedgerObject{keys[i], ledgerEntries[i]});
-        }
-
-        return {objects, {}, warning};
-    };
-
-    std::uint32_t bookShift = indexer_.getBookShift();
-    auto upper = indexer_.getBookIndexOfSeq(ledgerSequence);
-
-    auto [upperComplete, upperResults] = getBooks(upper);
-
-    BOOST_LOG_TRIVIAL(info) << __func__ << ": Upper results found "
-                            << upperResults.size() << " books.";
-
-    if (upperComplete)
-    {
-        BOOST_LOG_TRIVIAL(info) << "Upper book page is complete";
-        return fetchObjects(upperResults, ledgerSequence, limit, {});
-    }
-
-    BOOST_LOG_TRIVIAL(info) << "Upper book page is not complete "
-                            << "fetching again";
-
-    auto lower = upper - (1 << bookShift);
-    if (lower < rng->minSequence)
-        lower = rng->minSequence;
-
-    auto [lowerComplete, lowerResults] = getBooks(lower);
-
-    BOOST_LOG_TRIVIAL(info) << __func__ << ": Lower results found "
-                            << lowerResults.size() << " books.";
-
-    assert(lowerComplete);
-
-    std::vector<bookKeyPair> pairs;
-    pairs.reserve(upperResults.size() + lowerResults.size());
-    std::merge(upperResults.begin(), upperResults.end(),
-               lowerResults.begin(), lowerResults.end(),
-               std::back_inserter(pairs),
-               [](bookKeyPair pair1, bookKeyPair pair2) -> bool
-               {
-                   return pair1.first < pair2.first;
-               });
-
-    std::optional<std::string> warning = "book data may be incomplete";
-    return fetchObjects(pairs, ledgerSequence, limit, warning);
-}

 std::vector<TransactionAndMetadata>
 PostgresBackend::fetchTransactions(
     std::vector<ripple::uint256> const& hashes) const
@@ -778,12 +620,16 @@ PostgresBackend::doFinishWrites() const
 {
     if (!abortWrite_)
     {
-        writeConnection_.bulkInsert("transactions", transactionsBuffer_.str());
+        std::string txStr = transactionsBuffer_.str();
+        writeConnection_.bulkInsert("transactions", txStr);
         writeConnection_.bulkInsert(
             "account_transactions", accountTxBuffer_.str());
         std::string objectsStr = objectsBuffer_.str();
         if (objectsStr.size())
             writeConnection_.bulkInsert("objects", objectsStr);
+        BOOST_LOG_TRIVIAL(debug)
+            << __func__ << " objects size = " << objectsStr.size()
+            << " txns size = " << txStr.size();
     }
     auto res = writeConnection_("COMMIT");
     if (!res || res.status() != PGRES_COMMAND_OK)
@@ -796,8 +642,6 @@ PostgresBackend::doFinishWrites() const
     transactionsBuffer_.clear();
     objectsBuffer_.str("");
     objectsBuffer_.clear();
-    booksBuffer_.str("");
-    booksBuffer_.clear();
     accountTxBuffer_.str("");
     accountTxBuffer_.clear();
     numRowsInObjectsBuffer_ = 0;
@@ -806,173 +650,141 @@ PostgresBackend::doFinishWrites() const
 bool
 PostgresBackend::writeKeys(
     std::unordered_set<ripple::uint256> const& keys,
-    uint32_t ledgerSequence,
+    KeyIndex const& index,
     bool isAsync) const
 {
-    BOOST_LOG_TRIVIAL(debug) << __func__;
+    if (abortWrite_)
+        return false;
     PgQuery pgQuery(pgPool_);
-    pgQuery("BEGIN");
-    std::stringstream keysBuffer;
+    PgQuery& conn = isAsync ? pgQuery : writeConnection_;
+    std::stringstream sql;
     size_t numRows = 0;
     for (auto& key : keys)
     {
-        keysBuffer << std::to_string(ledgerSequence) << '\t' << "\\\\x"
-                   << ripple::strHex(key) << '\n';
         numRows++;
-        // If the buffer gets too large, the insert fails. Not sure why. So we
-        // insert after 1 million records
-        if (numRows == 100000)
+        sql << "INSERT INTO keys (ledger_seq, key) VALUES ("
+            << std::to_string(index.keyIndex) << ", \'\\x"
+            << ripple::strHex(key) << "\') ON CONFLICT DO NOTHING; ";
+        if (numRows > 10000)
         {
-            pgQuery.bulkInsert("keys", keysBuffer.str());
-            std::stringstream temp;
-            keysBuffer.swap(temp);
+            conn(sql.str().c_str());
+            sql.str("");
+            sql.clear();
             numRows = 0;
         }
     }
     if (numRows > 0)
-    {
-        pgQuery.bulkInsert("keys", keysBuffer.str());
-    }
-    pgQuery("COMMIT");
+        conn(sql.str().c_str());
     return true;
-}
-bool
-PostgresBackend::writeBooks(
-    std::unordered_map<
-        ripple::uint256,
-        std::unordered_set<ripple::uint256>> const& books,
-    uint32_t ledgerSequence,
-    bool isAsync) const
-{
+}
+/*
     BOOST_LOG_TRIVIAL(debug) << __func__;
+    std::condition_variable cv;
+    std::mutex mtx;
+    std::atomic_uint numRemaining = keys.size();
+    auto start = std::chrono::system_clock::now();
+    for (auto& key : keys)
+    {
+        boost::asio::post(
+            pool_, [this, key, &numRemaining, &cv, &mtx, &index]() {
     PgQuery pgQuery(pgPool_);
-    pgQuery("BEGIN");
-    std::stringstream booksBuffer;
-    size_t numRows = 0;
-    for (auto& book : books)
+    std::stringstream sql;
+    sql << "INSERT INTO keys (ledger_seq, key) VALUES ("
+        << std::to_string(index.keyIndex) << ", \'\\x"
+        << ripple::strHex(key) << "\') ON CONFLICT DO NOTHING";
+
+    auto res = pgQuery(sql.str().data());
+    if (--numRemaining == 0)
     {
-        for (auto& offer : book.second)
-        {
-            booksBuffer << std::to_string(ledgerSequence) << '\t' << "\\\\x"
-                        << ripple::strHex(book.first) << '\t' << "\\\\x"
-                        << ripple::strHex(offer) << '\n';
-            numRows++;
-            // If the buffer gets too large, the insert fails. Not sure why. So
-            // we insert after 1 million records
-            if (numRows == 1000000)
-            {
-                pgQuery.bulkInsert("books", booksBuffer.str());
-                std::stringstream temp;
-                booksBuffer.swap(temp);
-                numRows = 0;
-            }
+        std::unique_lock lck(mtx);
+        cv.notify_one();
     }
+    });
     }
-    }
-    if (numRows > 0)
-    {
-        pgQuery.bulkInsert("books", booksBuffer.str());
-    }
-    pgQuery("COMMIT");
+    std::unique_lock lck(mtx);
+    cv.wait(lck, [&numRemaining]() { return numRemaining == 0; });
+    auto end = std::chrono::system_clock::now();
+    auto duration =
+        std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
+            .count();
+    BOOST_LOG_TRIVIAL(info)
+        << __func__ << " wrote " << std::to_string(keys.size())
+        << " keys with threadpool. took " << std::to_string(duration);
+*/
     return true;
 }
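Review note: the rewritten writeKeys batches INSERT ... ON CONFLICT DO NOTHING statements instead of COPY, which works because the keys table now declares PRIMARY KEY(ledger_seq, key), so duplicate writes become no-ops. A self-contained sketch of the batching pattern with a stand-in executor (the 10000-row flush threshold is taken from the diff):

    #include <cstdint>
    #include <functional>
    #include <sstream>
    #include <string>
    #include <unordered_set>

    void
    writeKeysBatched(
        std::unordered_set<std::string> const& hexKeys,
        std::uint32_t keyIndex,
        std::function<void(std::string const&)> const& execute)
    {
        std::stringstream sql;
        std::size_t numRows = 0;
        for (auto& key : hexKeys)
        {
            sql << "INSERT INTO keys (ledger_seq, key) VALUES (" << keyIndex
                << ", '\\x" << key << "') ON CONFLICT DO NOTHING; ";
            if (++numRows > 10000)  // flush periodically to bound statement size
            {
                execute(sql.str());
                sql.str("");
                numRows = 0;
            }
        }
        if (numRows > 0)
            execute(sql.str());
    }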
 bool
-PostgresBackend::doOnlineDelete(uint32_t minLedgerToKeep) const
+PostgresBackend::doOnlineDelete(uint32_t numLedgersToKeep) const
 {
+    auto rng = fetchLedgerRangeNoThrow();
+    if (!rng)
+        return false;
+    uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
+    if (minLedger <= rng->minSequence)
+        return false;
     uint32_t limit = 2048;
     PgQuery pgQuery(pgPool_);
+    pgQuery("SET statement_timeout TO 0");
+    std::optional<ripple::uint256> cursor;
+    while (true)
+    {
+        try
+        {
+            auto [objects, curCursor, warning] =
+                fetchLedgerPage(cursor, minLedger, 256);
+            if (warning)
+            {
+                BOOST_LOG_TRIVIAL(warning) << __func__
+                                           << " online delete running but "
+                                              "flag ledger is not complete";
+                std::this_thread::sleep_for(std::chrono::seconds(10));
+                continue;
+            }
+            BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
+            std::stringstream objectsBuffer;
+
+            for (auto& obj : objects)
+            {
+                objectsBuffer << "\\\\x" << ripple::strHex(obj.key) << '\t'
+                              << std::to_string(minLedger) << '\t' << "\\\\x"
+                              << ripple::strHex(obj.blob) << '\n';
+            }
+            pgQuery.bulkInsert("objects", objectsBuffer.str());
+            cursor = curCursor;
+            if (!cursor)
+                break;
+        }
+        catch (DatabaseTimeout const& e)
+        {
+            BOOST_LOG_TRIVIAL(warning)
+                << __func__ << " Database timeout fetching keys";
+            std::this_thread::sleep_for(std::chrono::seconds(2));
+        }
+    }
+    BOOST_LOG_TRIVIAL(info) << __func__ << " finished inserting into objects";
     {
         std::stringstream sql;
         sql << "DELETE FROM ledgers WHERE ledger_seq < "
-            << std::to_string(minLedgerToKeep);
+            << std::to_string(minLedger);
         auto res = pgQuery(sql.str().data());
         if (res.msg() != "ok")
             throw std::runtime_error("Error deleting from ledgers table");
     }

-    std::string cursor;
-    do
     {
         std::stringstream sql;
-        sql << "SELECT DISTINCT ON (key) key,ledger_seq,object FROM objects"
-            << " WHERE ledger_seq <= " << std::to_string(minLedgerToKeep);
-        if (cursor.size())
-            sql << " AND key < \'\\x" << cursor << "\'";
-        sql << " ORDER BY key DESC, ledger_seq DESC"
-            << " LIMIT " << std::to_string(limit);
-        BOOST_LOG_TRIVIAL(trace) << __func__ << sql.str();
+        sql << "DELETE FROM keys WHERE ledger_seq < "
+            << std::to_string(minLedger);
         auto res = pgQuery(sql.str().data());
-        BOOST_LOG_TRIVIAL(debug) << __func__ << "Fetched a page";
-        if (size_t numRows = checkResult(res, 3))
-        {
-            std::stringstream deleteSql;
-            std::stringstream deleteOffersSql;
-            deleteSql << "DELETE FROM objects WHERE (";
-            deleteOffersSql << "DELETE FROM books WHERE (";
-            bool firstOffer = true;
-            for (size_t i = 0; i < numRows; ++i)
-            {
-                std::string_view keyView{res.c_str(i, 0) + 2};
-                int64_t sequence = res.asBigInt(i, 1);
-                std::string_view objView{res.c_str(i, 2) + 2};
-                if (i != 0)
-                    deleteSql << " OR ";
-
-                deleteSql << "(key = "
-                          << "\'\\x" << keyView << "\'";
-                if (objView.size() == 0)
-                    deleteSql << " AND ledger_seq <= "
-                              << std::to_string(sequence);
-                else
-                    deleteSql << " AND ledger_seq < "
-                              << std::to_string(sequence);
-                deleteSql << ")";
-                bool deleteOffer = false;
-                if (objView.size())
-                {
-                    deleteOffer = isOfferHex(objView);
-                }
-                else
-                {
-                    // This is rather unelegant. For a deleted object, we
-                    // don't know its type just from the key (or do we?).
-                    // So, we just assume it is an offer and try to delete
-                    // it. The alternative is to read the actual object out
-                    // of the db from before it was deleted. This could
-                    // result in a lot of individual reads though, so we
-                    // chose to just delete
-                    deleteOffer = true;
-                }
-                if (deleteOffer)
-                {
-                    if (!firstOffer)
-                        deleteOffersSql << " OR ";
-                    deleteOffersSql << "( offer_key = "
-                                    << "\'\\x" << keyView << "\')";
-                    firstOffer = false;
-                }
-            }
-            if (numRows == limit)
-                cursor = res.c_str(numRows - 1, 0) + 2;
-            else
-                cursor = {};
-            deleteSql << ")";
-            deleteOffersSql << ")";
-            BOOST_LOG_TRIVIAL(trace) << __func__ << deleteSql.str();
-            res = pgQuery(deleteSql.str().data());
         if (res.msg() != "ok")
-                throw std::runtime_error("Error deleting from objects table");
-            if (!firstOffer)
+            throw std::runtime_error("Error deleting from keys table");
+    }
     {
-                BOOST_LOG_TRIVIAL(trace) << __func__ << deleteOffersSql.str();
-                res = pgQuery(deleteOffersSql.str().data());
+        std::stringstream sql;
+        sql << "DELETE FROM books WHERE ledger_seq < "
+            << std::to_string(minLedger);
+        auto res = pgQuery(sql.str().data());
         if (res.msg() != "ok")
             throw std::runtime_error("Error deleting from books table");
     }
-            BOOST_LOG_TRIVIAL(debug)
-                << __func__ << "Deleted a page. Cursor = " << cursor;
-        }
-    } while (cursor.size());
     return true;
 }

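Review note: doOnlineDelete's parameter changed meaning, from "lowest ledger to keep" to "how many ledgers to keep"; the backend now derives the cutoff itself from the current range, as the code above shows. The derivation in isolation:

    #include <cstdint>
    #include <optional>

    struct LedgerRange
    {
        std::uint32_t minSequence;
        std::uint32_t maxSequence;
    };

    // Returns the exclusive deletion cutoff, or nullopt when nothing is old
    // enough to delete (mirrors the early returns in doOnlineDelete).
    std::optional<std::uint32_t>
    deletionCutoff(LedgerRange const& rng, std::uint32_t numLedgersToKeep)
    {
        std::uint32_t minLedger = rng.maxSequence - numLedgersToKeep;
        if (minLedger <= rng.minSequence)
            return std::nullopt;
        return minLedger;
    }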
@@ -9,13 +9,13 @@ class PostgresBackend : public BackendInterface
 private:
     mutable size_t numRowsInObjectsBuffer_ = 0;
     mutable std::stringstream objectsBuffer_;
+    mutable std::stringstream keysBuffer_;
     mutable std::stringstream transactionsBuffer_;
-    mutable std::stringstream booksBuffer_;
     mutable std::stringstream accountTxBuffer_;
     std::shared_ptr<PgPool> pgPool_;
     mutable PgQuery writeConnection_;
     mutable bool abortWrite_ = false;
-    mutable boost::asio::thread_pool pool_{200};
+    mutable boost::asio::thread_pool pool_{16};
     uint32_t writeInterval_ = 1000000;

 public:
@@ -45,18 +45,11 @@ public:
     fetchAllTransactionHashesInLedger(uint32_t ledgerSequence) const override;

     LedgerPage
-    fetchLedgerPage(
+    doFetchLedgerPage(
         std::optional<ripple::uint256> const& cursor,
         std::uint32_t ledgerSequence,
         std::uint32_t limit) const override;

-    BookOffersPage
-    fetchBookOffers(
-        ripple::uint256 const& book,
-        uint32_t ledgerSequence,
-        std::uint32_t limit,
-        std::optional<ripple::uint256> const& cursor) const override;
-
     std::vector<TransactionAndMetadata>
     fetchTransactions(
         std::vector<ripple::uint256> const& hashes) const override;
@@ -113,18 +106,11 @@ public:
     doFinishWrites() const override;

     bool
-    doOnlineDelete(uint32_t minLedgerToKeep) const override;
+    doOnlineDelete(uint32_t numLedgersToKeep) const override;
     bool
     writeKeys(
         std::unordered_set<ripple::uint256> const& keys,
-        uint32_t ledgerSequence,
-        bool isAsync = false) const override;
-    bool
-    writeBooks(
-        std::unordered_map<
-            ripple::uint256,
-            std::unordered_set<ripple::uint256>> const& books,
-        uint32_t ledgerSequence,
+        KeyIndex const& index,
         bool isAsync = false) const override;
 };
 } // namespace Backend
@@ -239,6 +239,13 @@ ReportingETL::publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts)
             ++numAttempts;
             continue;
         }
+        else
+        {
+            auto lgr =
+                flatMapBackend_->fetchLedgerBySequence(ledgerSequence);
+            assert(lgr);
+            publishLedger(*lgr);
+        }
     }
     catch (Backend::DatabaseTimeout const& e)
     {
@@ -291,7 +298,7 @@ ReportingETL::fetchLedgerDataAndDiff(uint32_t idx)
 std::pair<ripple::LedgerInfo, bool>
 ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
 {
-    BOOST_LOG_TRIVIAL(trace) << __func__ << " : "
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                              << "Beginning ledger update";

     ripple::LedgerInfo lgrInfo =
@@ -302,15 +309,16 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
         << "Deserialized ledger header. " << detail::toString(lgrInfo);
     backend_->startWrites();

+<<<<<<< HEAD
     backend_->writeLedger(
+=======
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
+                             << "started writes";
+    flatMapBackend_->writeLedger(
+>>>>>>> dev
         lgrInfo, std::move(*rawData.mutable_ledger_header()));
-    std::vector<AccountTransactionsData> accountTxData{
-        insertTransactions(lgrInfo, rawData)};
-
-    BOOST_LOG_TRIVIAL(debug)
-        << __func__ << " : "
-        << "Inserted all transactions. Number of transactions = "
-        << rawData.transactions_list().transactions_size();
-
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
+                             << "wrote ledger header";
     for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects()))
     {
@@ -343,7 +351,24 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
             isDeleted,
             std::move(bookDir));
     }
+<<<<<<< HEAD
     backend_->writeAccountTransactions(std::move(accountTxData));
+=======
+    BOOST_LOG_TRIVIAL(debug)
+        << __func__ << " : "
+        << "wrote objects. num objects = "
+        << std::to_string(rawData.ledger_objects().objects_size());
+    std::vector<AccountTransactionsData> accountTxData{
+        insertTransactions(lgrInfo, rawData)};
+
+    BOOST_LOG_TRIVIAL(debug)
+        << __func__ << " : "
+        << "Inserted all transactions. Number of transactions = "
+        << rawData.transactions_list().transactions_size();
+    flatMapBackend_->writeAccountTransactions(std::move(accountTxData));
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
+                             << "wrote account_tx";
+>>>>>>> dev
     accumTxns_ += rawData.transactions_list().transactions_size();
     bool success = true;
     if (accumTxns_ >= txnThreshold_)
@@ -353,7 +378,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
             auto end = std::chrono::system_clock::now();

             auto duration = ((end - start).count()) / 1000000000.0;
-            BOOST_LOG_TRIVIAL(info)
+            BOOST_LOG_TRIVIAL(debug)
                 << __func__ << " Accumulated " << std::to_string(accumTxns_)
                 << " transactions. Wrote in " << std::to_string(duration)
                 << " transactions per second = "
@@ -361,7 +386,7 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
             accumTxns_ = 0;
         }
         else
-            BOOST_LOG_TRIVIAL(info) << __func__ << " skipping commit";
+            BOOST_LOG_TRIVIAL(debug) << __func__ << " skipping commit";
     BOOST_LOG_TRIVIAL(debug)
         << __func__ << " : "
         << "Inserted/modified/deleted all objects. Number of objects = "
@@ -412,10 +437,14 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
         assert(false);
         throw std::runtime_error("runETLPipeline: parent ledger is null");
     }
+    std::atomic<uint32_t> minSequence = rng->minSequence;
     BOOST_LOG_TRIVIAL(info) << __func__ << " : "
                             << "Populating caches";

+<<<<<<< HEAD
     backend_->getIndexer().populateCachesAsync(*backend_);
+=======
+>>>>>>> dev
     BOOST_LOG_TRIVIAL(info) << __func__ << " : "
                             << "Populated caches";

@@ -501,6 +530,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
     }

     std::thread transformer{[this,
+                             &minSequence,
                              &writeConflict,
                              &startSequence,
                              &getNext,
@@ -549,17 +579,25 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
                 lastPublishedSequence = lgrInfo.seq;
             }
             writeConflict = !success;
+<<<<<<< HEAD
             auto range = backend_->fetchLedgerRangeNoThrow();
+=======
+>>>>>>> dev
             if (onlineDeleteInterval_ && !deleting_ &&
-                range->maxSequence - range->minSequence >
-                    *onlineDeleteInterval_)
+                lgrInfo.seq - minSequence > *onlineDeleteInterval_)
             {
                 deleting_ = true;
-                ioContext_.post([this, &range]() {
+                ioContext_.post([this, &minSequence]() {
                     BOOST_LOG_TRIVIAL(info) << "Running online delete";
+<<<<<<< HEAD
                     backend_->doOnlineDelete(
                         range->maxSequence - *onlineDeleteInterval_);
+=======
+                    flatMapBackend_->doOnlineDelete(*onlineDeleteInterval_);
+>>>>>>> dev
                     BOOST_LOG_TRIVIAL(info) << "Finished online delete";
+                    auto rng = flatMapBackend_->fetchLedgerRangeNoThrow();
+                    minSequence = rng->minSequence;
                     deleting_ = false;
                 });
             }
@@ -580,7 +618,10 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
         << "Extracted and wrote " << *lastPublishedSequence - startSequence
         << " in " << ((end - begin).count()) / 1000000000.0;
     writing_ = false;
+<<<<<<< HEAD
     backend_->getIndexer().clearCaches();
+=======
+>>>>>>> dev

     BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                              << "Stopping etl pipeline";
@@ -772,10 +813,27 @@ ReportingETL::ReportingETL(
     if (config.contains("read_only"))
         readOnly_ = config.at("read_only").as_bool();
     if (config.contains("online_delete"))
-        onlineDeleteInterval_ = config.at("online_delete").as_int64();
+    {
+        int64_t interval = config.at("online_delete").as_int64();
+        uint32_t max = std::numeric_limits<uint32_t>::max();
+        if (interval > max)
+        {
+            std::stringstream msg;
+            msg << "online_delete cannot be greater than "
+                << std::to_string(max);
+            throw std::runtime_error(msg.str());
+        }
+        if (interval > 0)
+            onlineDeleteInterval_ = static_cast<uint32_t>(interval);
+    }
     if (config.contains("extractor_threads"))
         extractorThreads_ = config.at("extractor_threads").as_int64();
     if (config.contains("txn_threshold"))
         txnThreshold_ = config.at("txn_threshold").as_int64();
+<<<<<<< HEAD
+=======
+    flatMapBackend_->open(readOnly_);
+    flatMapBackend_->checkFlagLedgers();
+>>>>>>> dev
 }

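Review note: online_delete now goes through range and sign checks before being narrowed to uint32_t; zero or a missing key leaves online delete disabled. The validation in isolation:

    #include <cstdint>
    #include <limits>
    #include <optional>
    #include <stdexcept>

    std::optional<std::uint32_t>
    parseOnlineDelete(std::int64_t interval)
    {
        if (interval > std::numeric_limits<std::uint32_t>::max())
            throw std::runtime_error("online_delete cannot be greater than 2^32 - 1");
        if (interval > 0)
            return static_cast<std::uint32_t>(interval);
        return std::nullopt;  // disabled
    }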
@@ -271,6 +271,28 @@ private:
        return numMarkers_;
    }

+<<<<<<< HEAD
+=======
+    boost::json::object
+    getInfo()
+    {
+        boost::json::object result;
+
+        result["etl_sources"] = loadBalancer_.toJson();
+        result["is_writer"] = writing_.load();
+        result["read_only"] = readOnly_;
+        auto last = getLastPublish();
+        if (last.time_since_epoch().count() != 0)
+            result["last_publish_time"] = std::to_string(
+                std::chrono::duration_cast<std::chrono::milliseconds>(
+                    std::chrono::system_clock::now() - getLastPublish())
+                    .count());
+
+        return result;
+    }
+
+    /// start all of the necessary components and begin ETL
+>>>>>>> dev
    void
    run()
    {
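Review note: getInfo() reports the elapsed time since the last publish in milliseconds, guarding against a zero (never-published) time point. The computation in isolation, assuming getLastPublish() returns a std::chrono::system_clock::time_point as the diff suggests:

    #include <chrono>
    #include <string>

    std::string
    millisSince(std::chrono::system_clock::time_point last)
    {
        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now() - last);
        return std::to_string(elapsed.count());
    }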
@@ -39,6 +39,7 @@ class listener : public std::enable_shared_from_this<listener>
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<SubscriptionManager> subscriptions_;
    std::shared_ptr<ETLLoadBalancer> balancer_;
+    DOSGuard& dosGuard_;

public:
    static void
@@ -50,12 +51,8 @@ public:
        std::shared_ptr<ETLLoadBalancer> balancer)
    {
        std::make_shared<listener>(
-            ioc,
-            endpoint,
-            backend,
-            subscriptions,
-            balancer
-        )->run();
+            ioc, endpoint, backend, subscriptions, balancer)
+            ->run();
    }

    listener(
@@ -63,7 +60,8 @@ public:
        boost::asio::ip::tcp::endpoint endpoint,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
-        std::shared_ptr<ETLLoadBalancer> balancer)
+        std::shared_ptr<ETLLoadBalancer> balancer,
+        DOSGuard& dosGuard)
        : ioc_(ioc)
        , acceptor_(ioc)
        , backend_(backend)
@@ -108,7 +106,6 @@ public:
    ~listener() = default;

private:
-
    void
    run()
    {
@@ -134,7 +131,12 @@ private:
        }
        else
        {
-            session::make_session(std::move(socket), backend_, subscriptions_, balancer_);
+            session::make_session(
+                std::move(socket),
+                backend_,
+                subscriptions_,
+                balancer_,
+                dosGuard_);
        }

        // Accept another connection
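Review note: DOSGuard is threaded by reference from the listener into each session so that all connections share one rate-limiting state. DOSGuard's own interface is not part of this diff; the sketch below only shows the ownership pattern being introduced:

    struct DOSGuard;  // defined elsewhere in the codebase

    class Session
    {
        DOSGuard& dosGuard_;  // shared, long-lived guard owned by the caller

    public:
        explicit Session(DOSGuard& guard) : dosGuard_(guard)
        {
        }
    };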
@@ -1,5 +1,5 @@
-#include <reporting/server/session.h>
 #include <reporting/P2pProxy.h>
+#include <reporting/server/session.h>

 void
 fail(boost::beast::error_code ec, char const* what)
@@ -7,7 +7,7 @@ fail(boost::beast::error_code ec, char const* what)
     std::cerr << what << ": " << ec.message() << "\n";
 }

-boost::json::object
+std::pair<boost::json::object, uint32_t>
 buildResponse(
     boost::json::object const& request,
     std::shared_ptr<BackendInterface> backend,
@@ -25,41 +25,86 @@ buildResponse(
     switch (commandMap[command])
     {
         case tx:
-            return doTx(request, *backend);
-        case account_tx:
-            return doAccountTx(request, *backend);
-        case ledger:
-            return doLedger(request, *backend);
+            return {doTx(request, *backend), 1};
+        case account_tx: {
+            auto res = doAccountTx(request, backend);
+            if (res.contains("transactions"))
+                return {res, res["transactions"].as_array().size()};
+            return {res, 1};
+        }
+        case ledger: {
+            auto res = doLedger(request, backend);
+            if (res.contains("transactions"))
+                return {res, res["transactions"].as_array().size()};
+            return {res, 1};
+        }
         case ledger_entry:
-            return doLedgerEntry(request, *backend);
+            return {doLedgerEntry(request, *backend), 1};
         case ledger_range:
-            return doLedgerRange(request, *backend);
-        case ledger_data:
-            return doLedgerData(request, *backend);
+            return {doLedgerRange(request, *backend), 1};
+        case ledger_data: {
+            auto res = doLedgerData(request, backend);
+            if (res.contains("objects"))
+                return {res, res["objects"].as_array().size() * 4};
+            return {res, 1};
+        }
         case account_info:
-            return doAccountInfo(request, *backend);
-        case book_offers:
-            return doBookOffers(request, *backend);
-        case account_channels:
-            return doAccountChannels(request, *backend);
-        case account_lines:
-            return doAccountLines(request, *backend);
-        case account_currencies:
-            return doAccountCurrencies(request, *backend);
-        case account_offers:
-            return doAccountOffers(request, *backend);
-        case account_objects:
-            return doAccountObjects(request, *backend);
-        case channel_authorize:
-            return doChannelAuthorize(request);
+            return {doAccountInfo(request, *backend), 1};
+        case book_offers: {
+            auto res = doBookOffers(request, backend);
+            if (res.contains("offers"))
+                return {res, res["offers"].as_array().size() * 4};
+            return {res, 1};
+        }
+        case account_channels: {
+            auto res = doAccountChannels(request, *backend);
+            if (res.contains("channels"))
+                return {res, res["channels"].as_array().size()};
+            return {res, 1};
+        }
+        case account_lines: {
+            auto res = doAccountLines(request, *backend);
+            if (res.contains("lines"))
+                return {res, res["lines"].as_array().size()};
+            return {res, 1};
+        }
+        case account_currencies: {
+            auto res = doAccountCurrencies(request, *backend);
+            size_t count = 1;
+            if (res.contains("send_currencies"))
+                count = res["send_currencies"].as_array().size();
+            if (res.contains("receive_currencies"))
+                count += res["receive_currencies"].as_array().size();
+            return {res, count};
+        }
+
+        case account_offers: {
+            auto res = doAccountOffers(request, *backend);
+            if (res.contains("offers"))
+                return {res, res["offers"].as_array().size()};
+            return {res, 1};
+        }
+        case account_objects: {
+            auto res = doAccountObjects(request, *backend);
+            if (res.contains("objects"))
+                return {res, res["objects"].as_array().size()};
+            return {res, 1};
+        }
+        case channel_authorize: {
+            return {doChannelAuthorize(request), 1};
+        }
         case channel_verify:
-            return doChannelVerify(request);
+            return {doChannelVerify(request), 1};
         case subscribe:
-            return doSubscribe(request, session, *manager);
+            return {doSubscribe(request, session, *manager), 1};
         case unsubscribe:
-            return doUnsubscribe(request, session, *manager);
+            return {doUnsubscribe(request, session, *manager), 1};
+        case server_info: {
+            return {doServerInfo(request, backend), 1};
+            break;
+        }
         default:
             response["error"] = "Unknown command: " + command;
-            return response;
+            return {response, 1};
     }
 }
@@ -49,6 +49,7 @@ enum RPCCommand {
     account_objects,
     channel_authorize,
     channel_verify,
+    server_info,
     subscribe,
     unsubscribe
 };
@@ -69,6 +70,7 @@ static std::unordered_map<std::string, RPCCommand> commandMap{
     {"account_objects", account_objects},
     {"channel_authorize", channel_authorize},
     {"channel_verify", channel_verify},
+    {"server_info", server_info},
     {"subscribe", subscribe},
     {"unsubscribe", unsubscribe}};
 
@@ -170,6 +172,7 @@ class session : public std::enable_shared_from_this<session>
     std::shared_ptr<BackendInterface> backend_;
     std::weak_ptr<SubscriptionManager> subscriptions_;
     std::shared_ptr<ETLLoadBalancer> balancer_;
+    DOSGuard& dosGuard_;
 
 public:
     // Take ownership of the socket
@@ -177,11 +180,13 @@ public:
         boost::asio::ip::tcp::socket&& socket,
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<SubscriptionManager> subscriptions,
-        std::shared_ptr<ETLLoadBalancer> balancer)
+        std::shared_ptr<ETLLoadBalancer> balancer,
+        DOSGuard& dosGuard)
         : ws_(std::move(socket))
         , backend_(backend)
         , subscriptions_(subscriptions)
         , balancer_(balancer)
+        , dosGuard_(dosGuard)
     {
     }
 
@@ -190,10 +195,11 @@ public:
         boost::asio::ip::tcp::socket&& socket,
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<SubscriptionManager> subscriptions,
-        std::shared_ptr<ETLLoadBalancer> balancer)
+        std::shared_ptr<ETLLoadBalancer> balancer,
+        DOSGuard& dosGuard)
     {
         std::make_shared<session>(
-            std::move(socket), backend, subscriptions, balancer)
+            std::move(socket), backend, subscriptions, balancer, dosGuard)
             ->run();
     }
 
@@ -295,6 +301,14 @@ private:
             static_cast<char const*>(buffer_.data().data()), buffer_.size()};
         // BOOST_LOG_TRIVIAL(debug) << __func__ << msg;
         boost::json::object response;
+        auto ip =
+            ws_.next_layer().socket().remote_endpoint().address().to_string();
+        BOOST_LOG_TRIVIAL(debug)
+            << __func__ << " received request from ip = " << ip;
+        if (!dosGuard_.isOk(ip))
+            response["error"] = "Too many requests. Slow down";
+        else
+        {
         try
         {
             boost::json::value raw = boost::json::parse(msg);
@@ -307,8 +321,24 @@ private:
             if (!subPtr)
                 return;
 
-            response = buildResponse(
-                request, backend_, subPtr, balancer_, shared_from_this());
+            auto [res, cost] = buildResponse(
+                request,
+                backend_,
+                subPtr,
+                balancer_,
+                shared_from_this());
+            auto start = std::chrono::system_clock::now();
+            response = std::move(res);
+            if (!dosGuard_.add(ip, cost))
+            {
+                response["warning"] = "Too many requests";
+            }
+
+            auto end = std::chrono::system_clock::now();
+            BOOST_LOG_TRIVIAL(info)
+                << __func__ << " RPC call took "
+                << ((end - start).count() / 1000000000.0)
+                << " . request = " << request;
         }
         catch (Backend::DatabaseTimeout const& t)
         {
@@ -321,6 +351,8 @@ private:
         {
             BOOST_LOG_TRIVIAL(error)
                 << __func__ << "caught exception : " << e.what();
+            response["error"] = "Unknown exception";
+        }
         }
         BOOST_LOG_TRIVIAL(trace) << __func__ << response;
         response_ = boost::json::serialize(response);
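
Taken together, the hunks above make every RPC handler report a request cost next to its JSON result, and charge that cost against the caller's per-IP budget. A minimal sketch of the cost rule in isolation, assuming a generic handler that returns a JSON object (the helper name `withCost` and the `perItem` parameter are illustrative, not part of this diff):

    #include <algorithm>
    #include <cstdint>
    #include <utility>

    #include <boost/json.hpp>

    // Illustrative helper only: the cost is the element count of one array
    // field (scaled by perItem, e.g. 4 for ledger_data objects), with a
    // floor of 1, mirroring the per-case pattern in buildResponse above.
    std::pair<boost::json::object, uint32_t>
    withCost(
        boost::json::object res,
        boost::json::string_view field,
        uint32_t perItem = 1)
    {
        uint32_t cost = 1;
        if (res.contains(field))
            cost = std::max<uint32_t>(
                1, res.at(field).as_array().size() * perItem);
        return {std::move(res), cost};
    }

With such a helper, a case like account_tx would reduce to a one-liner: return withCost(doAccountTx(request, backend), "transactions");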
server/DOSGuard.h (new file):

#include <boost/asio.hpp>
#include <boost/json.hpp>

#include <chrono>
#include <cstdint>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <unordered_set>

class DOSGuard
{
    std::unordered_map<std::string, uint32_t> ipFetchCount_;
    uint32_t maxFetches_ = 100;
    uint32_t sweepInterval_ = 1;
    std::unordered_set<std::string> whitelist_;
    boost::asio::io_context& ctx_;
    std::mutex mtx_;

public:
    DOSGuard(boost::json::object const& config, boost::asio::io_context& ctx)
        : ctx_(ctx)
    {
        if (config.contains("dos_guard"))
        {
            auto dosGuardConfig = config.at("dos_guard").as_object();
            if (dosGuardConfig.contains("max_fetches") &&
                dosGuardConfig.contains("sweep_interval"))
            {
                maxFetches_ = dosGuardConfig.at("max_fetches").as_int64();
                sweepInterval_ = dosGuardConfig.at("sweep_interval").as_int64();
            }
            if (dosGuardConfig.contains("whitelist"))
            {
                auto whitelist = dosGuardConfig.at("whitelist").as_array();
                for (auto& ip : whitelist)
                    whitelist_.insert(ip.as_string().c_str());
            }
        }
        createTimer();
    }

    void
    createTimer()
    {
        auto wait = std::chrono::seconds(sweepInterval_);
        std::shared_ptr<boost::asio::steady_timer> timer =
            std::make_shared<boost::asio::steady_timer>(
                ctx_, std::chrono::steady_clock::now() + wait);
        timer->async_wait(
            [timer, this](const boost::system::error_code& error) {
                clear();
                createTimer();
            });
    }

    bool
    isOk(std::string const& ip)
    {
        if (whitelist_.count(ip) > 0)
            return true;
        std::unique_lock lck(mtx_);
        auto it = ipFetchCount_.find(ip);
        if (it == ipFetchCount_.end())
            return true;
        return it->second < maxFetches_;
    }

    bool
    add(std::string const& ip, uint32_t numObjects)
    {
        if (whitelist_.count(ip) > 0)
            return true;
        {
            std::unique_lock lck(mtx_);
            auto it = ipFetchCount_.find(ip);
            if (it == ipFetchCount_.end())
                ipFetchCount_[ip] = numObjects;
            else
                it->second += numObjects;
        }
        return isOk(ip);
    }

    void
    clear()
    {
        std::unique_lock lck(mtx_);
        ipFetchCount_.clear();
    }
};
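
As a usage sketch of the class above: the guard reads dos_guard.max_fetches, dos_guard.sweep_interval, and dos_guard.whitelist from the config, then exposes a per-IP isOk/add budget that the sweep timer periodically resets. The config literal and client IP below are illustrative values, not taken from this commit:

    #include <boost/asio.hpp>
    #include <boost/json.hpp>
    #include <iostream>
    #include <string>
    // assumes the DOSGuard definition from server/DOSGuard.h above

    int main()
    {
        boost::asio::io_context ioc;
        boost::json::object config{
            {"dos_guard",
             {{"max_fetches", 100},
              {"sweep_interval", 1},
              {"whitelist", boost::json::array{"127.0.0.1"}}}}};
        DOSGuard guard{config, ioc};

        std::string ip = "203.0.113.7";          // example client, not whitelisted
        std::cout << guard.add(ip, 60) << "\n";  // 1: running total 60 < max_fetches
        std::cout << guard.add(ip, 60) << "\n";  // 0: running total 120 >= max_fetches
        std::cout << guard.isOk("127.0.0.1") << "\n";  // 1: whitelisted IPs always pass

        // ioc.run() would let the sweep timer fire every sweep_interval
        // seconds and clear() the counters, restoring every client's budget.
    }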
server/websocket_server_async.cpp (new file):

//
// Copyright (c) 2016-2019 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//

//------------------------------------------------------------------------------
//
// Example: WebSocket server, asynchronous
//
//------------------------------------------------------------------------------

#include <boost/asio/dispatch.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/json.hpp>

#include <boost/log/core.hpp>
#include <boost/log/expressions.hpp>
#include <boost/log/trivial.hpp>
#include <algorithm>
#include <cstdlib>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <reporting/BackendFactory.h>
#include <reporting/ReportingETL.h>
#include <reporting/server/listener.h>
#include <reporting/server/session.h>
#include <server/DOSGuard.h>
#include <sstream>
#include <string>
#include <thread>
#include <vector>

<<<<<<< HEAD:websocket_server_async.cpp
=======
//------------------------------------------------------------------------------
enum RPCCommand {
    tx,
    account_tx,
    ledger,
    account_info,
    ledger_data,
    book_offers,
    ledger_range,
    ledger_entry,
    server_info
};
std::unordered_map<std::string, RPCCommand> commandMap{
    {"tx", tx},
    {"account_tx", account_tx},
    {"ledger", ledger},
    {"ledger_range", ledger_range},
    {"ledger_entry", ledger_entry},
    {"account_info", account_info},
    {"ledger_data", ledger_data},
    {"book_offers", book_offers},
    {"server_info", server_info}};

boost::json::object
doAccountInfo(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doTx(boost::json::object const& request, BackendInterface const& backend);
boost::json::object
doAccountTx(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doLedgerData(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doLedgerEntry(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doBookOffers(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doLedger(boost::json::object const& request, BackendInterface const& backend);
boost::json::object
doLedgerRange(
    boost::json::object const& request,
    BackendInterface const& backend);
boost::json::object
doServerInfo(
    boost::json::object const& request,
    BackendInterface const& backend);

std::pair<boost::json::object, uint32_t>
buildResponse(
    boost::json::object const& request,
    BackendInterface const& backend)
{
    std::string command = request.at("command").as_string().c_str();
    BOOST_LOG_TRIVIAL(info) << "Received rpc command : " << request;
    boost::json::object response;
    switch (commandMap[command])
    {
        case tx:
            return {doTx(request, backend), 1};
            break;
        case account_tx: {
            auto res = doAccountTx(request, backend);
            if (res.contains("transactions"))
                return {res, res["transactions"].as_array().size()};
            return {res, 1};
        }
        break;
        case ledger: {
            auto res = doLedger(request, backend);
            if (res.contains("transactions"))
                return {res, res["transactions"].as_array().size()};
            return {res, 1};
        }
        break;
        case ledger_entry:
            return {doLedgerEntry(request, backend), 1};
            break;
        case ledger_range:
            return {doLedgerRange(request, backend), 1};
            break;
        case ledger_data: {
            auto res = doLedgerData(request, backend);
            if (res.contains("objects"))
                return {res, res["objects"].as_array().size()};
            return {res, 1};
        }
        break;
        case server_info: {
            return {doServerInfo(request, backend), 1};
            break;
        }
        case account_info:
            return {doAccountInfo(request, backend), 1};
            break;
        case book_offers: {
            auto res = doBookOffers(request, backend);
            if (res.contains("offers"))
                return {res, res["offers"].as_array().size()};
            return {res, 1};
        }
        break;
        default:
            BOOST_LOG_TRIVIAL(error) << "Unknown command: " << command;
    }
    return {response, 1};
}
// Report a failure
void
fail(boost::beast::error_code ec, char const* what)
{
    std::cerr << what << ": " << ec.message() << "\n";
}

// Echoes back all received WebSocket messages
class session : public std::enable_shared_from_this<session>
{
    boost::beast::websocket::stream<boost::beast::tcp_stream> ws_;
    boost::beast::flat_buffer buffer_;
    std::string response_;
    BackendInterface const& backend_;
    DOSGuard& dosGuard_;

public:
    // Take ownership of the socket
    explicit session(
        boost::asio::ip::tcp::socket&& socket,
        BackendInterface const& backend,
        DOSGuard& dosGuard)
        : ws_(std::move(socket)), backend_(backend), dosGuard_(dosGuard)
    {
    }

    // Get on the correct executor
    void
    run()
    {
        // We need to be executing within a strand to perform async
        // operations on the I/O objects in this session. Although not
        // strictly necessary for single-threaded contexts, this example
        // code is written to be thread-safe by default.
        boost::asio::dispatch(
            ws_.get_executor(),
            boost::beast::bind_front_handler(
                &session::on_run, shared_from_this()));
    }

    // Start the asynchronous operation
    void
    on_run()
    {
        // Set suggested timeout settings for the websocket
        ws_.set_option(boost::beast::websocket::stream_base::timeout::suggested(
            boost::beast::role_type::server));

        // Set a decorator to change the Server of the handshake
        ws_.set_option(boost::beast::websocket::stream_base::decorator(
            [](boost::beast::websocket::response_type& res) {
                res.set(
                    boost::beast::http::field::server,
                    std::string(BOOST_BEAST_VERSION_STRING) +
                        " websocket-server-async");
            }));
        // Accept the websocket handshake
        ws_.async_accept(boost::beast::bind_front_handler(
            &session::on_accept, shared_from_this()));
    }

    void
    on_accept(boost::beast::error_code ec)
    {
        if (ec)
            return fail(ec, "accept");

        // Read a message
        do_read();
    }

    void
    do_read()
    {
        // Read a message into our buffer
        ws_.async_read(
            buffer_,
            boost::beast::bind_front_handler(
                &session::on_read, shared_from_this()));
    }

    void
    on_read(boost::beast::error_code ec, std::size_t bytes_transferred)
    {
        boost::ignore_unused(bytes_transferred);

        // This indicates that the session was closed
        if (ec == boost::beast::websocket::error::closed)
            return;

        if (ec)
            fail(ec, "read");
        std::string msg{
            static_cast<char const*>(buffer_.data().data()), buffer_.size()};
        // BOOST_LOG_TRIVIAL(debug) << __func__ << msg;
        boost::json::object response;
        auto ip =
            ws_.next_layer().socket().remote_endpoint().address().to_string();
        BOOST_LOG_TRIVIAL(debug)
            << __func__ << " received request from ip = " << ip;
        if (!dosGuard_.isOk(ip))
            response["error"] = "Too many requests. Slow down";
        else
        {
            try
            {
                boost::json::value raw = boost::json::parse(msg);
                boost::json::object request = raw.as_object();
                BOOST_LOG_TRIVIAL(debug) << " received request : " << request;
                try
                {
                    auto start = std::chrono::system_clock::now();
                    auto [res, cost] = buildResponse(request, backend_);
                    response = std::move(res);
                    if (!dosGuard_.add(ip, cost))
                    {
                        response["warning"] = "Too many requests";
                    }

                    auto end = std::chrono::system_clock::now();
                    BOOST_LOG_TRIVIAL(info)
                        << __func__ << " RPC call took "
                        << ((end - start).count() / 1000000000.0)
                        << " . request = " << request;
                }
                catch (Backend::DatabaseTimeout const& t)
                {
                    BOOST_LOG_TRIVIAL(error) << __func__ << " Database timeout";
                    response["error"] =
                        "Database read timeout. Please retry the request";
                }
            }
            catch (std::exception const& e)
            {
                BOOST_LOG_TRIVIAL(error)
                    << __func__ << "caught exception : " << e.what();
                response["error"] = "Unknown exception";
            }
        }
        BOOST_LOG_TRIVIAL(trace) << __func__ << response;
        response_ = boost::json::serialize(response);

        // Echo the message
        ws_.text(ws_.got_text());
        ws_.async_write(
            boost::asio::buffer(response_),
            boost::beast::bind_front_handler(
                &session::on_write, shared_from_this()));
    }

    void
    on_write(boost::beast::error_code ec, std::size_t bytes_transferred)
    {
        boost::ignore_unused(bytes_transferred);

        if (ec)
            return fail(ec, "write");

        // Clear the buffer
        buffer_.consume(buffer_.size());

        // Do another read
        do_read();
    }
};

//------------------------------------------------------------------------------

// Accepts incoming connections and launches the sessions
class listener : public std::enable_shared_from_this<listener>
{
    boost::asio::io_context& ioc_;
    boost::asio::ip::tcp::acceptor acceptor_;
    BackendInterface const& backend_;
    DOSGuard& dosGuard_;

public:
    listener(
        boost::asio::io_context& ioc,
        boost::asio::ip::tcp::endpoint endpoint,
        BackendInterface const& backend,
        DOSGuard& dosGuard)
        : ioc_(ioc), acceptor_(ioc), backend_(backend), dosGuard_(dosGuard)
    {
        boost::beast::error_code ec;

        // Open the acceptor
        acceptor_.open(endpoint.protocol(), ec);
        if (ec)
        {
            fail(ec, "open");
            return;
        }

        // Allow address reuse
        acceptor_.set_option(boost::asio::socket_base::reuse_address(true), ec);
        if (ec)
        {
            fail(ec, "set_option");
            return;
        }

        // Bind to the server address
        acceptor_.bind(endpoint, ec);
        if (ec)
        {
            fail(ec, "bind");
            return;
        }

        // Start listening for connections
        acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec);
        if (ec)
        {
            fail(ec, "listen");
            return;
        }
    }

    // Start accepting incoming connections
    void
    run()
    {
        do_accept();
    }

private:
    void
    do_accept()
    {
        // The new connection gets its own strand
        acceptor_.async_accept(
            boost::asio::make_strand(ioc_),
            boost::beast::bind_front_handler(
                &listener::on_accept, shared_from_this()));
    }

    void
    on_accept(boost::beast::error_code ec, boost::asio::ip::tcp::socket socket)
    {
        if (ec)
        {
            fail(ec, "accept");
        }
        else
        {
            // Create the session and run it
            std::make_shared<session>(std::move(socket), backend_, dosGuard_)
                ->run();
        }

        // Accept another connection
        do_accept();
    }
};

>>>>>>> dev:server/websocket_server_async.cpp
std::optional<boost::json::object>
parse_config(const char* filename)
{
    try
    {
        std::ifstream in(filename, std::ios::in | std::ios::binary);
        if (in)
        {
            std::stringstream contents;
            contents << in.rdbuf();
            in.close();
            std::cout << contents.str() << std::endl;
            boost::json::value value = boost::json::parse(contents.str());
            return value.as_object();
        }
    }
    catch (std::exception const& e)
    {
        std::cout << e.what() << std::endl;
    }
    return {};
}
//------------------------------------------------------------------------------
//
void
initLogLevel(int level)
{
    switch (level)
    {
        case 0:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::trace);
            break;
        case 1:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::debug);
            break;
        case 2:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::info);
            break;
        case 3:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::warning);
            break;
        case 4:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::error);
            break;
        case 5:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::fatal);
            break;
        default:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::info);
    }
}

void
start(boost::asio::io_context& ioc, std::uint32_t numThreads)
{
    std::vector<std::thread> v;
    v.reserve(numThreads - 1);
    for (auto i = numThreads - 1; i > 0; --i)
        v.emplace_back([&ioc] { ioc.run(); });

    ioc.run();
}

int
main(int argc, char* argv[])
{
    // Check command line arguments.
    if (argc != 5 and argc != 6)
    {
        std::cerr
            << "Usage: websocket-server-async <address> <port> <threads> "
               "<config_file> <log level> \n"
            << "Example:\n"
            << "    websocket-server-async 0.0.0.0 8080 1 config.json 2\n";
        return EXIT_FAILURE;
    }
    auto const address = boost::asio::ip::make_address(argv[1]);
    auto const port = static_cast<unsigned short>(std::atoi(argv[2]));
    auto const threads = std::max<int>(1, std::atoi(argv[3]));
    auto const config = parse_config(argv[4]);
    if (argc > 5)
    {
        initLogLevel(std::atoi(argv[5]));
    }
    else
    {
        initLogLevel(2);
    }
    if (!config)
    {
        std::cerr << "couldnt parse config. Exiting..." << std::endl;
        return EXIT_FAILURE;
    }

    // The io_context is required for all I/O
    boost::asio::io_context ioc{threads};
<<<<<<< HEAD:websocket_server_async.cpp

    std::shared_ptr<BackendInterface> backend{Backend::make_Backend(*config)};

    std::shared_ptr<SubscriptionManager> subscriptions{
        SubscriptionManager::make_SubscriptionManager()};
=======
    ReportingETL etl{config.value(), ioc};
    DOSGuard dosGuard{config.value(), ioc};
>>>>>>> dev:server/websocket_server_async.cpp

    std::shared_ptr<NetworkValidatedLedgers> ledgers{
        NetworkValidatedLedgers::make_ValidatedLedgers()};

    std::shared_ptr<ETLLoadBalancer> balancer{
        ETLLoadBalancer::make_ETLLoadBalancer(
            *config,
            ioc,
<<<<<<< HEAD:websocket_server_async.cpp
            backend,
            subscriptions,
            ledgers)};
=======
            boost::asio::ip::tcp::endpoint{address, port},
            etl.getFlatMapBackend(),
            dosGuard)
            ->run();
>>>>>>> dev:server/websocket_server_async.cpp

    std::shared_ptr<ReportingETL> etl{ReportingETL::make_ReportingETL(
        *config, ioc, backend, subscriptions, balancer, ledgers)};

    listener::make_listener(
        ioc,
        boost::asio::ip::tcp::endpoint{address, port},
        backend,
        subscriptions,
        balancer);

    // Blocks until stopped.
    // When stopped, shared_ptrs fall out of scope
    // Calls destructors on all resources, and destructs in order
    start(ioc, threads);

    return EXIT_SUCCESS;
}
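
The server above speaks plain JSON over a WebSocket, so any WebSocket client can exercise it. A minimal synchronous Boost.Beast client for the new server_info command; host and port mirror the defaults used by test.py below, and the whole program is a sketch, not part of this commit:

    #include <boost/asio/connect.hpp>
    #include <boost/asio/ip/tcp.hpp>
    #include <boost/beast/core.hpp>
    #include <boost/beast/websocket.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
        namespace net = boost::asio;
        namespace websocket = boost::beast::websocket;
        using tcp = net::ip::tcp;

        net::io_context ioc;
        tcp::resolver resolver{ioc};
        websocket::stream<tcp::socket> ws{ioc};

        // Connect the TCP layer, then upgrade to a WebSocket session
        auto const results = resolver.resolve("127.0.0.1", "8080");
        net::connect(ws.next_layer(), results.begin(), results.end());
        ws.handshake("127.0.0.1", "/");

        // One JSON request in, one JSON response out
        ws.write(net::buffer(std::string{R"({"command":"server_info"})"}));
        boost::beast::flat_buffer buffer;
        ws.read(buffer);
        std::cout << boost::beast::make_printable(buffer.data()) << std::endl;

        ws.close(websocket::close_code::normal);
    }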
test.py
@@ -436,9 +436,12 @@ async def ledger_data(ip, port, ledger, limit, binary, cursor):
     address = 'ws://' + str(ip) + ':' + str(port)
     try:
         async with websockets.connect(address) as ws:
+            if limit is not None:
             await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":bool(binary),"limit":int(limit),"cursor":cursor}))
+            else:
             await ws.send(json.dumps({"command":"ledger_data","ledger_index":int(ledger),"binary":bool(binary),"cursor":cursor}))
             res = json.loads(await ws.recv())
+            print(res)
             objects = []
             blobs = []
             keys = []
@@ -598,7 +601,7 @@ async def book_offers(ip, port, ledger, pay_currency, pay_issuer, get_currency,
             req["cursor"] = cursor
         await ws.send(json.dumps(req))
         res = json.loads(await ws.recv())
-        #print(json.dumps(res,indent=4,sort_keys=True))
+        print(json.dumps(res,indent=4,sort_keys=True))
         if "result" in res:
             res = res["result"]
         for x in res["offers"]:
@@ -729,7 +732,7 @@ async def ledger(ip, port, ledger, binary, transactions, expand):
         async with websockets.connect(address,max_size=1000000000) as ws:
             await ws.send(json.dumps({"command":"ledger","ledger_index":int(ledger),"binary":bool(binary), "transactions":bool(transactions),"expand":bool(expand)}))
             res = json.loads(await ws.recv())
-            #print(json.dumps(res,indent=4,sort_keys=True))
+            print(json.dumps(res,indent=4,sort_keys=True))
             return res
 
     except websockets.exceptions.connectionclosederror as e:
@@ -764,6 +767,15 @@ async def fee(ip, port):
             print(json.dumps(res,indent=4,sort_keys=True))
     except websockets.exceptions.connectionclosederror as e:
         print(e)
+
+async def server_info(ip, port):
+    address = 'ws://' + str(ip) + ':' + str(port)
+    try:
+        async with websockets.connect(address) as ws:
+            await ws.send(json.dumps({"command":"server_info"}))
+            res = json.loads(await ws.recv())
+            print(json.dumps(res,indent=4,sort_keys=True))
+    except websockets.exceptions.connectionclosederror as e:
+        print(e)
 
 async def ledger_diff(ip, port, base, desired, includeBlobs):
     address = 'ws://' + str(ip) + ':' + str(port)
@@ -785,7 +797,7 @@ async def perf(ip, port):
 
 
 parser = argparse.ArgumentParser(description='test script for xrpl-reporting')
-parser.add_argument('action', choices=["account_info", "tx", "txs","account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range","ledger_entry", "ledgers", "ledger_entries","account_txs","account_infos","account_txs_full","book_offerses","ledger_diff","perf","fee"])
+parser.add_argument('action', choices=["account_info", "tx", "txs","account_tx", "account_tx_full","ledger_data", "ledger_data_full", "book_offers","ledger","ledger_range","ledger_entry", "ledgers", "ledger_entries","account_txs","account_infos","account_txs_full","book_offerses","ledger_diff","perf","fee","server_info"])
 
 parser.add_argument('--ip', default='127.0.0.1')
 parser.add_argument('--port', default='8080')
@@ -828,6 +840,8 @@ def run(args):
         args.ledger = asyncio.get_event_loop().run_until_complete(ledger_range(args.ip, args.port))[1]
     if args.action == "fee":
         asyncio.get_event_loop().run_until_complete(fee(args.ip, args.port))
+    elif args.action == "server_info":
+        asyncio.get_event_loop().run_until_complete(server_info(args.ip, args.port))
     elif args.action == "perf":
         asyncio.get_event_loop().run_until_complete(
             perf(args.ip,args.port))
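
test.py's server_info path expects a JSON object back, but the handler body itself is not shown in this section; only the doServerInfo declaration appears above. A hypothetical minimal shape, assuming nothing beyond the fetchLedgerRange API the rest of this commit already uses (the complete_ledgers field name is an illustrative assumption, not confirmed by this diff):

    #include <boost/json.hpp>
    #include <reporting/BackendInterface.h>
    #include <string>

    // Hypothetical sketch only: the real doServerInfo is not part of this
    // section of the diff.
    boost::json::object
    doServerInfo(
        boost::json::object const& request,
        BackendInterface const& backend)
    {
        boost::json::object response;
        // complete_ledgers is an illustrative field name
        if (auto range = backend.fetchLedgerRange(); range)
            response["complete_ledgers"] = std::to_string(range->minSequence) +
                "-" + std::to_string(range->maxSequence);
        return response;
    }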
|||||||
727
unittests/main.cpp
Normal file
727
unittests/main.cpp
Normal file
@@ -0,0 +1,727 @@
|
|||||||
|
#include <algorithm>
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
#include <handlers/RPCHelpers.h>
|
||||||
|
#include <reporting/DBHelpers.h>
|
||||||
|
|
||||||
|
#include <boost/log/core.hpp>
|
||||||
|
#include <boost/log/expressions.hpp>
|
||||||
|
#include <boost/log/trivial.hpp>
|
||||||
|
#include <reporting/BackendFactory.h>
|
||||||
|
#include <reporting/BackendInterface.h>
|
||||||
|
|
||||||
|
// Demonstrate some basic assertions.
|
||||||
|
TEST(BackendTest, Basic)
|
||||||
|
{
|
||||||
|
boost::log::core::get()->set_filter(
|
||||||
|
boost::log::trivial::severity >= boost::log::trivial::warning);
|
||||||
|
std::string keyspace =
|
||||||
|
"oceand_test_" +
|
||||||
|
std::to_string(
|
||||||
|
std::chrono::system_clock::now().time_since_epoch().count());
|
||||||
|
boost::json::object cassandraConfig{
|
||||||
|
{"database",
|
||||||
|
{{"type", "cassandra"},
|
||||||
|
{"cassandra",
|
||||||
|
{{"contact_points", "127.0.0.1"},
|
||||||
|
{"port", 9042},
|
||||||
|
{"keyspace", keyspace.c_str()},
|
||||||
|
{"replication_factor", 1},
|
||||||
|
{"table_prefix", ""},
|
||||||
|
{"max_requests_outstanding", 1000},
|
||||||
|
{"indexer_key_shift", 2},
|
||||||
|
{"threads", 8}}}}}};
|
||||||
|
boost::json::object postgresConfig{
|
||||||
|
{"database",
|
||||||
|
{{"type", "postgres"},
|
||||||
|
{"postgres",
|
||||||
|
{{"contact_point", "127.0.0.1"},
|
||||||
|
{"username", "postgres"},
|
||||||
|
{"database", keyspace.c_str()},
|
||||||
|
{"password", "postgres"},
|
||||||
|
{"indexer_key_shift", 2},
|
||||||
|
{"threads", 8}}}}}};
|
||||||
|
std::vector<boost::json::object> configs = {
|
||||||
|
cassandraConfig, postgresConfig};
|
||||||
|
for (auto& config : configs)
|
||||||
|
{
|
||||||
|
std::cout << keyspace << std::endl;
|
||||||
|
auto backend = Backend::makeBackend(config);
|
||||||
|
backend->open(false);
|
||||||
|
|
||||||
|
std::string rawHeader =
|
||||||
|
"03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335BC54351E"
|
||||||
|
"DD73"
|
||||||
|
"3898497E809E04074D14D271E4832D7888754F9230800761563A292FA2315A6DB6"
|
||||||
|
"FE30"
|
||||||
|
"CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED3CF53E2232B3"
|
||||||
|
"3EF5"
|
||||||
|
"7CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE282656A58CE5AA29652EF"
|
||||||
|
"FD80"
|
||||||
|
"AC59CD91416E4E13DBBE";
|
||||||
|
|
||||||
|
auto hexStringToBinaryString = [](auto const& hex) {
|
||||||
|
auto blob = ripple::strUnHex(hex);
|
||||||
|
std::string strBlob;
|
||||||
|
for (auto c : *blob)
|
||||||
|
{
|
||||||
|
strBlob += c;
|
||||||
|
}
|
||||||
|
return strBlob;
|
||||||
|
};
|
||||||
|
auto binaryStringToUint256 = [](auto const& bin) -> ripple::uint256 {
|
||||||
|
ripple::uint256 uint;
|
||||||
|
return uint.fromVoid((void const*)bin.data());
|
||||||
|
};
|
||||||
|
auto ledgerInfoToBinaryString = [](auto const& info) {
|
||||||
|
auto blob = ledgerInfoToBlob(info);
|
||||||
|
std::string strBlob;
|
||||||
|
for (auto c : blob)
|
||||||
|
{
|
||||||
|
strBlob += c;
|
||||||
|
}
|
||||||
|
return strBlob;
|
||||||
|
};
|
||||||
|
|
||||||
|
std::string rawHeaderBlob = hexStringToBinaryString(rawHeader);
|
||||||
|
ripple::LedgerInfo lgrInfo =
|
||||||
|
deserializeHeader(ripple::makeSlice(rawHeaderBlob));
|
||||||
|
|
||||||
|
backend->startWrites();
|
||||||
|
backend->writeLedger(lgrInfo, std::move(rawHeaderBlob), true);
|
||||||
|
ASSERT_TRUE(backend->finishWrites(lgrInfo.seq));
|
||||||
|
{
|
||||||
|
auto rng = backend->fetchLedgerRange();
|
||||||
|
EXPECT_TRUE(rng.has_value());
|
||||||
|
EXPECT_EQ(rng->minSequence, rng->maxSequence);
|
||||||
|
EXPECT_EQ(rng->maxSequence, lgrInfo.seq);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
auto seq = backend->fetchLatestLedgerSequence();
|
||||||
|
EXPECT_TRUE(seq.has_value());
|
||||||
|
EXPECT_EQ(*seq, lgrInfo.seq);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
auto retLgr = backend->fetchLedgerBySequence(lgrInfo.seq);
|
||||||
|
EXPECT_TRUE(retLgr.has_value());
|
||||||
|
EXPECT_EQ(retLgr->seq, lgrInfo.seq);
|
||||||
|
EXPECT_EQ(ledgerInfoToBlob(lgrInfo), ledgerInfoToBlob(*retLgr));
|
||||||
|
}
|
||||||
|
|
||||||
|
EXPECT_FALSE(
|
||||||
|
backend->fetchLedgerBySequence(lgrInfo.seq + 1).has_value());
|
||||||
|
auto lgrInfoOld = lgrInfo;
|
||||||
|
|
||||||
|
auto lgrInfoNext = lgrInfo;
|
||||||
|
lgrInfoNext.seq = lgrInfo.seq + 1;
|
||||||
|
lgrInfoNext.parentHash = lgrInfo.hash;
|
||||||
|
lgrInfoNext.hash++;
|
||||||
|
lgrInfoNext.accountHash = ~lgrInfo.accountHash;
|
||||||
|
{
|
||||||
|
std::string rawHeaderBlob = ledgerInfoToBinaryString(lgrInfoNext);
|
||||||
|
|
||||||
|
backend->startWrites();
|
||||||
|
backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob));
|
||||||
|
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
|
||||||
|
}
|
||||||
|
{
|
||||||
|
auto rng = backend->fetchLedgerRange();
|
||||||
|
EXPECT_TRUE(rng.has_value());
|
||||||
|
EXPECT_EQ(rng->minSequence, lgrInfoOld.seq);
|
||||||
|
EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
auto seq = backend->fetchLatestLedgerSequence();
|
||||||
|
EXPECT_EQ(seq, lgrInfoNext.seq);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq);
|
||||||
|
EXPECT_TRUE(retLgr.has_value());
|
||||||
|
EXPECT_EQ(retLgr->seq, lgrInfoNext.seq);
|
||||||
|
EXPECT_EQ(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfoNext));
|
||||||
|
EXPECT_NE(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfoOld));
|
||||||
|
retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 1);
|
||||||
|
EXPECT_EQ(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfoOld));
|
||||||
|
|
||||||
|
EXPECT_NE(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfoNext));
|
||||||
|
retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 2);
|
||||||
|
EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfoNext.seq - 2)
|
||||||
|
.has_value());
|
||||||
|
|
||||||
|
auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq);
|
||||||
|
EXPECT_EQ(txns.size(), 0);
|
||||||
|
auto hashes =
|
||||||
|
backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq);
|
||||||
|
EXPECT_EQ(hashes.size(), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
// the below dummy data is not expected to be consistent. The metadata
|
||||||
|
// string does represent valid metadata. Don't assume though that the
|
||||||
|
// transaction or its hash correspond to the metadata, or anything like
|
||||||
|
// that. These tests are purely binary tests to make sure the same data
|
||||||
|
// that goes in, comes back out
|
||||||
|
std::string metaHex =
|
||||||
|
"201C0000001AF8E411006F560A3E08122A05AC91DEFA87052B0554E4A29B46"
|
||||||
|
"3A27642EBB060B6052196592EEE72200000000240480FDB52503CE1A863300"
|
||||||
|
"000000000000003400000000000000005529983CBAED30F547471452921C3C"
|
||||||
|
"6B9F9685F292F6291000EED0A44413AF18C250101AC09600F4B502C8F7F830"
|
||||||
|
"F80B616DCB6F3970CB79AB70975A05ED5B66860B9564400000001FE217CB65"
|
||||||
|
"D54B640B31521B05000000000000000000000000434E5900000000000360E3"
|
||||||
|
"E0751BD9A566CD03FA6CAFC78118B82BA081142252F328CF91263417762570"
|
||||||
|
"D67220CCB33B1370E1E1E3110064561AC09600F4B502C8F7F830F80B616DCB"
|
||||||
|
"6F3970CB79AB70975A05ED33DF783681E8365A05ED33DF783681581AC09600"
|
||||||
|
"F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED33DF783681031100"
|
||||||
|
"0000000000000000000000434E59000000000004110360E3E0751BD9A566CD"
|
||||||
|
"03FA6CAFC78118B82BA0E1E1E4110064561AC09600F4B502C8F7F830F80B61"
|
||||||
|
"6DCB6F3970CB79AB70975A05ED5B66860B95E72200000000365A05ED5B6686"
|
||||||
|
"0B95581AC09600F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED5B"
|
||||||
|
"66860B95011100000000000000000000000000000000000000000211000000"
|
||||||
|
"00000000000000000000000000000000000311000000000000000000000000"
|
||||||
|
"434E59000000000004110360E3E0751BD9A566CD03FA6CAFC78118B82BA0E1"
|
||||||
|
"E1E311006F5647B05E66DE9F3DF2689E8F4CE6126D3136B6C5E79587F9D24B"
|
||||||
|
"D71A952B0852BAE8240480FDB950101AC09600F4B502C8F7F830F80B616DCB"
|
||||||
|
"6F3970CB79AB70975A05ED33DF78368164400000033C83A95F65D59D9A6291"
|
||||||
|
"9C2D18000000000000000000000000434E5900000000000360E3E0751BD9A5"
|
||||||
|
"66CD03FA6CAFC78118B82BA081142252F328CF91263417762570D67220CCB3"
|
||||||
|
"3B1370E1E1E511006456AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588"
|
||||||
|
"C2D55C84718FAFBBD2604AE722000000003100000000000000003200000000"
|
||||||
|
"0000000058AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588C2D55C8471"
|
||||||
|
"8FAFBBD2604A82142252F328CF91263417762570D67220CCB33B1370E1E1E5"
|
||||||
|
"1100612503CE1A8755CE935137F8C6C8DEF26B5CD93BE18105CA83F65E1E90"
|
||||||
|
"CEC546F562D25957DC0856E0311EB450B6177F969B94DBDDA83E99B7A0576A"
|
||||||
|
"CD9079573876F16C0C004F06E6240480FDB9624000000005FF0E2BE1E72200"
|
||||||
|
"000000240480FDBA2D00000005624000000005FF0E1F81142252F328CF9126"
|
||||||
|
"3417762570D67220CCB33B1370E1E1F1031000";
|
||||||
|
std::string txnHex =
|
||||||
|
"1200072200000000240480FDB920190480FDB5201B03CE1A8964400000033C"
|
||||||
|
"83A95F65D59D9A62919C2D18000000000000000000000000434E5900000000"
|
||||||
|
"000360E3E0751BD9A566CD03FA6CAFC78118B82BA068400000000000000C73"
|
||||||
|
"21022D40673B44C82DEE1DDB8B9BB53DCCE4F97B27404DB850F068DD91D685"
|
||||||
|
"E337EA7446304402202EA6B702B48B39F2197112382838F92D4C02948E9911"
|
||||||
|
"FE6B2DEBCF9183A426BC022005DAC06CD4517E86C2548A80996019F3AC60A0"
|
||||||
|
"9EED153BF60C992930D68F09F981142252F328CF91263417762570D67220CC"
|
||||||
|
"B33B1370";
|
||||||
|
std::string hashHex =
|
||||||
|
"0A81FB3D6324C2DCF73131505C6E4DC67981D7FC39F5E9574CEC4B1F22D28BF7";
|
||||||
|
|
||||||
|
// this account is not related to the above transaction and metadata
|
||||||
|
std::string accountHex =
|
||||||
|
"1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD018EFFBE"
|
||||||
|
"17C5"
|
||||||
|
"C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E07811422"
|
||||||
|
"52F3"
|
||||||
|
"28CF91263417762570D67220CCB33B1370";
|
||||||
|
std::string accountIndexHex =
|
||||||
|
"E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C004F06";
|
||||||
|
|
||||||
|
std::string metaBlob = hexStringToBinaryString(metaHex);
|
||||||
|
std::string txnBlob = hexStringToBinaryString(txnHex);
|
||||||
|
std::string hashBlob = hexStringToBinaryString(hashHex);
|
||||||
|
std::string accountBlob = hexStringToBinaryString(accountHex);
|
||||||
|
std::string accountIndexBlob = hexStringToBinaryString(accountIndexHex);
|
||||||
|
std::vector<ripple::AccountID> affectedAccounts;
|
||||||
|
|
||||||
|
{
|
||||||
|
backend->startWrites();
|
||||||
|
lgrInfoNext.seq = lgrInfoNext.seq + 1;
|
||||||
|
lgrInfoNext.txHash = ~lgrInfo.txHash;
|
||||||
|
lgrInfoNext.accountHash =
|
||||||
|
lgrInfoNext.accountHash ^ lgrInfoNext.txHash;
|
||||||
|
lgrInfoNext.parentHash = lgrInfoNext.hash;
|
||||||
|
lgrInfoNext.hash++;
|
||||||
|
|
||||||
|
ripple::uint256 hash256;
|
||||||
|
EXPECT_TRUE(hash256.parseHex(hashHex));
|
||||||
|
ripple::TxMeta txMeta{hash256, lgrInfoNext.seq, metaBlob};
|
||||||
|
auto journal = ripple::debugLog();
|
||||||
|
auto accountsSet = txMeta.getAffectedAccounts(journal);
|
||||||
|
for (auto& a : accountsSet)
|
||||||
|
{
|
||||||
|
affectedAccounts.push_back(a);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<AccountTransactionsData> accountTxData;
|
||||||
|
accountTxData.emplace_back(txMeta, hash256, journal);
|
||||||
|
backend->writeLedger(
|
||||||
|
lgrInfoNext, std::move(ledgerInfoToBinaryString(lgrInfoNext)));
|
||||||
|
backend->writeTransaction(
|
||||||
|
std::move(std::string{hashBlob}),
|
||||||
|
lgrInfoNext.seq,
|
||||||
|
std::move(std::string{txnBlob}),
|
||||||
|
std::move(std::string{metaBlob}));
|
||||||
|
backend->writeAccountTransactions(std::move(accountTxData));
|
||||||
|
backend->writeLedgerObject(
|
||||||
|
std::move(std::string{accountIndexBlob}),
|
||||||
|
lgrInfoNext.seq,
|
||||||
|
std::move(std::string{accountBlob}),
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
{});
|
||||||
|
|
||||||
|
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
auto rng = backend->fetchLedgerRange();
|
||||||
|
EXPECT_TRUE(rng);
|
||||||
|
EXPECT_EQ(rng->minSequence, lgrInfoOld.seq);
|
||||||
|
EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq);
|
||||||
|
auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq);
|
||||||
|
EXPECT_TRUE(retLgr);
|
||||||
|
EXPECT_EQ(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfoNext));
|
||||||
|
auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq);
|
||||||
|
EXPECT_EQ(txns.size(), 1);
|
||||||
|
EXPECT_STREQ(
|
||||||
|
(const char*)txns[0].transaction.data(),
|
||||||
|
(const char*)txnBlob.data());
|
||||||
|
EXPECT_STREQ(
|
||||||
|
(const char*)txns[0].metadata.data(),
|
||||||
|
(const char*)metaBlob.data());
|
||||||
|
auto hashes =
|
||||||
|
backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq);
|
||||||
|
EXPECT_EQ(hashes.size(), 1);
|
||||||
|
EXPECT_EQ(ripple::strHex(hashes[0]), hashHex);
|
||||||
|
for (auto& a : affectedAccounts)
|
||||||
|
{
|
||||||
|
auto accountTxns = backend->fetchAccountTransactions(a, 100);
|
||||||
|
EXPECT_EQ(accountTxns.first.size(), 1);
|
||||||
|
EXPECT_EQ(accountTxns.first[0], txns[0]);
|
||||||
|
EXPECT_FALSE(accountTxns.second);
|
||||||
|
}
|
||||||
|
|
||||||
|
ripple::uint256 key256;
|
||||||
|
EXPECT_TRUE(key256.parseHex(accountIndexHex));
|
||||||
|
auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq);
|
||||||
|
EXPECT_TRUE(obj);
|
||||||
|
EXPECT_STREQ(
|
||||||
|
(const char*)obj->data(), (const char*)accountBlob.data());
|
||||||
|
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1);
|
||||||
|
EXPECT_TRUE(obj);
|
||||||
|
EXPECT_STREQ(
|
||||||
|
(const char*)obj->data(), (const char*)accountBlob.data());
|
||||||
|
obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1);
|
||||||
|
EXPECT_FALSE(obj);
|
||||||
|
}
|
||||||
|
// obtain a time-based seed:
|
||||||
|
unsigned seed =
|
||||||
|
std::chrono::system_clock::now().time_since_epoch().count();
|
||||||
|
std::string accountBlobOld = accountBlob;
|
||||||
|
{
|
||||||
|
backend->startWrites();
|
||||||
|
lgrInfoNext.seq = lgrInfoNext.seq + 1;
|
||||||
|
lgrInfoNext.parentHash = lgrInfoNext.hash;
|
||||||
|
lgrInfoNext.hash++;
|
||||||
|
lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash;
|
||||||
|
lgrInfoNext.accountHash =
|
||||||
|
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
|
||||||
|
|
||||||
|
backend->writeLedger(
|
||||||
|
lgrInfoNext, std::move(ledgerInfoToBinaryString(lgrInfoNext)));
|
||||||
|
std::shuffle(
|
||||||
|
accountBlob.begin(),
|
||||||
|
accountBlob.end(),
|
||||||
|
std::default_random_engine(seed));
|
||||||
|
backend->writeLedgerObject(
|
||||||
|
std::move(std::string{accountIndexBlob}),
|
||||||
|
lgrInfoNext.seq,
|
||||||
|
std::move(std::string{accountBlob}),
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
{});
|
||||||
|
|
||||||
|
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
|
||||||
|
}
|
||||||
|
{
|
||||||
|
auto rng = backend->fetchLedgerRange();
|
||||||
|
EXPECT_TRUE(rng);
|
||||||
|
EXPECT_EQ(rng->minSequence, lgrInfoOld.seq);
|
||||||
|
EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq);
|
||||||
|
auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq);
|
||||||
|
EXPECT_TRUE(retLgr);
|
||||||
|
EXPECT_EQ(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfoNext));
|
||||||
|
auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq);
|
||||||
|
EXPECT_EQ(txns.size(), 0);
|
||||||
|
|
||||||
|
ripple::uint256 key256;
|
||||||
|
EXPECT_TRUE(key256.parseHex(accountIndexHex));
|
||||||
|
auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq);
|
||||||
|
EXPECT_TRUE(obj);
|
||||||
|
EXPECT_STREQ(
|
||||||
|
(const char*)obj->data(), (const char*)accountBlob.data());
|
||||||
|
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1);
|
||||||
|
EXPECT_TRUE(obj);
|
||||||
|
EXPECT_STREQ(
|
||||||
|
(const char*)obj->data(), (const char*)accountBlob.data());
|
||||||
|
obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 1);
|
||||||
|
EXPECT_TRUE(obj);
|
||||||
|
EXPECT_STREQ(
|
||||||
|
(const char*)obj->data(), (const char*)accountBlobOld.data());
|
||||||
|
obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1);
|
||||||
|
EXPECT_FALSE(obj);
|
||||||
|
}
|
||||||
|
|
||||||
|
auto generateObjects = [seed](
|
||||||
|
size_t numObjects, uint32_t ledgerSequence) {
|
||||||
|
std::vector<std::pair<std::string, std::string>> res{numObjects};
|
||||||
|
ripple::uint256 key;
|
||||||
|
key = ledgerSequence * 100000;
|
||||||
|
|
||||||
|
for (auto& blob : res)
|
||||||
|
{
|
||||||
|
++key;
|
||||||
|
std::string keyStr{(const char*)key.data(), key.size()};
|
||||||
|
blob.first = keyStr;
|
||||||
|
blob.second = std::to_string(ledgerSequence) + keyStr;
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
};
|
||||||
|
auto updateObjects = [](uint32_t ledgerSequence, auto objs) {
|
||||||
|
for (auto& [key, obj] : objs)
|
||||||
|
{
|
||||||
|
obj = std::to_string(ledgerSequence) + obj;
|
||||||
|
}
|
||||||
|
return objs;
|
||||||
|
};
|
||||||
|
auto generateTxns = [seed](size_t numTxns, uint32_t ledgerSequence) {
|
||||||
|
std::vector<std::tuple<std::string, std::string, std::string>> res{
|
||||||
|
numTxns};
|
||||||
|
ripple::uint256 base;
|
||||||
|
base = ledgerSequence * 100000;
|
||||||
|
for (auto& blob : res)
|
||||||
|
{
|
||||||
|
++base;
|
||||||
|
std::string hashStr{(const char*)base.data(), base.size()};
|
||||||
|
std::string txnStr =
|
||||||
|
"tx" + std::to_string(ledgerSequence) + hashStr;
|
||||||
|
std::string metaStr =
|
||||||
|
"meta" + std::to_string(ledgerSequence) + hashStr;
|
||||||
|
blob = std::make_tuple(hashStr, txnStr, metaStr);
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
};
|
||||||
|
auto generateAccounts = [](uint32_t ledgerSequence,
|
||||||
|
uint32_t numAccounts) {
|
||||||
|
std::vector<ripple::AccountID> accounts;
|
||||||
|
ripple::AccountID base;
|
||||||
|
base = ledgerSequence * 998765;
|
||||||
|
for (size_t i = 0; i < numAccounts; ++i)
|
||||||
|
{
|
||||||
|
++base;
|
||||||
|
accounts.push_back(base);
|
||||||
|
}
|
||||||
|
return accounts;
|
||||||
|
};
|
||||||
|
auto generateAccountTx = [&](uint32_t ledgerSequence, auto txns) {
|
||||||
|
std::vector<AccountTransactionsData> ret;
|
||||||
|
auto accounts = generateAccounts(ledgerSequence, 10);
|
||||||
|
std::srand(std::time(nullptr));
|
||||||
|
uint32_t idx = 0;
|
||||||
|
for (auto& [hash, txn, meta] : txns)
|
||||||
|
{
|
||||||
|
AccountTransactionsData data;
|
||||||
|
data.ledgerSequence = ledgerSequence;
|
||||||
|
data.transactionIndex = idx;
|
||||||
|
data.txHash = hash;
|
||||||
|
for (size_t i = 0; i < 3; ++i)
|
||||||
|
{
|
||||||
|
data.accounts.insert(
|
||||||
|
accounts[std::rand() % accounts.size()]);
|
||||||
|
}
|
||||||
|
++idx;
|
||||||
|
ret.push_back(data);
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
};
|
||||||
|
|
||||||
|
auto generateNextLedger = [seed](auto lgrInfo) {
|
||||||
|
++lgrInfo.seq;
|
||||||
|
lgrInfo.parentHash = lgrInfo.hash;
|
||||||
|
std::srand(std::time(nullptr));
|
||||||
|
std::shuffle(
|
||||||
|
lgrInfo.txHash.begin(),
|
||||||
|
lgrInfo.txHash.end(),
|
||||||
|
std::default_random_engine(seed));
|
||||||
|
std::shuffle(
|
||||||
|
lgrInfo.accountHash.begin(),
|
||||||
|
lgrInfo.accountHash.end(),
|
||||||
|
std::default_random_engine(seed));
|
||||||
|
std::shuffle(
|
||||||
|
lgrInfo.hash.begin(),
|
||||||
|
lgrInfo.hash.end(),
|
||||||
|
std::default_random_engine(seed));
|
||||||
|
return lgrInfo;
|
||||||
|
};
|
||||||
|
auto writeLedger =
|
||||||
|
[&](auto lgrInfo, auto txns, auto objs, auto accountTx) {
|
||||||
|
std::cout << "writing ledger = " << std::to_string(lgrInfo.seq);
|
||||||
|
backend->startWrites();
|
||||||
|
|
||||||
|
backend->writeLedger(
|
||||||
|
lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo)));
|
||||||
|
for (auto [hash, txn, meta] : txns)
|
||||||
|
{
|
||||||
|
backend->writeTransaction(
|
||||||
|
std::move(hash),
|
||||||
|
lgrInfo.seq,
|
||||||
|
std::move(txn),
|
||||||
|
std::move(meta));
|
||||||
|
}
|
||||||
|
for (auto [key, obj] : objs)
|
||||||
|
{
|
||||||
|
std::optional<ripple::uint256> bookDir;
|
||||||
|
if (isOffer(obj.data()))
|
||||||
|
bookDir = getBook(obj);
|
||||||
|
backend->writeLedgerObject(
|
||||||
|
std::move(key),
|
||||||
|
lgrInfo.seq,
|
||||||
|
std::move(obj),
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
std::move(bookDir));
|
||||||
|
}
|
||||||
|
backend->writeAccountTransactions(std::move(accountTx));
|
||||||
|
|
||||||
|
ASSERT_TRUE(backend->finishWrites(lgrInfo.seq));
|
||||||
|
};
|
||||||
|
|
||||||
|
    auto checkLedger = [&](auto lgrInfo,
                           auto txns,
                           auto objs,
                           auto accountTx) {
        auto rng = backend->fetchLedgerRange();
        auto seq = lgrInfo.seq;
        EXPECT_TRUE(rng);
        EXPECT_EQ(rng->minSequence, lgrInfoOld.seq);
        EXPECT_GE(rng->maxSequence, seq);
        auto retLgr = backend->fetchLedgerBySequence(seq);
        EXPECT_TRUE(retLgr);
        EXPECT_EQ(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfo));
        // retLgr = backend->fetchLedgerByHash(lgrInfo.hash);
        // EXPECT_TRUE(retLgr);
        // EXPECT_EQ(ledgerInfoToBlob(*retLgr), ledgerInfoToBlob(lgrInfo));
        auto retTxns = backend->fetchAllTransactionsInLedger(seq);
        for (auto [hash, txn, meta] : txns)
        {
            bool found = false;
            for (auto [retTxn, retMeta, retSeq] : retTxns)
            {
                if (std::strncmp(
                        (const char*)retTxn.data(),
                        (const char*)txn.data(),
                        txn.size()) == 0 &&
                    std::strncmp(
                        (const char*)retMeta.data(),
                        (const char*)meta.data(),
                        meta.size()) == 0)
                    found = true;
            }
            ASSERT_TRUE(found);
        }
        for (auto [account, data] : accountTx)
        {
            std::vector<Backend::TransactionAndMetadata> retData;
            std::optional<Backend::AccountTransactionsCursor> cursor;
            do
            {
                uint32_t limit = 10;
                auto res = backend->fetchAccountTransactions(
                    account, limit, cursor);
                if (res.second)
                    EXPECT_EQ(res.first.size(), limit);
                retData.insert(
                    retData.end(), res.first.begin(), res.first.end());
                cursor = res.second;
            } while (cursor);
            EXPECT_EQ(retData.size(), data.size());
            for (size_t i = 0; i < retData.size(); ++i)
            {
                auto [txn, meta, seq] = retData[i];
                auto [hash, expTxn, expMeta] = data[i];
                EXPECT_STREQ(
                    (const char*)txn.data(), (const char*)expTxn.data());
                EXPECT_STREQ(
                    (const char*)meta.data(), (const char*)expMeta.data());
            }
        }
        for (auto [key, obj] : objs)
        {
            auto retObj =
                backend->fetchLedgerObject(binaryStringToUint256(key), seq);
            if (obj.size())
            {
                ASSERT_TRUE(retObj.has_value());
                EXPECT_STREQ(
                    (const char*)obj.data(), (const char*)retObj->data());
            }
            else
            {
                ASSERT_FALSE(retObj.has_value());
            }
        }
        Backend::LedgerPage page;
        std::vector<Backend::LedgerObject> retObjs;
        size_t numLoops = 0;
        do
        {
            uint32_t limit = 10;
            page = backend->fetchLedgerPage(page.cursor, seq, limit);
            if (page.cursor)
                EXPECT_EQ(page.objects.size(), limit);
            retObjs.insert(
                retObjs.end(), page.objects.begin(), page.objects.end());
            ++numLoops;
            ASSERT_FALSE(page.warning.has_value());
        } while (page.cursor);
        for (auto obj : objs)
        {
            bool found = false;
            for (auto retObj : retObjs)
            {
                if (ripple::strHex(obj.first) == ripple::strHex(retObj.key))
                {
                    found = true;
                    ASSERT_EQ(
                        ripple::strHex(obj.second),
                        ripple::strHex(retObj.blob));
                }
            }
            ASSERT_EQ(found, obj.second.size() != 0);
        }
    };

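    // In-memory mirror of everything written, keyed by ledger sequence:
    // per-ledger object diffs, per-ledger transactions, a hash -> (txn, meta)
    // index, per-account transaction hashes, and the ledger headers.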
    std::map<uint32_t, std::vector<std::pair<std::string, std::string>>>
        state;
    std::map<
        uint32_t,
        std::vector<std::tuple<std::string, std::string, std::string>>>
        allTxns;
    std::unordered_map<std::string, std::pair<std::string, std::string>>
        allTxnsMap;
    std::map<uint32_t, std::map<ripple::AccountID, std::vector<std::string>>>
        allAccountTx;
    std::map<uint32_t, ripple::LedgerInfo> lgrInfos;
    for (size_t i = 0; i < 10; ++i)
    {
        lgrInfoNext = generateNextLedger(lgrInfoNext);
        auto objs = generateObjects(25, lgrInfoNext.seq);
        auto txns = generateTxns(10, lgrInfoNext.seq);
        auto accountTx = generateAccountTx(lgrInfoNext.seq, txns);
        for (auto rec : accountTx)
        {
            for (auto account : rec.accounts)
            {
                allAccountTx[lgrInfoNext.seq][account].push_back(std::string{
                    (const char*)rec.txHash.data(), rec.txHash.size()});
            }
        }
        EXPECT_EQ(objs.size(), 25);
        EXPECT_NE(objs[0], objs[1]);
        EXPECT_EQ(txns.size(), 10);
        EXPECT_NE(txns[0], txns[1]);
        writeLedger(lgrInfoNext, txns, objs, accountTx);
        state[lgrInfoNext.seq] = objs;
        allTxns[lgrInfoNext.seq] = txns;
        lgrInfos[lgrInfoNext.seq] = lgrInfoNext;
        for (auto& [hash, txn, meta] : txns)
        {
            allTxnsMap[hash] = std::make_pair(txn, meta);
        }
    }

    std::vector<std::pair<std::string, std::string>> objs;
    for (size_t i = 0; i < 10; ++i)
    {
        lgrInfoNext = generateNextLedger(lgrInfoNext);
        if (!objs.size())
            objs = generateObjects(25, lgrInfoNext.seq);
        else
            objs = updateObjects(lgrInfoNext.seq, objs);
        auto txns = generateTxns(10, lgrInfoNext.seq);
        auto accountTx = generateAccountTx(lgrInfoNext.seq, txns);
        for (auto rec : accountTx)
        {
            for (auto account : rec.accounts)
            {
                allAccountTx[lgrInfoNext.seq][account].push_back(std::string{
                    (const char*)rec.txHash.data(), rec.txHash.size()});
            }
        }
        EXPECT_EQ(objs.size(), 25);
        EXPECT_NE(objs[0], objs[1]);
        EXPECT_EQ(txns.size(), 10);
        EXPECT_NE(txns[0], txns[1]);
        writeLedger(lgrInfoNext, txns, objs, accountTx);
        state[lgrInfoNext.seq] = objs;
        allTxns[lgrInfoNext.seq] = txns;
        lgrInfos[lgrInfoNext.seq] = lgrInfoNext;
        for (auto& [hash, txn, meta] : txns)
        {
            allTxnsMap[hash] = std::make_pair(txn, meta);
        }
    }
    std::cout << "WROTE ALL OBJECTS" << std::endl;
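    // Replays the per-ledger diffs in state to reconstruct the full set of
    // objects expected at sequence max; keys that first appear after max are
    // kept with an empty blob so checkLedger can assert they are absent.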
    auto flatten = [&](uint32_t max) {
        std::vector<std::pair<std::string, std::string>> flat;
        std::map<std::string, std::string> objs;
        for (auto [seq, diff] : state)
        {
            for (auto [k, v] : diff)
            {
                if (seq > max)
                {
                    if (objs.count(k) == 0)
                        objs[k] = "";
                }
                else
                {
                    objs[k] = v;
                }
            }
        }
        for (auto [key, value] : objs)
        {
            flat.push_back(std::make_pair(key, value));
        }
        return flat;
    };

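    // Accumulates the expected account-transaction history up to sequence
    // max, resolving each stored hash through allTxnsMap, then reverses each
    // account's list to newest-first, which is the order checkLedger expects
    // from fetchAccountTransactions.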
    auto flattenAccountTx = [&](uint32_t max) {
        std::unordered_map<
            ripple::AccountID,
            std::vector<std::tuple<std::string, std::string, std::string>>>
            accountTx;
        for (auto [seq, map] : allAccountTx)
        {
            if (seq > max)
                break;
            for (auto& [account, hashes] : map)
            {
                for (auto& hash : hashes)
                {
                    auto& [txn, meta] = allTxnsMap[hash];
                    accountTx[account].push_back(
                        std::make_tuple(hash, txn, meta));
                }
            }
        }
        for (auto& [account, data] : accountTx)
            std::reverse(data.begin(), data.end());
        return accountTx;
    };

    for (auto [seq, diff] : state)
    {
        std::cout << "flattening" << std::endl;
        auto flat = flatten(seq);
        std::cout << "flattened" << std::endl;
        checkLedger(
            lgrInfos[seq], allTxns[seq], flat, flattenAccountTx(seq));
        std::cout << "checked" << std::endl;
    }
}
}

@@ -1,187 +0,0 @@
//
// Copyright (c) 2016-2019 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//

//------------------------------------------------------------------------------
//
// Example: WebSocket server, asynchronous
//
//------------------------------------------------------------------------------

#include <boost/asio/dispatch.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/json.hpp>

#include <boost/log/core.hpp>
#include <boost/log/expressions.hpp>
#include <boost/log/trivial.hpp>
#include <algorithm>
#include <cstdlib>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <optional>
#include <reporting/ReportingETL.h>
#include <reporting/BackendFactory.h>
#include <reporting/server/session.h>
#include <reporting/server/listener.h>
#include <sstream>
#include <string>
#include <thread>
#include <vector>

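// Reads the given file into memory and parses it as a JSON object.
// Returns an empty optional if the file cannot be opened or parsed.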
std::optional<boost::json::object>
parse_config(const char* filename)
{
    try
    {
        std::ifstream in(filename, std::ios::in | std::ios::binary);
        if (in)
        {
            std::stringstream contents;
            contents << in.rdbuf();
            in.close();
            std::cout << contents.str() << std::endl;
            boost::json::value value = boost::json::parse(contents.str());
            return value.as_object();
        }
    }
    catch (std::exception const& e)
    {
        std::cout << e.what() << std::endl;
    }
    return {};
}
//------------------------------------------------------------------------------
//
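// Maps the numeric command line log level to a Boost.Log severity filter;
// values outside 0-5 fall back to info.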
void
initLogLevel(int level)
{
    switch (level)
    {
        case 0:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::trace);
            break;
        case 1:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::debug);
            break;
        case 2:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::info);
            break;
        case 3:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::warning);
            break;
        case 4:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::error);
            break;
        case 5:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::fatal);
            break;
        default:
            boost::log::core::get()->set_filter(
                boost::log::trivial::severity >= boost::log::trivial::info);
    }
}

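// Runs the io_context on numThreads threads: this thread plus
// numThreads - 1 workers. Blocks until the io_context is stopped.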
void
start(boost::asio::io_context& ioc, std::uint32_t numThreads)
{
    std::vector<std::thread> v;
    v.reserve(numThreads - 1);
    for (auto i = numThreads - 1; i > 0; --i)
        v.emplace_back([&ioc] { ioc.run(); });

    ioc.run();
    // Join the workers once the io_context stops; destroying joinable
    // threads would otherwise call std::terminate.
    for (auto& t : v)
        t.join();
}

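// Wires the server together: parses the config and sets the log level, then
// constructs the backend, subscription manager, validated-ledger tracker,
// ETL load balancer, ETL worker, and websocket listener before running the
// io_context.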
int
main(int argc, char* argv[])
{
    // Check command line arguments.
    if (argc != 5 and argc != 6)
    {
        std::cerr
            << "Usage: websocket-server-async <address> <port> <threads> "
               "<config_file> <log level> \n"
            << "Example:\n"
            << "    websocket-server-async 0.0.0.0 8080 1 config.json 2\n";
        return EXIT_FAILURE;
    }
    auto const address = boost::asio::ip::make_address(argv[1]);
    auto const port = static_cast<unsigned short>(std::atoi(argv[2]));
    auto const threads = std::max<int>(1, std::atoi(argv[3]));
    auto const config = parse_config(argv[4]);
    if (argc > 5)
    {
        initLogLevel(std::atoi(argv[5]));
    }
    else
    {
        initLogLevel(2);
    }
    if (!config)
    {
        std::cerr << "couldn't parse config. Exiting..." << std::endl;
        return EXIT_FAILURE;
    }

    // The io_context is required for all I/O
    boost::asio::io_context ioc{threads};

    std::shared_ptr<BackendInterface> backend{Backend::make_Backend(*config)};

    std::shared_ptr<SubscriptionManager> subscriptions{
        SubscriptionManager::make_SubscriptionManager()};

    std::shared_ptr<NetworkValidatedLedgers> ledgers{
        NetworkValidatedLedgers::make_ValidatedLedgers()};

    std::shared_ptr<ETLLoadBalancer> balancer{
        ETLLoadBalancer::make_ETLLoadBalancer(
            *config, ioc, backend, subscriptions, ledgers)};

    std::shared_ptr<ReportingETL> etl{ReportingETL::make_ReportingETL(
        *config, ioc, backend, subscriptions, balancer, ledgers)};

    listener::make_listener(
        ioc,
        boost::asio::ip::tcp::endpoint{address, port},
        backend,
        subscriptions,
        balancer);

    // Blocks until stopped.
    // When stopped, shared_ptrs fall out of scope
    // Calls destructors on all resources, and destructs in order
    start(ioc, threads);

    return EXIT_SUCCESS;
}