mirror of https://github.com/XRPLF/clio.git

checkpoint
@@ -438,22 +438,27 @@ BackendIndexer::finish(uint32_t ledgerSequence, BackendInterface const& backend)
     bool isFirst = false;
     auto keyIndex = getKeyIndexOfSeq(ledgerSequence);
     auto bookIndex = getBookIndexOfSeq(ledgerSequence);
-    auto rng = backend.fetchLedgerRangeNoThrow();
-    if (!rng || rng->minSequence == ledgerSequence)
+    if (isFirst_)
     {
-        isFirst = true;
-        keyIndex = KeyIndex{ledgerSequence};
-        bookIndex = BookIndex{ledgerSequence};
+        auto rng = backend.fetchLedgerRangeNoThrow();
+        if (rng && rng->minSequence != ledgerSequence)
+            isFirst_ = false;
+        else
+        {
+            keyIndex = KeyIndex{ledgerSequence};
+            bookIndex = BookIndex{ledgerSequence};
+        }
     }
     backend.writeKeys(keys, keyIndex);
     backend.writeBooks(books, bookIndex);
-    if (isFirst)
+    if (isFirst_)
     {
         // write completion record
         ripple::uint256 zero = {};
         backend.writeBooks({{zero, {zero}}}, bookIndex);
         backend.writeKeys({zero}, keyIndex);
     }
+    isFirst_ = false;
     keys = {};
     books = {};
     BOOST_LOG_TRIVIAL(info)
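The hunk above replaces a per-ledger range lookup with a cached latch: finish only calls fetchLedgerRangeNoThrow() while isFirst_ is still set, and clears the flag permanently once the backend reports a ledger earlier than the one being written (or once the first pass completes). A minimal sketch of that latch pattern, with hypothetical Range and Indexer types standing in for clio's:

    #include <cstdint>
    #include <optional>

    struct Range { uint32_t minSequence; uint32_t maxSequence; };

    class Indexer {
        // Assume we are on the first ledger until proven otherwise.
        mutable bool isFirst_ = true;

    public:
        bool
        isFirstLedger(uint32_t seq, std::optional<Range> const& rng) const
        {
            if (!isFirst_)
                return false;  // latched: skip the backend range lookup entirely
            if (rng && rng->minSequence != seq)
                isFirst_ = false;  // an earlier ledger exists; latch permanently
            return isFirst_;
        }
    };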
@@ -100,6 +100,8 @@ class BackendIndexer
     std::mutex mtx;
     std::condition_variable cv_;

+    mutable bool isFirst_ = true;
+
     void
     addKeyAsync(ripple::uint256 const& key);
     void
@@ -200,6 +202,7 @@ class BackendInterface
 {
 protected:
     mutable BackendIndexer indexer_;
+    mutable bool isFirst_ = true;

 public:
     // read methods
@@ -245,12 +248,17 @@ public:
         auto commitRes = doFinishWrites();
         if (commitRes)
         {
-            bool isFirst =
-                fetchLedgerRangeNoThrow()->minSequence == ledgerSequence;
-            if (indexer_.isBookFlagLedger(ledgerSequence) || isFirst)
+            if (isFirst_)
+            {
+                auto rng = fetchLedgerRangeNoThrow();
+                if (rng && rng->minSequence != ledgerSequence)
+                    isFirst_ = false;
+            }
+            if (indexer_.isBookFlagLedger(ledgerSequence) || isFirst_)
                 indexer_.writeBookFlagLedgerAsync(ledgerSequence, *this);
-            if (indexer_.isKeyFlagLedger(ledgerSequence) || isFirst)
+            if (indexer_.isKeyFlagLedger(ledgerSequence) || isFirst_)
                 indexer_.writeKeyFlagLedgerAsync(ledgerSequence, *this);
+            isFirst_ = false;
         }
         return commitRes;
     }
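finishWrites now takes the flag-ledger path either at the regular flag intervals or on the first ledger written. The diff does not show how isKeyFlagLedger/isBookFlagLedger are computed; a plausible sketch, assuming flag ledgers recur at a fixed power-of-two interval of the sequence (the interval value here is purely illustrative):

    #include <cstdint>

    // Hypothetical shard interval; the real value is not shown in this diff.
    constexpr uint32_t keyShardSize = 1u << 20;

    bool
    isKeyFlagLedger(uint32_t ledgerSequence)
    {
        // A flag ledger marks the start of each key shard.
        return ledgerSequence % keyShardSize == 0;
    }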
@@ -267,6 +275,7 @@ public:
     std::optional<LedgerRange>
     fetchLedgerRangeNoThrow() const
     {
+        BOOST_LOG_TRIVIAL(warning) << __func__;
         while (true)
         {
             try
@@ -1527,12 +1527,6 @@ CassandraBackend::open(bool readOnly)
         if (!executeSimpleStatement(query.str()))
             continue;

-        query.str("");
-        query << "SELECT * FROM " << tablePrefix << "objects WHERE sequence=1"
-              << " LIMIT 1";
-        if (!executeSimpleStatement(query.str()))
-            continue;
-
         query.str("");
         query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "transactions"
               << " ( hash blob PRIMARY KEY, ledger_sequence bigint, "
@@ -1822,12 +1816,13 @@ CassandraBackend::open(bool readOnly)
               << " is_latest IN (true, false)";
         if (!selectLedgerRange_.prepareStatement(query, session_.get()))
             continue;
+        /*
         query.str("");
         query << " SELECT key,object FROM " << tablePrefix
               << "objects WHERE sequence = ?";
         if (!selectLedgerDiff_.prepareStatement(query, session_.get()))
             continue;
+        */
         setupPreparedStatements = true;
     }

@@ -841,14 +841,6 @@ public:
     {
         // wait for all other writes to finish
         sync();
-        auto rng = fetchLedgerRangeNoThrow();
-        if (rng && rng->maxSequence >= ledgerSequence_)
-        {
-            BOOST_LOG_TRIVIAL(warning)
-                << __func__ << " Ledger " << std::to_string(ledgerSequence_)
-                << " already written. Returning";
-            return false;
-        }
         // write range
         if (isFirstLedger_)
         {
@@ -790,7 +790,7 @@ CREATE TABLE IF NOT EXISTS objects (
     object bytea
 ) PARTITION BY RANGE (ledger_seq);

-CREATE INDEX objects_idx ON objects USING btree(key, ledger_seq);
+CREATE INDEX objects_idx ON objects USING btree(ledger_seq,key);

 create table if not exists objects1 partition of objects for values from (0) to (10000000);
 create table if not exists objects2 partition of objects for values from (10000000) to (20000000);
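A note on the objects_idx change: in a PostgreSQL btree, the leading column determines which scans the index serves cheaply. btree(ledger_seq, key) favors reading every key belonging to one ledger — the per-ledger scan pattern this backend uses when iterating a ledger's state — while the old btree(key, ledger_seq) favored point lookups of a single key's history across ledgers; the reordering presumably trades the latter for the former.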
@@ -775,12 +775,22 @@ PostgresBackend::doFinishWrites() const
 {
     if (!abortWrite_)
     {
-        writeConnection_.bulkInsert("transactions", transactionsBuffer_.str());
+        std::string txStr = transactionsBuffer_.str();
+        writeConnection_.bulkInsert("transactions", txStr);
         writeConnection_.bulkInsert(
             "account_transactions", accountTxBuffer_.str());
         std::string objectsStr = objectsBuffer_.str();
         if (objectsStr.size())
             writeConnection_.bulkInsert("objects", objectsStr);
+        BOOST_LOG_TRIVIAL(debug)
+            << __func__ << " objects size = " << objectsStr.size()
+            << " txns size = " << txStr.size();
+        std::string keysStr = keysBuffer_.str();
+        if (keysStr.size())
+            writeConnection_.bulkInsert("keys", keysStr);
+        std::string booksStr = booksBuffer_.str();
+        if (booksStr.size())
+            writeConnection_.bulkInsert("books", booksStr);
     }
     auto res = writeConnection_("COMMIT");
     if (!res || res.status() != PGRES_COMMAND_OK)
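For context on the flushes added here: the *Buffer_ members accumulate rows in PostgreSQL COPY text format, which is why the writers escape each row as tab-separated columns with a \\x-prefixed hex blob and a trailing newline. A self-contained sketch of serializing one keys row under those assumptions (copyRow is illustrative, not clio's API; the hex loop stands in for ripple::strHex):

    #include <cstdint>
    #include <iomanip>
    #include <sstream>
    #include <string>
    #include <vector>

    std::string
    copyRow(uint64_t keyIndex, std::vector<uint8_t> const& key)
    {
        std::stringstream row;
        row << keyIndex << '\t';  // first column, then the tab separator
        row << "\\\\x";           // COPY-escaped "\x" prefix for a hex bytea
        for (unsigned byte : key)
            row << std::hex << std::setw(2) << std::setfill('0') << byte;
        row << '\n';              // COPY text terminates each row with a newline
        return row.str();
    }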
@@ -795,6 +805,8 @@ PostgresBackend::doFinishWrites() const
     objectsBuffer_.clear();
     booksBuffer_.str("");
     booksBuffer_.clear();
+    keysBuffer_.str("");
+    keysBuffer_.clear();
     accountTxBuffer_.str("");
     accountTxBuffer_.clear();
     numRowsInObjectsBuffer_ = 0;
@@ -806,33 +818,36 @@ PostgresBackend::writeKeys(
     KeyIndex const& index,
     bool isAsync) const
 {
+    return true;
+    if (isAsync)
+        return true;
+    if (abortWrite_)
+        return false;
     BOOST_LOG_TRIVIAL(debug) << __func__;
     PgQuery pgQuery(pgPool_);
-    pgQuery("BEGIN");
-    std::stringstream keysBuffer;
+    PgQuery& conn = isAsync ? pgQuery : writeConnection_;
+    if (isAsync)
+        conn("BEGIN");
     size_t numRows = 0;
     for (auto& key : keys)
     {
-        keysBuffer << std::to_string(index.keyIndex) << '\t' << "\\\\x"
+        keysBuffer_ << std::to_string(index.keyIndex) << '\t' << "\\\\x"
                    << ripple::strHex(key) << '\n';
         numRows++;
         // If the buffer gets too large, the insert fails. Not sure why.
         // When writing in the background, we insert after every 10000 rows
         if ((isAsync && numRows == 10000) || numRows == 100000)
         {
-            pgQuery.bulkInsert("keys", keysBuffer.str());
+            conn.bulkInsert("keys", keysBuffer_.str());
             std::stringstream temp;
-            keysBuffer.swap(temp);
+            keysBuffer_.swap(temp);
             numRows = 0;
             if (isAsync)
                 std::this_thread::sleep_for(std::chrono::seconds(1));
         }
     }
-    if (numRows > 0)
-    {
-        pgQuery.bulkInsert("keys", keysBuffer.str());
-    }
-    pgQuery("COMMIT");
+    if (isAsync)
+        conn("COMMIT");
     return true;
 }
 bool
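Two things stand out in this hunk. First, the leading return true; short-circuits the whole function, so key writes are disabled outright in this checkpoint; the guards after it are unreachable until that line is removed. Second, the local keysBuffer became the member keysBuffer_: the synchronous path no longer runs its own BEGIN/COMMIT but leaves any unflushed remainder for doFinishWrites (which, per the earlier hunk, now bulk-inserts keys and books before COMMIT), while only the async path opens a transaction on a pooled connection. The flush logic itself is a generic flush-every-N batching pattern; a sketch with illustrative names:

    #include <cstddef>
    #include <functional>
    #include <sstream>
    #include <string>
    #include <vector>

    // Accumulate rows and flush every `batchSize` so no single bulk insert
    // grows unboundedly; the remainder is flushed at the end.
    void
    bulkWrite(
        std::vector<std::string> const& rows,
        std::size_t batchSize,
        std::function<void(std::string const&)> const& flush)
    {
        std::stringstream buffer;
        std::size_t numRows = 0;
        for (auto const& row : rows)
        {
            buffer << row;
            if (++numRows == batchSize)
            {
                flush(buffer.str());  // one bounded bulk insert
                buffer.str("");       // reset the buffer for the next batch
                numRows = 0;
            }
        }
        if (numRows > 0)  // in the diff, this final flush moved to doFinishWrites
            flush(buffer.str());
    }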
@@ -843,38 +858,41 @@ PostgresBackend::writeBooks(
     BookIndex const& index,
     bool isAsync) const
 {
+    return true;
+    if (isAsync)
+        return true;
+    if (abortWrite_)
+        return false;
     BOOST_LOG_TRIVIAL(debug) << __func__;

     PgQuery pgQuery(pgPool_);
-    pgQuery("BEGIN");
-    std::stringstream booksBuffer;
+    PgQuery& conn = isAsync ? pgQuery : writeConnection_;
+    if (isAsync)
+        conn("BEGIN");
     size_t numRows = 0;
     for (auto& book : books)
     {
         for (auto& offer : book.second)
         {
-            booksBuffer << std::to_string(index.bookIndex) << '\t' << "\\\\x"
+            booksBuffer_ << std::to_string(index.bookIndex) << '\t' << "\\\\x"
                         << ripple::strHex(book.first) << '\t' << "\\\\x"
                         << ripple::strHex(offer) << '\n';
             numRows++;
             // If the buffer gets too large, the insert fails. Not sure why.
             // When writing in the background, we insert after every 10 rows
             if ((isAsync && numRows == 1000) || numRows == 100000)
             {
-                pgQuery.bulkInsert("books", booksBuffer.str());
+                conn.bulkInsert("books", booksBuffer_.str());
                 std::stringstream temp;
-                booksBuffer.swap(temp);
+                booksBuffer_.swap(temp);
                 numRows = 0;
                 if (isAsync)
                     std::this_thread::sleep_for(std::chrono::seconds(1));
             }
         }
     }
-    if (numRows > 0)
-    {
-        pgQuery.bulkInsert("books", booksBuffer.str());
-    }
-    pgQuery("COMMIT");
+    if (isAsync)
+        conn("COMMIT");
     return true;
 }
 bool
@@ -9,8 +9,9 @@ class PostgresBackend : public BackendInterface
 private:
     mutable size_t numRowsInObjectsBuffer_ = 0;
     mutable std::stringstream objectsBuffer_;
-    mutable std::stringstream transactionsBuffer_;
     mutable std::stringstream booksBuffer_;
+    mutable std::stringstream keysBuffer_;
+    mutable std::stringstream transactionsBuffer_;
     mutable std::stringstream accountTxBuffer_;
     std::shared_ptr<PgPool> pgPool_;
     mutable PgQuery writeConnection_;
@@ -69,7 +69,7 @@ ReportingETL::insertTransactions(
         auto metaSerializer = std::make_shared<ripple::Serializer>(
             txMeta.getAsObject().getSerializer());

-        BOOST_LOG_TRIVIAL(trace)
+        BOOST_LOG_TRIVIAL(debug)
             << __func__ << " : "
             << "Inserting transaction = " << sttx.getTransactionID();

@@ -241,7 +241,7 @@ ReportingETL::fetchLedgerDataAndDiff(uint32_t idx)
 std::pair<ripple::LedgerInfo, bool>
 ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
 {
-    BOOST_LOG_TRIVIAL(trace) << __func__ << " : "
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                              << "Beginning ledger update";

     ripple::LedgerInfo lgrInfo =
@@ -252,8 +252,12 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
         << "Deserialized ledger header. " << detail::toString(lgrInfo);
     flatMapBackend_->startWrites();

+    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
+                             << "started writes";
     flatMapBackend_->writeLedger(
         lgrInfo, std::move(*rawData.mutable_ledger_header()));
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
+                             << "wrote ledger header";
     std::vector<AccountTransactionsData> accountTxData{
         insertTransactions(lgrInfo, rawData)};

@@ -293,7 +297,13 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData)
             isDeleted,
             std::move(bookDir));
     }
+    BOOST_LOG_TRIVIAL(debug)
+        << __func__ << " : "
+        << "wrote objects. num objects = "
+        << std::to_string(rawData.ledger_objects().objects_size());
     flatMapBackend_->writeAccountTransactions(std::move(accountTxData));
+    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
+                             << "wrote account_tx";
     accumTxns_ += rawData.transactions_list().transactions_size();
     bool success = true;
     if (accumTxns_ >= txnThreshold_)
@@ -361,6 +371,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
         assert(false);
         throw std::runtime_error("runETLPipeline: parent ledger is null");
     }
+    std::atomic<uint32_t> minSequence = rng->minSequence;
     BOOST_LOG_TRIVIAL(info) << __func__ << " : "
                             << "Populating caches";

@@ -451,6 +462,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
     }

     std::thread transformer{[this,
+                             &minSequence,
                              &writeConflict,
                              &startSequence,
                              &getNext,
@@ -499,16 +511,16 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
                 lastPublishedSequence = lgrInfo.seq;
             }
             writeConflict = !success;
-            auto range = flatMapBackend_->fetchLedgerRangeNoThrow();
             if (onlineDeleteInterval_ && !deleting_ &&
-                range->maxSequence - range->minSequence >
-                    *onlineDeleteInterval_)
+                lgrInfo.seq - minSequence > *onlineDeleteInterval_)
             {
                 deleting_ = true;
-                ioContext_.post([this, &range]() {
+                ioContext_.post([this, &minSequence]() {
                     BOOST_LOG_TRIVIAL(info) << "Running online delete";
                     flatMapBackend_->doOnlineDelete(*onlineDeleteInterval_);
                     BOOST_LOG_TRIVIAL(info) << "Finished online delete";
+                    auto rng = flatMapBackend_->fetchLedgerRangeNoThrow();
+                    minSequence = rng->minSequence;
                     deleting_ = false;
                 });
             }
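The online-delete trigger above no longer re-fetches the ledger range on every pass. The transformer compares lgrInfo.seq against an atomic minSequence watermark (introduced in the earlier runETLPipeline hunks and captured by reference in both the transformer lambda and the posted job), and the delete job refreshes the watermark once deletion finishes. A minimal sketch of that cross-thread pattern, with hard-coded numbers in place of real ledger ranges:

    #include <atomic>
    #include <cstdint>
    #include <thread>

    int
    main()
    {
        std::atomic<uint32_t> minSequence{1000};  // watermark shared by threads
        uint32_t const onlineDeleteInterval = 500;

        uint32_t const ledgerSeq = 1600;  // sequence just written
        if (ledgerSeq - minSequence > onlineDeleteInterval)
        {
            // The real code posts this to an io_context; a raw thread suffices
            // to show the refresh-after-delete handshake.
            std::thread deleter{[&minSequence] {
                // ... doOnlineDelete(...) would run here ...
                minSequence = 1100;  // refresh from the post-delete range
            }};
            deleter.join();
        }
    }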