Order book offers by quality
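This commit keys each offer in Cassandra by a composite quality_key so the database hands back a book's offers already sorted by price. The sketch below is editorial, not part of the commit: it shows the underlying idea that a 32-byte book directory index splits at byte 24, with the trailing 8 bytes carrying the offer quality, so (quality, offer key) pairs sort best-priced offers first. The helper name splitBookKey is illustrative only.

#include <array>
#include <cstdint>
#include <cstring>

// Illustrative sketch, not part of this commit. Assumes the XRPL convention
// that the low 64 bits of a book directory index encode the offer quality.
struct BookQuality
{
    std::array<std::uint8_t, 24> base;    // identifies the trading pair
    std::array<std::uint8_t, 8> quality;  // big-endian; smaller = better price
};

inline BookQuality
splitBookKey(std::uint8_t const* bookDir /* 32 bytes */)
{
    BookQuality out;
    std::memcpy(out.base.data(), bookDir, 24);
    std::memcpy(out.quality.data(), bookDir + 24, 8);
    return out;
}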
@@ -63,16 +63,16 @@ flatMapWriteCallback(CassFuture* fut, void* cbData)
    processAsyncWriteResponse(requestParams, fut, func);
}

void
flatMapWriteBookCallback(CassFuture* fut, void* cbData)
{
    CassandraBackend::WriteCallbackData& requestParams =
        *static_cast<CassandraBackend::WriteCallbackData*>(cbData);
    auto func = [](auto& params, bool retry) {
        params.backend->writeBook(params, retry);
    };
    processAsyncWriteResponse(requestParams, fut, func);
}
// void
// flatMapWriteBookCallback(CassFuture* fut, void* cbData)
// {
//     CassandraBackend::WriteCallbackData& requestParams =
//         *static_cast<CassandraBackend::WriteCallbackData*>(cbData);
//     auto func = [](auto& params, bool retry) {
//         params.backend->writeBook(params, retry);
//     };
//     processAsyncWriteResponse(requestParams, fut, func);
// }
/*

void
@@ -641,54 +641,74 @@ CassandraBackend::fetchBookOffers(
    std::optional<ripple::uint256> const& cursor) const
{
    CassandraStatement statement{selectBook_};
    statement.bindBytes(book);
    uint32_t upper = sequence;

    auto rng = fetchLedgerRange();
    if (rng && sequence != rng->minSequence)

    if (!rng)
        return {{},{}};

    std::vector<ripple::uint256> keys;
    uint32_t upper = sequence;
    auto lastPage = rng->maxSequence - (rng->maxSequence % 256);

    if (lastPage < sequence) {
        keys = indexer_.getCurrentOffers(book);
    }
    else if (sequence != rng->minSequence)
    {
        upper = (sequence >> 8) << 8;
        if (upper != sequence)
            upper += (1 << 8);
    }
    BOOST_LOG_TRIVIAL(info) << __func__ << " upper = " << std::to_string(upper)
                            << " book = " << ripple::strHex(book);
    statement.bindInt(upper);
    if (cursor)
        statement.bindBytes(*cursor);
    else
    {
        ripple::uint256 zero = {};
        statement.bindBytes(zero);
    }
    statement.bindUInt(limit);
    CassandraResult result = executeSyncRead(statement);

    statement.bindBytes(book.data(), 24);
    statement.bindInt(upper);

    BOOST_LOG_TRIVIAL(debug) << __func__ << " - got keys";
    std::vector<ripple::uint256> keys;
    if (!result)
        return {{}, {}};
    do
    {
        keys.push_back(result.getUInt256());
    } while (result.nextRow());
    BOOST_LOG_TRIVIAL(info) << __func__ << " upper = " << std::to_string(upper)
                            << " book = " << ripple::strHex(std::string((char*)book.data(), 24));

    // ripple::uint256 zero = {};
    // statement.bindBytes(zero.data(), 8);
    // if (cursor)
    //     statement.bindBytes(*cursor);
    // else
    // {
    //     statement.bindBytes(zero);
    // }

    // statement.bindUInt(limit);
    CassandraResult result = executeSyncRead(statement);

    BOOST_LOG_TRIVIAL(debug) << __func__ << " - got keys";
    if (!result)
    {
        return {{}, {}};
    }

    do
    {
        auto index = result.getBytesTuple().second;
        keys.push_back(ripple::uint256::fromVoid(index.data()));
    } while (result.nextRow());
    }

    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " - populated keys. num keys = " << keys.size();
    if (keys.size())
    {
        std::vector<LedgerObject> results;
        std::vector<Blob> objs = fetchLedgerObjects(keys, sequence);
        for (size_t i = 0; i < objs.size(); ++i)
        {
            if (objs[i].size() != 0)
                results.push_back({keys[i], objs[i]});
        }
        if (keys.size())
            return {results, keys[keys.size() - 1]};

    if (!keys.size())
        return {{}, {}};

    std::vector<LedgerObject> results;
    std::vector<Blob> objs = fetchLedgerObjects(keys, sequence);
    for (size_t i = 0; i < objs.size(); ++i)
    {
        if (results.size() == limit)
            return {results, keys[i]};

        if (objs[i].size() != 0)
            results.push_back({keys[i], objs[i]});
    }

    return {{}, {}};
    return {results, {}};
}
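// Illustrative sketch, not part of this diff: once the quality_key tuples have
// been read from books2 in clustering order (best quality first) and the
// corresponding objects fetched, the result set is assembled roughly as in the
// loop above -- skip deleted offers (empty blobs), stop at the limit, and hand
// back the next key as a paging cursor. The types and the helper name
// assembleBookOffers are hypothetical, not part of the backend API.

#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

using BlobBytes = std::vector<std::uint8_t>;

struct OfferRow   // one books2 quality_key tuple
{
    BlobBytes quality;
    BlobBytes offerKey;
};

struct BookOffer
{
    BlobBytes key;
    BlobBytes blob;
};

inline std::pair<std::vector<BookOffer>, std::optional<BlobBytes>>
assembleBookOffers(
    std::vector<OfferRow> const& rows,   // already sorted by quality
    std::vector<BlobBytes> const& objects,  // objects[i] fetched for rows[i].offerKey
    std::size_t limit)
{
    std::vector<BookOffer> results;
    for (std::size_t i = 0; i < rows.size() && i < objects.size(); ++i)
    {
        if (results.size() == limit)
            return {std::move(results), rows[i].offerKey};  // cursor = next key
        if (!objects[i].empty())  // empty blob => offer deleted at this ledger
            results.push_back({rows[i].offerKey, objects[i]});
    }
    return {std::move(results), std::nullopt};  // book exhausted
}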
struct WriteBookCallbackData
{
@@ -725,8 +745,9 @@ void
writeBook2(WriteBookCallbackData& cb)
{
    CassandraStatement statement{cb.backend.getInsertBookPreparedStatement()};
    statement.bindBytes(cb.book);
    statement.bindBytes(cb.book.data(), 24);
    statement.bindInt(cb.ledgerSequence);
    statement.bindBytes(cb.book.data()+24, 8);
    statement.bindBytes(cb.offerKey);
    // Passing isRetry as true bypasses incrementing numOutstanding
    cb.backend.executeAsyncWrite(statement, writeBookCallback, cb, true);
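// Editorial note, not part of this diff: of the bind calls above, the ones that
// match the new books2 statement -- "(book, sequence, quality_key) VALUES
// (?, ?, (?, ?))" -- appear to be, in placeholder order:
//
//   statement.bindBytes(cb.book.data(), 24);    // book      -> first 24 bytes of the directory index
//   statement.bindInt(cb.ledgerSequence);       // sequence  -> ledger snapshot being written
//   statement.bindBytes(cb.book.data()+24, 8);  // quality   -> first component of the tuple
//   statement.bindBytes(cb.offerKey);           // offer key -> second component of the tuple
//
// The statement.bindBytes(cb.book) line reads as a leftover of the old
// single-blob books schema.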
@@ -1387,24 +1408,24 @@ CassandraBackend::open()
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "objects"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "CREATE INDEX ON " << tablePrefix << "objects(sequence)";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "objects WHERE sequence=1"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "transactions"
              << " ( hash blob PRIMARY KEY, ledger_sequence bigint, "
                 "transaction "
@@ -1412,61 +1433,50 @@ CassandraBackend::open()
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "transactions"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "CREATE INDEX ON " << tablePrefix
              << "transactions(ledger_sequence)";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix
              << "transactions WHERE ledger_sequence = 1"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "keys"
              << " ( sequence bigint, key blob, PRIMARY KEY "
                 "(sequence, key))";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "keys"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "books"
              << " ( book blob, sequence bigint, key blob, deleted_at "
                 "bigint, PRIMARY KEY "
                 "(book, key)) WITH CLUSTERING ORDER BY (key ASC)";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query << "SELECT * FROM " << tablePrefix << "books"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};

        query.str("");
        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "books2"
              << " ( book blob, sequence bigint, key blob, PRIMARY KEY "
                 "((book, sequence), key)) WITH CLUSTERING ORDER BY (key "
              << " ( book blob, sequence bigint, quality_key tuple<blob, blob>, PRIMARY KEY "
                 "((book, sequence), quality_key)) WITH CLUSTERING ORDER BY (quality_key "
                 "ASC)";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "books2"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;
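// Illustrative sketch, not part of this diff: quality_key is a
// tuple<blob, blob> clustering column, and Cassandra compares tuples
// component by component, so rows cluster by quality first and offer key
// second. In C++ terms the ordering is roughly:

#include <array>
#include <cstdint>
#include <cstring>

struct QualityKey
{
    std::array<std::uint8_t, 8> quality;    // trailing 8 bytes of the book directory index
    std::array<std::uint8_t, 32> offerKey;  // ledger key of the offer object
};

inline bool
operator<(QualityKey const& lhs, QualityKey const& rhs)
{
    // Bytewise compare the quality first, then the offer key as a tiebreaker,
    // mirroring lexicographic comparison of the (quality, key) tuple.
    int q = std::memcmp(lhs.quality.data(), rhs.quality.data(), 8);
    if (q != 0)
        return q < 0;
    return std::memcmp(lhs.offerKey.data(), rhs.offerKey.data(), 32) < 0;
}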
        query = {};
        query.str("");
        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "account_tx"
              << " ( account blob, seq_idx tuple<bigint, bigint>, "
                 " hash blob, "
@@ -1476,43 +1486,43 @@ CassandraBackend::open()
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "account_tx"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledgers"
              << " ( sequence bigint PRIMARY KEY, header blob )";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "ledgers"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledger_hashes"
              << " (hash blob PRIMARY KEY, sequence bigint)";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "ledger_hashes"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledger_range"
              << " (is_latest boolean PRIMARY KEY, sequence bigint)";
        if (!executeSimpleStatement(query.str()))
            continue;

        query = {};
        query.str("");
        query << "SELECT * FROM " << tablePrefix << "ledger_range"
              << " LIMIT 1";
        if (!executeSimpleStatement(query.str()))
@@ -1532,7 +1542,7 @@ CassandraBackend::open()
        if (!insertObject_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << "INSERT INTO " << tablePrefix << "transactions"
              << " (hash, ledger_sequence, transaction, metadata) VALUES "
                 "(?, ?, "
@@ -1540,35 +1550,26 @@ CassandraBackend::open()
        if (!insertTransaction_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << "INSERT INTO " << tablePrefix << "keys"
              << " (sequence, key) VALUES (?, ?)";
        if (!insertKey_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query << "INSERT INTO " << tablePrefix << "books"
              << " (book, key, sequence, deleted_at) VALUES (?, ?, ?, ?)";
        if (!insertBook_.prepareStatement(query, session_.get()))
            continue;
        query = {};
        query.str("");
        query << "INSERT INTO " << tablePrefix << "books2"
              << " (book, sequence, key) VALUES (?, ?, ?)";
              << " (book, sequence, quality_key) VALUES (?, ?, (?, ?))";
        if (!insertBook2_.prepareStatement(query, session_.get()))
            continue;
        query = {};
        query << "INSERT INTO " << tablePrefix << "books"
              << " (book, key, deleted_at) VALUES (?, ?, ?)";
        if (!deleteBook_.prepareStatement(query, session_.get()))
            continue;
        query.str("");

        query = {};
        query.str("");
        query << "SELECT key FROM " << tablePrefix << "keys"
              << " WHERE sequence = ? AND key > ? ORDER BY key ASC LIMIT ?";
        if (!selectKeys_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << "SELECT object, sequence FROM " << tablePrefix << "objects"
              << " WHERE key = ? AND sequence <= ? ORDER BY sequence DESC "
                 "LIMIT 1";
@@ -1576,28 +1577,28 @@ CassandraBackend::open()
        if (!selectObject_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << "SELECT transaction, metadata, ledger_sequence FROM "
              << tablePrefix << "transactions"
              << " WHERE hash = ?";
        if (!selectTransaction_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << "SELECT transaction, metadata, ledger_sequence FROM "
              << tablePrefix << "transactions"
              << " WHERE ledger_sequence = ?";
        if (!selectAllTransactionsInLedger_.prepareStatement(
                query, session_.get()))
            continue;
        query = {};
        query.str("");
        query << "SELECT hash FROM " << tablePrefix << "transactions"
              << " WHERE ledger_sequence = ?";
        if (!selectAllTransactionHashesInLedger_.prepareStatement(
                query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << "SELECT key FROM " << tablePrefix << "objects "
              << " WHERE TOKEN(key) >= ? and sequence <= ? "
              << " PER PARTITION LIMIT 1 LIMIT ?"
@@ -1605,7 +1606,7 @@ CassandraBackend::open()
        if (!selectLedgerPageKeys_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << "SELECT object,key FROM " << tablePrefix << "objects "
              << " WHERE TOKEN(key) >= ? and sequence <= ? "
              << " PER PARTITION LIMIT 1 LIMIT ? ALLOW FILTERING";
@@ -1614,7 +1615,7 @@ CassandraBackend::open()
            continue;

        /*
        query = {};
        query.str("");
        query << "SELECT filterempty(key,object) FROM " << tablePrefix <<
        "objects "
              << " WHERE TOKEN(key) >= ? and sequence <= ?"
@@ -1623,80 +1624,72 @@ CassandraBackend::open()
        if (!upperBound2_.prepareStatement(query, session_.get()))
            continue;
        */
        query = {};
        query.str("");
        query << "SELECT TOKEN(key) FROM " << tablePrefix << "objects "
              << " WHERE key = ? LIMIT 1";

        if (!getToken_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query << "SELECT key FROM " << tablePrefix << "books "
              << " WHERE book = ? AND sequence <= ? AND deleted_at > ? AND"
                 " key > ? "
                 " ORDER BY key ASC LIMIT ? ALLOW FILTERING";
        if (!getBook_.prepareStatement(query, session_.get()))
            continue;
        query = {};
        query << "SELECT key FROM " << tablePrefix << "books2 "
              << " WHERE book = ? AND sequence = ? AND "
                 " key > ? "
                 " ORDER BY key ASC LIMIT ?";
        query.str("");
        query << "SELECT quality_key FROM " << tablePrefix << "books2 "
              << " WHERE book = ? AND sequence = ?"
                 " ORDER BY quality_key ASC";
        if (!selectBook_.prepareStatement(query, session_.get()))
            continue;
        query = {};
        query.str("");
        query << " INSERT INTO " << tablePrefix << "account_tx"
              << " (account, seq_idx, hash) "
              << " VALUES (?,?,?)";
        if (!insertAccountTx_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << " SELECT hash,seq_idx FROM " << tablePrefix << "account_tx"
              << " WHERE account = ? "
              << " AND seq_idx < ? LIMIT ?";
        if (!selectAccountTx_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << " INSERT INTO " << tablePrefix << "ledgers "
              << " (sequence, header) VALUES(?,?)";
        if (!insertLedgerHeader_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << " INSERT INTO " << tablePrefix << "ledger_hashes"
              << " (hash, sequence) VALUES(?,?)";
        if (!insertLedgerHash_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << " update " << tablePrefix << "ledger_range"
              << " set sequence = ? where is_latest = ? if sequence in "
                 "(?,null)";
        if (!updateLedgerRange_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << " select header from " << tablePrefix
              << "ledgers where sequence = ?";
        if (!selectLedgerBySeq_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << " select sequence from " << tablePrefix
              << "ledger_range where is_latest = true";
        if (!selectLatestLedger_.prepareStatement(query, session_.get()))
            continue;

        query = {};
        query.str("");
        query << " SELECT sequence FROM " << tablePrefix
              << "ledger_range WHERE "
              << " is_latest IN (true, false)";
        if (!selectLedgerRange_.prepareStatement(query, session_.get()))
            continue;
        query = {};
        query.str("");
        query << " SELECT key,object FROM " << tablePrefix
              << "objects WHERE sequence = ?";
        if (!selectLedgerDiff_.prepareStatement(query, session_.get()))
@@ -1714,30 +1707,27 @@ CassandraBackend::open()
        query << "TRUNCATE TABLE " << tablePrefix << "ledger_range";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query.str("");
        query << "TRUNCATE TABLE " << tablePrefix << "ledgers";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query.str("");
        query << "TRUNCATE TABLE " << tablePrefix << "ledger_hashes";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query.str("");
        query << "TRUNCATE TABLE " << tablePrefix << "objects";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query.str("");
        query << "TRUNCATE TABLE " << tablePrefix << "transactions";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query.str("");
        query << "TRUNCATE TABLE " << tablePrefix << "account_tx";
        if (!executeSimpleStatement(query.str()))
            continue;
        query = {};
        query << "TRUNCATE TABLE " << tablePrefix << "books";
        if (!executeSimpleStatement(query.str()))
            continue;
        query.str("");
        }
        break;
    }
@@ -1746,26 +1736,26 @@ CassandraBackend::open()
    {
        maxRequestsOutstanding = config_["max_requests_outstanding"].as_int64();
    }
    if (config_.contains("run_indexer"))
    {
        if (config_["run_indexer"].as_bool())
        {
            if (config_.contains("indexer_shift"))
            {
                indexerShift_ = config_["indexer_shift"].as_int64();
            }
            indexer_ = std::thread{[this]() {
                auto seq = getNextToIndex();
                if (seq)
                {
                    BOOST_LOG_TRIVIAL(info)
                        << "Running indexer. Ledger = " << std::to_string(*seq);
                    runIndexer(*seq);
                    BOOST_LOG_TRIVIAL(info) << "Ran indexer";
                }
            }};
        }
    }
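// Illustrative sketch, not part of this diff: the configuration keys consumed
// above, read through the same accessors the backend uses (config_ appears to
// be a boost::json::object). The struct and helper names are hypothetical.

#include <boost/json.hpp>
#include <cstdint>
#include <optional>

struct IndexerSettings
{
    std::optional<std::int64_t> maxRequestsOutstanding;
    bool runIndexer = false;
    std::optional<std::int64_t> indexerShift;
};

inline IndexerSettings
readIndexerSettings(boost::json::object const& config)
{
    IndexerSettings out;
    if (config.contains("max_requests_outstanding"))
        out.maxRequestsOutstanding =
            config.at("max_requests_outstanding").as_int64();
    if (config.contains("run_indexer"))
        out.runIndexer = config.at("run_indexer").as_bool();
    if (config.contains("indexer_shift"))
        out.indexerShift = config.at("indexer_shift").as_int64();
    return out;
}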
    // if (config_.contains("run_indexer"))
    // {
    //     if (config_["run_indexer"].as_bool())
    //     {
    //         if (config_.contains("indexer_shift"))
    //         {
    //             indexerShift_ = config_["indexer_shift"].as_int64();
    //         }
    //         indexer_ = std::thread{[this]() {
    //             auto seq = getNextToIndex();
    //             if (seq)
    //             {
    //                 BOOST_LOG_TRIVIAL(info)
    //                     << "Running indexer. Ledger = " << std::to_string(*seq);
    //                 runIndexer(*seq);
    //                 BOOST_LOG_TRIVIAL(info) << "Ran indexer";
    //             }
    //         }};
    //     }
    // }

    work_.emplace(ioContext_);
    ioThread_ = std::thread{[this]() { ioContext_.run(); }};