Fix compiler warnings (#306)

This commit is contained in:
Alex Kremer
2022-09-12 22:35:30 +02:00
committed by GitHub
parent 97c431680a
commit e2792f5a0c
12 changed files with 63 additions and 105 deletions

View File

@@ -19,7 +19,6 @@ BackendInterface::writeLedgerObject(
std::string&& blob)
{
assert(key.size() == sizeof(ripple::uint256));
ripple::uint256 key256 = ripple::uint256::fromVoid(key.data());
doWriteLedgerObject(std::move(key), seq, std::move(blob));
}

View File

@@ -180,7 +180,7 @@ CassandraBackend::doWriteLedgerObject(
if (range)
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(seq, key)),
std::make_tuple(seq, key),
[this](auto& params) {
auto& [sequence, key] = params.data;
@@ -192,7 +192,7 @@ CassandraBackend::doWriteLedgerObject(
"ledger_diff");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(std::move(key), seq, std::move(blob))),
std::make_tuple(std::move(key), seq, std::move(blob)),
[this](auto& params) {
auto& [key, sequence, blob] = params.data;
@@ -217,7 +217,7 @@ CassandraBackend::writeSuccessor(
assert(successor.size() != 0);
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(std::move(key), seq, std::move(successor))),
std::make_tuple(std::move(key), seq, std::move(successor)),
[this](auto& params) {
auto& [key, sequence, successor] = params.data;
@@ -236,7 +236,7 @@ CassandraBackend::writeLedger(
{
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(ledgerInfo.seq, std::move(header))),
std::make_tuple(ledgerInfo.seq, std::move(header)),
[this](auto& params) {
auto& [sequence, header] = params.data;
CassandraStatement statement{insertLedgerHeader_};
@@ -247,7 +247,7 @@ CassandraBackend::writeLedger(
"ledger");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(ledgerInfo.hash, ledgerInfo.seq)),
std::make_tuple(ledgerInfo.hash, ledgerInfo.seq),
[this](auto& params) {
auto& [hash, sequence] = params.data;
CassandraStatement statement{insertLedgerHash_};
@@ -324,7 +324,7 @@ CassandraBackend::writeTransaction(
makeAndExecuteAsyncWrite(
this,
std::move(std::make_pair(seq, hash)),
std::make_pair(seq, hash),
[this](auto& params) {
CassandraStatement statement{insertLedgerTransaction_};
statement.bindNextInt(params.data.first);
@@ -334,12 +334,12 @@ CassandraBackend::writeTransaction(
"ledger_transaction");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(
std::make_tuple(
std::move(hash),
seq,
date,
std::move(transaction),
std::move(metadata))),
std::move(metadata)),
[this](auto& params) {
CassandraStatement statement{insertTransaction_};
auto& [hash, sequence, date, transaction, metadata] = params.data;
@@ -680,9 +680,6 @@ CassandraBackend::fetchAccountTransactions(
if (!rng)
return {{}, {}};
auto keylet = ripple::keylet::account(account);
auto cursor = cursorIn;
CassandraStatement statement = [this, forward]() {
if (forward)
return CassandraStatement{selectAccountTxForward_};
@@ -690,6 +687,7 @@ CassandraBackend::fetchAccountTransactions(
return CassandraStatement{selectAccountTx_};
}();
auto cursor = cursorIn;
statement.bindNextBytes(account);
if (cursor)
{
@@ -1062,8 +1060,8 @@ CassandraBackend::open(bool readOnly)
cass_cluster_set_credentials(
cluster, username.c_str(), getString("password").c_str());
}
int threads = getInt("threads") ? *getInt("threads")
: std::thread::hardware_concurrency();
int threads =
getInt("threads").value_or(std::thread::hardware_concurrency());
rc = cass_cluster_set_num_threads_io(cluster, threads);
if (rc != CASS_OK)

View File

@@ -648,9 +648,6 @@ private:
// maximum number of concurrent in flight requests. New requests will wait
// for earlier requests to finish if this limit is exceeded
std::uint32_t maxRequestsOutstanding = 10000;
// we keep this small because the indexer runs in the background, and we
// don't want the database to be swamped when the indexer is running
std::uint32_t indexerMaxRequestsOutstanding = 10;
mutable std::atomic_uint32_t numRequestsOutstanding_ = 0;
// mutex and condition_variable to limit the number of concurrent in flight
@@ -1067,6 +1064,7 @@ public:
{
return numRequestsOutstanding_ < maxRequestsOutstanding;
}
inline bool
finishedAllRequests() const
{

View File

@@ -833,7 +833,7 @@ PgPool::checkout()
else if (connections_ < config_.max_connections)
{
++connections_;
ret = std::make_unique<Pg>(config_, ioc_, stop_, mutex_);
ret = std::make_unique<Pg>(config_, ioc_);
}
// Otherwise, wait until a connection becomes available or we stop.
else
@@ -1680,7 +1680,6 @@ getLedger(
whichLedger,
std::shared_ptr<PgPool>& pgPool)
{
ripple::LedgerInfo lgrInfo;
std::stringstream sql;
sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, "
"total_coins, closing_time, prev_closing_time, close_time_res, "

View File

@@ -262,8 +262,6 @@ class Pg
PgConfig const& config_;
boost::asio::io_context::strand strand_;
bool& stop_;
std::mutex& mutex_;
asio_socket_type socket_{nullptr, [](boost::asio::ip::tcp::socket*) {}};
@@ -364,14 +362,9 @@ public:
*
* @param config Config parameters.
* @param j Logger object.
* @param stop Reference to connection pool's stop flag.
* @param mutex Reference to connection pool's mutex.
*/
Pg(PgConfig const& config,
boost::asio::io_context& ctx,
bool& stop,
std::mutex& mutex)
: config_(config), strand_(ctx), stop_(stop), mutex_(mutex)
Pg(PgConfig const& config, boost::asio::io_context& ctx)
: config_(config), strand_(ctx)
{
}
};

View File

@@ -23,8 +23,6 @@ class NetworkValidatedLedgers
std::condition_variable cv_;
bool stopping_ = false;
public:
static std::shared_ptr<NetworkValidatedLedgers>
make_ValidatedLedgers()

View File

@@ -694,8 +694,6 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
beast::setCurrentThreadName("rippled: ReportingETL transform");
uint32_t currentSequence = startSequence;
auto begin = std::chrono::system_clock::now();
while (!writeConflict)
{
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> fetchResponse{

View File

@@ -109,18 +109,6 @@ private:
// deletion
std::atomic_bool deleting_ = false;
/// Used to determine when to write to the database during the initial
/// ledger download. By default, the software downloads an entire ledger and
/// then writes to the database. If flushInterval_ is non-zero, the software
/// will write to the database as new ledger data (SHAMap leaf nodes)
/// arrives. It is not necessarily more efficient to write the data as it
/// arrives, as different SHAMap leaf nodes share the same SHAMap inner
/// nodes; flushing prematurely can result in the same SHAMap inner node
/// being written to the database more than once. It is recommended to use
/// the default value of 0 for this variable; however, different values can
/// be experimented with if better performance is desired.
size_t flushInterval_ = 0;
/// This variable controls the number of GetLedgerData calls that will be
/// executed in parallel during the initial ledger download. GetLedgerData
/// allows clients to page through a ledger over many RPC calls.
@@ -146,7 +134,6 @@ private:
std::optional<uint32_t> startSequence_;
std::optional<uint32_t> finishSequence_;
size_t accumTxns_ = 0;
size_t txnThreshold_ = 0;
/// The time that the most recently published ledger was published. Used by

View File

@@ -54,10 +54,8 @@ doAccountInfo(Context const& context)
auto key = ripple::keylet::account(accountID.value());
auto start = std::chrono::system_clock::now();
std::optional<std::vector<unsigned char>> dbResponse =
context.backend->fetchLedgerObject(key.key, lgrInfo.seq, context.yield);
auto end = std::chrono::system_clock::now();
if (!dbResponse)
{

View File

@@ -351,10 +351,8 @@ doLedgerEntry(Context const& context)
return Status{Error::rpcINVALID_PARAMS, "unknownOption"};
}
auto start = std::chrono::system_clock::now();
auto dbResponse =
context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield);
auto end = std::chrono::system_clock::now();
if (!dbResponse or dbResponse->size() == 0)
return Status{"entryNotFound"};

View File

@@ -260,7 +260,7 @@ public:
if (!id.is_null())
e["id"] = id;
e["request"] = request;
send(boost::json::serialize(e));
this->send(boost::json::serialize(e));
};
try
@@ -421,4 +421,4 @@ public:
}
};
#endif // RIPPLE_REPORTING_WS_BASE_SESSION_H
#endif // RIPPLE_REPORTING_WS_BASE_SESSION_H

View File

@@ -467,14 +467,13 @@ TEST(BackendTest, Basic)
nftData.push_back(*parsedNFT);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
backend->writeTransaction(
std::move(std::string{hashBlob}),
std::string{hashBlob},
lgrInfoNext.seq,
lgrInfoNext.closeTime.time_since_epoch().count(),
std::move(std::string{txnBlob}),
std::move(std::string{metaBlob}));
std::string{txnBlob},
std::string{metaBlob});
backend->writeAccountTransactions(std::move(accountTxData));
// NFT writing not yet implemented for pg
@@ -497,9 +496,9 @@ TEST(BackendTest, Basic)
}
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{accountBlob}));
std::string{accountBlob});
backend->writeSuccessor(
uint256ToString(Backend::firstKey),
lgrInfoNext.seq,
@@ -606,16 +605,15 @@ TEST(BackendTest, Basic)
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
std::shuffle(
accountBlob.begin(),
accountBlob.end(),
std::default_random_engine(seed));
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{accountBlob}));
std::string{accountBlob});
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
}
@@ -669,12 +667,11 @@ TEST(BackendTest, Basic)
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{}));
std::string{});
backend->writeSuccessor(
uint256ToString(Backend::firstKey),
lgrInfoNext.seq,
@@ -716,9 +713,8 @@ TEST(BackendTest, Basic)
EXPECT_FALSE(obj);
}
auto generateObjects = [seed](
size_t numObjects,
uint32_t ledgerSequence) {
auto generateObjects = [](size_t numObjects,
uint32_t ledgerSequence) {
std::vector<std::pair<std::string, std::string>> res{
numObjects};
ripple::uint256 key;
@@ -740,26 +736,26 @@ TEST(BackendTest, Basic)
}
return objs;
};
auto generateTxns =
[seed](size_t numTxns, uint32_t ledgerSequence) {
std::vector<
std::tuple<std::string, std::string, std::string>>
res{numTxns};
ripple::uint256 base;
base = ledgerSequence * 100000;
for (auto& blob : res)
{
++base;
std::string hashStr{
(const char*)base.data(), base.size()};
std::string txnStr =
"tx" + std::to_string(ledgerSequence) + hashStr;
std::string metaStr = "meta" +
std::to_string(ledgerSequence) + hashStr;
blob = std::make_tuple(hashStr, txnStr, metaStr);
}
return res;
};
auto generateTxns = [](size_t numTxns,
uint32_t ledgerSequence) {
std::vector<
std::tuple<std::string, std::string, std::string>>
res{numTxns};
ripple::uint256 base;
base = ledgerSequence * 100000;
for (auto& blob : res)
{
++base;
std::string hashStr{
(const char*)base.data(), base.size()};
std::string txnStr =
"tx" + std::to_string(ledgerSequence) + hashStr;
std::string metaStr =
"meta" + std::to_string(ledgerSequence) + hashStr;
blob = std::make_tuple(hashStr, txnStr, metaStr);
}
return res;
};
auto generateAccounts = [](uint32_t ledgerSequence,
uint32_t numAccounts) {
std::vector<ripple::AccountID> accounts;
@@ -824,7 +820,7 @@ TEST(BackendTest, Basic)
backend->startWrites();
backend->writeLedger(
lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo)));
lgrInfo, ledgerInfoToBinaryString(lgrInfo));
for (auto [hash, txn, meta] : txns)
{
backend->writeTransaction(
@@ -2042,12 +2038,11 @@ TEST(Backend, cacheIntegration)
lgrInfoNext.hash++;
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{accountBlob}));
std::string{accountBlob});
auto key =
ripple::uint256::fromVoidChecked(accountIndexBlob);
backend->cache().update(
@@ -2109,8 +2104,7 @@ TEST(Backend, cacheIntegration)
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
std::shuffle(
accountBlob.begin(),
accountBlob.end(),
@@ -2121,9 +2115,9 @@ TEST(Backend, cacheIntegration)
{{*key, {accountBlob.begin(), accountBlob.end()}}},
lgrInfoNext.seq);
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{accountBlob}));
std::string{accountBlob});
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq));
}
@@ -2171,15 +2165,14 @@ TEST(Backend, cacheIntegration)
~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash);
backend->writeLedger(
lgrInfoNext,
std::move(ledgerInfoToBinaryString(lgrInfoNext)));
lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext));
auto key =
ripple::uint256::fromVoidChecked(accountIndexBlob);
backend->cache().update({{*key, {}}}, lgrInfoNext.seq);
backend->writeLedgerObject(
std::move(std::string{accountIndexBlob}),
std::string{accountIndexBlob},
lgrInfoNext.seq,
std::move(std::string{}));
std::string{});
backend->writeSuccessor(
uint256ToString(Backend::firstKey),
lgrInfoNext.seq,
@@ -2215,9 +2208,8 @@ TEST(Backend, cacheIntegration)
EXPECT_FALSE(obj);
}
auto generateObjects = [seed](
size_t numObjects,
uint32_t ledgerSequence) {
auto generateObjects = [](size_t numObjects,
uint32_t ledgerSequence) {
std::vector<std::pair<std::string, std::string>> res{
numObjects};
ripple::uint256 key;