Fix compiler warnings (#306)

This commit is contained in:
Alex Kremer
2022-09-12 22:35:30 +02:00
committed by GitHub
parent 97c431680a
commit e2792f5a0c
12 changed files with 63 additions and 105 deletions

View File

@@ -19,7 +19,6 @@ BackendInterface::writeLedgerObject(
std::string&& blob)
{
assert(key.size() == sizeof(ripple::uint256));
ripple::uint256 key256 = ripple::uint256::fromVoid(key.data());
doWriteLedgerObject(std::move(key), seq, std::move(blob));
}

View File

@@ -180,7 +180,7 @@ CassandraBackend::doWriteLedgerObject(
if (range)
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(seq, key)),
std::make_tuple(seq, key),
[this](auto& params) {
auto& [sequence, key] = params.data;
@@ -192,7 +192,7 @@ CassandraBackend::doWriteLedgerObject(
"ledger_diff");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(std::move(key), seq, std::move(blob))),
std::make_tuple(std::move(key), seq, std::move(blob)),
[this](auto& params) {
auto& [key, sequence, blob] = params.data;
@@ -217,7 +217,7 @@ CassandraBackend::writeSuccessor(
assert(successor.size() != 0);
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(std::move(key), seq, std::move(successor))),
std::make_tuple(std::move(key), seq, std::move(successor)),
[this](auto& params) {
auto& [key, sequence, successor] = params.data;
@@ -236,7 +236,7 @@ CassandraBackend::writeLedger(
{
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(ledgerInfo.seq, std::move(header))),
std::make_tuple(ledgerInfo.seq, std::move(header)),
[this](auto& params) {
auto& [sequence, header] = params.data;
CassandraStatement statement{insertLedgerHeader_};
@@ -247,7 +247,7 @@ CassandraBackend::writeLedger(
"ledger");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(ledgerInfo.hash, ledgerInfo.seq)),
std::make_tuple(ledgerInfo.hash, ledgerInfo.seq),
[this](auto& params) {
auto& [hash, sequence] = params.data;
CassandraStatement statement{insertLedgerHash_};
@@ -324,7 +324,7 @@ CassandraBackend::writeTransaction(
makeAndExecuteAsyncWrite(
this,
std::move(std::make_pair(seq, hash)),
std::make_pair(seq, hash),
[this](auto& params) {
CassandraStatement statement{insertLedgerTransaction_};
statement.bindNextInt(params.data.first);
@@ -334,12 +334,12 @@ CassandraBackend::writeTransaction(
"ledger_transaction");
makeAndExecuteAsyncWrite(
this,
std::move(std::make_tuple(
std::make_tuple(
std::move(hash),
seq,
date,
std::move(transaction),
std::move(metadata))),
std::move(metadata)),
[this](auto& params) {
CassandraStatement statement{insertTransaction_};
auto& [hash, sequence, date, transaction, metadata] = params.data;
@@ -680,9 +680,6 @@ CassandraBackend::fetchAccountTransactions(
if (!rng)
return {{}, {}};
auto keylet = ripple::keylet::account(account);
auto cursor = cursorIn;
CassandraStatement statement = [this, forward]() {
if (forward)
return CassandraStatement{selectAccountTxForward_};
@@ -690,6 +687,7 @@ CassandraBackend::fetchAccountTransactions(
return CassandraStatement{selectAccountTx_};
}();
auto cursor = cursorIn;
statement.bindNextBytes(account);
if (cursor)
{
@@ -1062,8 +1060,8 @@ CassandraBackend::open(bool readOnly)
cass_cluster_set_credentials(
cluster, username.c_str(), getString("password").c_str());
}
int threads = getInt("threads") ? *getInt("threads")
: std::thread::hardware_concurrency();
int threads =
getInt("threads").value_or(std::thread::hardware_concurrency());
rc = cass_cluster_set_num_threads_io(cluster, threads);
if (rc != CASS_OK)

View File

@@ -648,9 +648,6 @@ private:
// maximum number of concurrent in flight requests. New requests will wait
// for earlier requests to finish if this limit is exceeded
std::uint32_t maxRequestsOutstanding = 10000;
// we keep this small because the indexer runs in the background, and we
// don't want the database to be swamped when the indexer is running
std::uint32_t indexerMaxRequestsOutstanding = 10;
mutable std::atomic_uint32_t numRequestsOutstanding_ = 0;
// mutex and condition_variable to limit the number of concurrent in flight
@@ -1067,6 +1064,7 @@ public:
{
return numRequestsOutstanding_ < maxRequestsOutstanding;
}
inline bool
finishedAllRequests() const
{

View File

@@ -833,7 +833,7 @@ PgPool::checkout()
else if (connections_ < config_.max_connections)
{
++connections_;
ret = std::make_unique<Pg>(config_, ioc_, stop_, mutex_);
ret = std::make_unique<Pg>(config_, ioc_);
}
// Otherwise, wait until a connection becomes available or we stop.
else
@@ -1680,7 +1680,6 @@ getLedger(
whichLedger,
std::shared_ptr<PgPool>& pgPool)
{
ripple::LedgerInfo lgrInfo;
std::stringstream sql;
sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, "
"total_coins, closing_time, prev_closing_time, close_time_res, "

View File

@@ -262,8 +262,6 @@ class Pg
PgConfig const& config_;
boost::asio::io_context::strand strand_;
bool& stop_;
std::mutex& mutex_;
asio_socket_type socket_{nullptr, [](boost::asio::ip::tcp::socket*) {}};
@@ -364,14 +362,9 @@ public:
*
* @param config Config parameters.
* @param j Logger object.
* @param stop Reference to connection pool's stop flag.
* @param mutex Reference to connection pool's mutex.
*/
Pg(PgConfig const& config,
boost::asio::io_context& ctx,
bool& stop,
std::mutex& mutex)
: config_(config), strand_(ctx), stop_(stop), mutex_(mutex)
Pg(PgConfig const& config, boost::asio::io_context& ctx)
: config_(config), strand_(ctx)
{
}
};

View File

@@ -23,8 +23,6 @@ class NetworkValidatedLedgers
std::condition_variable cv_;
bool stopping_ = false;
public:
static std::shared_ptr<NetworkValidatedLedgers>
make_ValidatedLedgers()

View File

@@ -694,8 +694,6 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors)
beast::setCurrentThreadName("rippled: ReportingETL transform");
uint32_t currentSequence = startSequence;
auto begin = std::chrono::system_clock::now();
while (!writeConflict)
{
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> fetchResponse{

View File

@@ -109,18 +109,6 @@ private:
// deletion
std::atomic_bool deleting_ = false;
/// Used to determine when to write to the database during the initial
/// ledger download. By default, the software downloads an entire ledger and
/// then writes to the database. If flushInterval_ is non-zero, the software
/// will write to the database as new ledger data (SHAMap leaf nodes)
/// arrives. It is not necessarily more efficient to write the data as it
/// arrives, as different SHAMap leaf nodes share the same SHAMap inner
/// nodes; flushing prematurely can result in the same SHAMap inner node
/// being written to the database more than once. It is recommended to use
/// the default value of 0 for this variable; however, different values can
/// be experimented with if better performance is desired.
size_t flushInterval_ = 0;
/// This variable controls the number of GetLedgerData calls that will be
/// executed in parallel during the initial ledger download. GetLedgerData
/// allows clients to page through a ledger over many RPC calls.
@@ -146,7 +134,6 @@ private:
std::optional<uint32_t> startSequence_;
std::optional<uint32_t> finishSequence_;
size_t accumTxns_ = 0;
size_t txnThreshold_ = 0;
/// The time that the most recently published ledger was published. Used by

View File

@@ -54,10 +54,8 @@ doAccountInfo(Context const& context)
auto key = ripple::keylet::account(accountID.value());
auto start = std::chrono::system_clock::now();
std::optional<std::vector<unsigned char>> dbResponse =
context.backend->fetchLedgerObject(key.key, lgrInfo.seq, context.yield);
auto end = std::chrono::system_clock::now();
if (!dbResponse)
{

View File

@@ -351,10 +351,8 @@ doLedgerEntry(Context const& context)
return Status{Error::rpcINVALID_PARAMS, "unknownOption"};
}
auto start = std::chrono::system_clock::now();
auto dbResponse =
context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield);
auto end = std::chrono::system_clock::now();
if (!dbResponse or dbResponse->size() == 0)
return Status{"entryNotFound"};

View File

@@ -260,7 +260,7 @@ public:
if (!id.is_null())
e["id"] = id;
e["request"] = request;
send(boost::json::serialize(e));
this->send(boost::json::serialize(e));
};
try
@@ -421,4 +421,4 @@ public:
}
};
#endif // RIPPLE_REPORTING_WS_BASE_SESSION_H
#endif // RIPPLE_REPORTING_WS_BASE_SESSION_H