diff --git a/.clang-format b/.clang-format
index ba409869..bd853278 100644
--- a/.clang-format
+++ b/.clang-format
@@ -34,7 +34,7 @@ BreakBeforeBinaryOperators: false
 BreakBeforeBraces: Custom
 BreakBeforeTernaryOperators: true
 BreakConstructorInitializersBeforeComma: true
-ColumnLimit: 80
+ColumnLimit: 120
 CommentPragmas: '^ IWYU pragma:'
 ConstructorInitializerAllOnOneLineOrOnePerLine: true
 ConstructorInitializerIndentWidth: 4
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index beaec158..f1a34346 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -7,3 +7,4 @@
 # clang-format
 e41150248a97e4bdc1cf21b54650c4bb7c63928e
 2e542e7b0d94451a933c88778461cc8d3d7e6417
+cde682b42d1b3f798abb3d5a0b729e48a9cbeb27
diff --git a/src/backend/BackendFactory.h b/src/backend/BackendFactory.h
index 774fffc5..ac12ddb1 100644
--- a/src/backend/BackendFactory.h
+++ b/src/backend/BackendFactory.h
@@ -48,8 +48,8 @@ make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
     {
         auto cfg = config.section("database." + type);
         auto ttl = config.valueOr("online_delete", 0) * 4;
-        backend = std::make_shared(
-            Backend::Cassandra::SettingsProvider{cfg, ttl});
+        backend =
+            std::make_shared(Backend::Cassandra::SettingsProvider{cfg, ttl});
     }
 
     if (!backend)
diff --git a/src/backend/BackendInterface.cpp b/src/backend/BackendInterface.cpp
index 8d1e69ad..8b2322bf 100644
--- a/src/backend/BackendInterface.cpp
+++ b/src/backend/BackendInterface.cpp
@@ -37,25 +37,20 @@ BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
     auto commitRes = doFinishWrites();
     if (commitRes)
     {
-        gLog.debug() << "Successfully commited. Updating range now to "
-                     << ledgerSequence;
+        gLog.debug() << "Successfully commited. Updating range now to " << ledgerSequence;
         updateRange(ledgerSequence);
     }
     return commitRes;
 }
 void
-BackendInterface::writeLedgerObject(
-    std::string&& key,
-    std::uint32_t const seq,
-    std::string&& blob)
+BackendInterface::writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob)
 {
     assert(key.size() == sizeof(ripple::uint256));
     doWriteLedgerObject(std::move(key), seq, std::move(blob));
 }
 
 std::optional
-BackendInterface::hardFetchLedgerRangeNoThrow(
-    boost::asio::yield_context& yield) const
+BackendInterface::hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const
 {
     gLog.trace() << "called";
     while (true)
@@ -120,8 +115,7 @@ BackendInterface::fetchLedgerObjects(
         else
             misses.push_back(keys[i]);
     }
-    gLog.trace() << "Cache hits = " << keys.size() - misses.size()
-                 << " - cache misses = " << misses.size();
+    gLog.trace() << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
 
     if (misses.size())
     {
@@ -184,10 +178,7 @@ BackendInterface::fetchBookOffers(
     const ripple::uint256 bookEnd = ripple::getQualityNext(book);
     ripple::uint256 uTipIndex = book;
     std::vector keys;
-    auto getMillis = [](auto diff) {
-        return std::chrono::duration_cast(diff)
-            .count();
-    };
+    auto getMillis = [](auto diff) { return std::chrono::duration_cast(diff).count(); };
     auto begin = std::chrono::system_clock::now();
     std::uint32_t numSucc = 0;
     std::uint32_t numPages = 0;
@@ -202,18 +193,14 @@ BackendInterface::fetchBookOffers(
         succMillis += getMillis(mid2 - mid1);
         if (!offerDir || offerDir->key >= bookEnd)
         {
-            gLog.trace() << "offerDir.has_value() " << offerDir.has_value()
-                         << " breaking";
+            gLog.trace() << "offerDir.has_value() " << offerDir.has_value() << " breaking";
             break;
         }
         uTipIndex = offerDir->key;
         while (keys.size() < limit)
         {
             ++numPages;
- ripple::STLedgerEntry sle{ - ripple::SerialIter{ - offerDir->blob.data(), offerDir->blob.size()}, - offerDir->key}; + ripple::STLedgerEntry sle{ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key}; auto indexes = sle.getFieldV256(ripple::sfIndexes); keys.insert(keys.end(), indexes.begin(), indexes.end()); auto next = sle.getFieldU64(ripple::sfIndexNext); @@ -223,8 +210,7 @@ BackendInterface::fetchBookOffers( break; } auto nextKey = ripple::keylet::page(uTipIndex, next); - auto nextDir = - fetchLedgerObject(nextKey.key, ledgerSequence, yield); + auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence, yield); assert(nextDir); offerDir->blob = *nextDir; offerDir->key = nextKey.key; @@ -236,26 +222,20 @@ BackendInterface::fetchBookOffers( auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); for (size_t i = 0; i < keys.size() && i < limit; ++i) { - gLog.trace() << "Key = " << ripple::strHex(keys[i]) - << " blob = " << ripple::strHex(objs[i]) + gLog.trace() << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i]) << " ledgerSequence = " << ledgerSequence; assert(objs[i].size()); page.offers.push_back({keys[i], objs[i]}); } auto end = std::chrono::system_clock::now(); - gLog.debug() << "Fetching " << std::to_string(keys.size()) - << " offers took " << std::to_string(getMillis(mid - begin)) - << " milliseconds. Fetching next dir took " - << std::to_string(succMillis) - << " milliseonds. Fetched next dir " << std::to_string(numSucc) + gLog.debug() << "Fetching " << std::to_string(keys.size()) << " offers took " + << std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took " + << std::to_string(succMillis) << " milliseonds. Fetched next dir " << std::to_string(numSucc) << " times" - << " Fetching next page of dir took " - << std::to_string(pageMillis) << " milliseconds" - << ". num pages = " << std::to_string(numPages) - << ". Fetching all objects took " + << " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds" + << ". num pages = " << std::to_string(numPages) << ". Fetching all objects took " << std::to_string(getMillis(end - mid)) - << " milliseconds. total time = " - << std::to_string(getMillis(end - begin)) << " milliseconds" + << " milliseconds. total time = " << std::to_string(getMillis(end - begin)) << " milliseconds" << " book = " << ripple::strHex(book); return page; @@ -275,11 +255,8 @@ BackendInterface::fetchLedgerPage( bool reachedEnd = false; while (keys.size() < limit && !reachedEnd) { - ripple::uint256 const& curCursor = keys.size() ? keys.back() - : cursor ? *cursor - : firstKey; - std::uint32_t const seq = - outOfOrder ? range->maxSequence : ledgerSequence; + ripple::uint256 const& curCursor = keys.size() ? keys.back() : cursor ? *cursor : firstKey; + std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence; auto succ = fetchSuccessorKey(curCursor, seq, yield); if (!succ) reachedEnd = true; @@ -294,9 +271,8 @@ BackendInterface::fetchLedgerPage( page.objects.push_back({std::move(keys[i]), std::move(objects[i])}); else if (!outOfOrder) { - gLog.error() - << "Deleted or non-existent object in successor table. key = " - << ripple::strHex(keys[i]) << " - seq = " << ledgerSequence; + gLog.error() << "Deleted or non-existent object in successor table. 
key = " << ripple::strHex(keys[i]) + << " - seq = " << ledgerSequence; std::stringstream msg; for (size_t j = 0; j < objects.size(); ++j) { @@ -312,9 +288,7 @@ BackendInterface::fetchLedgerPage( } std::optional -BackendInterface::fetchFees( - std::uint32_t const seq, - boost::asio::yield_context& yield) const +BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const { ripple::Fees fees; diff --git a/src/backend/BackendInterface.h b/src/backend/BackendInterface.h index 33098249..6374c806 100644 --- a/src/backend/BackendInterface.h +++ b/src/backend/BackendInterface.h @@ -72,8 +72,7 @@ retryOnTimeout(F func, size_t waitMs = 500) } catch (DatabaseTimeout& t) { - log.error() - << "Database request timed out. Sleeping and retrying ... "; + log.error() << "Database request timed out. Sleeping and retrying ... "; std::this_thread::sleep_for(std::chrono::milliseconds(waitMs)); } } @@ -122,11 +121,10 @@ synchronous(F&& f) * executing coroutine, yield. The different type is returned. */ R res; - boost::asio::spawn( - strand, [&f, &work, &res](boost::asio::yield_context yield) { - res = f(yield); - work.reset(); - }); + boost::asio::spawn(strand, [&f, &work, &res](boost::asio::yield_context yield) { + res = f(yield); + work.reset(); + }); ctx.run(); return res; @@ -134,11 +132,10 @@ synchronous(F&& f) else { /*! @brief When the corutine type is different, run as normal. */ - boost::asio::spawn( - strand, [&f, &work](boost::asio::yield_context yield) { - f(yield); - work.reset(); - }); + boost::asio::spawn(strand, [&f, &work](boost::asio::yield_context yield) { + f(yield); + work.reset(); + }); ctx.run(); } @@ -209,15 +206,11 @@ public: /*! @brief Fetches a specific ledger by sequence number. */ virtual std::optional - fetchLedgerBySequence( - std::uint32_t const sequence, - boost::asio::yield_context& yield) const = 0; + fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context& yield) const = 0; /*! @brief Fetches a specific ledger by hash. */ virtual std::optional - fetchLedgerByHash( - ripple::uint256 const& hash, - boost::asio::yield_context& yield) const = 0; + fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0; /*! @brief Fetches the latest ledger sequence. */ virtual std::optional @@ -269,9 +262,7 @@ public: * @return std::optional */ virtual std::optional - fetchTransaction( - ripple::uint256 const& hash, - boost::asio::yield_context& yield) const = 0; + fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0; /** * @brief Fetches multiple transactions. @@ -281,9 +272,7 @@ public: * @return std::vector */ virtual std::vector - fetchTransactions( - std::vector const& hashes, - boost::asio::yield_context& yield) const = 0; + fetchTransactions(std::vector const& hashes, boost::asio::yield_context& yield) const = 0; /** * @brief Fetches all transactions for a specific account @@ -314,9 +303,7 @@ public: * @return std::vector */ virtual std::vector - fetchAllTransactionsInLedger( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const = 0; + fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0; /** * @brief Fetches all transaction hashes from a specific ledger. 
@@ -326,9 +313,7 @@ public: * @return std::vector */ virtual std::vector - fetchAllTransactionHashesInLedger( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const = 0; + fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0; /*! @brief NFT methods */ /** @@ -340,10 +325,8 @@ public: * @return std::optional */ virtual std::optional - fetchNFT( - ripple::uint256 const& tokenID, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const = 0; + fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const = 0; /** * @brief Fetches all transactions for a specific NFT. @@ -373,10 +356,8 @@ public: * @return std::optional */ std::optional - fetchLedgerObject( - ripple::uint256 const& key, - std::uint32_t const sequence, - boost::asio::yield_context& yield) const; + fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield) + const; /** * @brief Fetches all ledger objects: a vector of vectors of unsigned chars. @@ -394,10 +375,8 @@ public: /*! @brief Virtual function version of fetchLedgerObject */ virtual std::optional - doFetchLedgerObject( - ripple::uint256 const& key, - std::uint32_t const sequence, - boost::asio::yield_context& yield) const = 0; + doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield) + const = 0; /*! @brief Virtual function version of fetchLedgerObjects */ virtual std::vector @@ -417,9 +396,7 @@ public: * @return std::vector */ virtual std::vector - fetchLedgerDiff( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const = 0; + fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0; /** * @brief Fetches a page of ledger objects, ordered by key/index. @@ -441,24 +418,17 @@ public: /*! @brief Fetches successor object from key/index. */ std::optional - fetchSuccessorObject( - ripple::uint256 key, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const; + fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const; /*! @brief Fetches successor key from key/index. */ std::optional - fetchSuccessorKey( - ripple::uint256 key, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const; + fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const; /*! @brief Virtual function version of fetchSuccessorKey. */ virtual std::optional - doFetchSuccessorKey( - ripple::uint256 key, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const = 0; + doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const = 0; /** * @brief Fetches book offers. @@ -490,9 +460,7 @@ public: std::optional hardFetchLedgerRange() const { - return synchronous([&](boost::asio::yield_context yield) { - return hardFetchLedgerRange(yield); - }); + return synchronous([&](boost::asio::yield_context yield) { return hardFetchLedgerRange(yield); }); } /*! @brief Virtual function equivalent of hardFetchLedgerRange. */ @@ -513,9 +481,7 @@ public: * @param ledgerHeader r-value string representing ledger header. 
*/ virtual void - writeLedger( - ripple::LedgerInfo const& ledgerInfo, - std::string&& ledgerHeader) = 0; + writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& ledgerHeader) = 0; /** * @brief Writes a new ledger object. @@ -527,10 +493,7 @@ public: * @param blob r-value vector of unsigned characters (blob). */ virtual void - writeLedgerObject( - std::string&& key, - std::uint32_t const seq, - std::string&& blob); + writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob); /** * @brief Writes a new transaction. @@ -581,10 +544,7 @@ public: * @param successor Passed in as an r-value reference. */ virtual void - writeSuccessor( - std::string&& key, - std::uint32_t const seq, - std::string&& successor) = 0; + writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0; /*! @brief Tells database we will write data for a specific ledger. */ virtual void @@ -613,9 +573,7 @@ public: * @return false */ virtual bool - doOnlineDelete( - std::uint32_t numLedgersToKeep, - boost::asio::yield_context& yield) const = 0; + doOnlineDelete(std::uint32_t numLedgersToKeep, boost::asio::yield_context& yield) const = 0; /** * @brief Opens the database @@ -645,10 +603,7 @@ private: * @param blob r-value vector of unsigned chars. */ virtual void - doWriteLedgerObject( - std::string&& key, - std::uint32_t const seq, - std::string&& blob) = 0; + doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0; virtual bool doFinishWrites() = 0; diff --git a/src/backend/CassandraBackend.cpp b/src/backend/CassandraBackend.cpp index 1325395b..fa12f82e 100644 --- a/src/backend/CassandraBackend.cpp +++ b/src/backend/CassandraBackend.cpp @@ -48,22 +48,15 @@ processAsyncWriteResponse(T& requestParams, CassFuture* fut, F func) if (rc != CASS_OK) { // exponential backoff with a max wait of 2^10 ms (about 1 second) - auto wait = std::chrono::milliseconds( - lround(std::pow(2, std::min(10u, requestParams.currentRetries)))); - log.error() << "ERROR!!! Cassandra write error: " << rc << ", " - << cass_error_desc(rc) - << " id= " << requestParams.toString() - << ", current retries " << requestParams.currentRetries + auto wait = std::chrono::milliseconds(lround(std::pow(2, std::min(10u, requestParams.currentRetries)))); + log.error() << "ERROR!!! 
Cassandra write error: " << rc << ", " << cass_error_desc(rc) + << " id= " << requestParams.toString() << ", current retries " << requestParams.currentRetries << ", retrying in " << wait.count() << " milliseconds"; ++requestParams.currentRetries; - std::shared_ptr timer = - std::make_shared( - backend.getIOContext(), - std::chrono::steady_clock::now() + wait); - timer->async_wait([timer, &requestParams, func]( - const boost::system::error_code& error) { - func(requestParams, true); - }); + std::shared_ptr timer = std::make_shared( + backend.getIOContext(), std::chrono::steady_clock::now() + wait); + timer->async_wait( + [timer, &requestParams, func](const boost::system::error_code& error) { func(requestParams, true); }); } else { @@ -91,21 +84,13 @@ struct WriteCallbackData std::atomic refs = 1; std::string id; - WriteCallbackData( - CassandraBackend const* b, - T&& d, - B bind, - std::string const& identifier) + WriteCallbackData(CassandraBackend const* b, T&& d, B bind, std::string const& identifier) : backend(b), data(std::move(d)), id(identifier) { retry = [bind, this](auto& params, bool isRetry) { auto statement = bind(params); backend->executeAsyncWrite( - statement, - processAsyncWrite< - typename std::remove_reference::type>, - params, - isRetry); + statement, processAsyncWrite::type>, params, isRetry); }; } virtual void @@ -146,10 +131,7 @@ struct BulkWriteCallbackData : public WriteCallbackData std::atomic_int& r, std::mutex& m, std::condition_variable& c) - : WriteCallbackData(b, std::move(d), bind, "bulk") - , numRemaining(r) - , mtx(m) - , cv(c) + : WriteCallbackData(b, std::move(d), bind, "bulk"), numRemaining(r), mtx(m), cv(c) { } void @@ -173,11 +155,7 @@ struct BulkWriteCallbackData : public WriteCallbackData template void -makeAndExecuteAsyncWrite( - CassandraBackend const* b, - T&& d, - B bind, - std::string const& id) +makeAndExecuteAsyncWrite(CassandraBackend const* b, T&& d, B bind, std::string const& id) { auto* cb = new WriteCallbackData(b, std::move(d), bind, id); cb->start(); @@ -193,17 +171,13 @@ makeAndExecuteBulkAsyncWrite( std::mutex& m, std::condition_variable& c) { - auto cb = std::make_shared>( - b, std::move(d), bind, r, m, c); + auto cb = std::make_shared>(b, std::move(d), bind, r, m, c); cb->start(); return cb; } void -CassandraBackend::doWriteLedgerObject( - std::string&& key, - std::uint32_t const seq, - std::string&& blob) +CassandraBackend::doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) { log_.trace() << "Writing ledger object to cassandra"; if (range) @@ -235,14 +209,10 @@ CassandraBackend::doWriteLedgerObject( } void -CassandraBackend::writeSuccessor( - std::string&& key, - std::uint32_t const seq, - std::string&& successor) +CassandraBackend::writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) { log_.trace() << "Writing successor. key = " << key.size() << " bytes. 
" - << " seq = " << std::to_string(seq) - << " successor = " << successor.size() << " bytes."; + << " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes."; assert(key.size() != 0); assert(successor.size() != 0); makeAndExecuteAsyncWrite( @@ -260,9 +230,7 @@ CassandraBackend::writeSuccessor( "successor"); } void -CassandraBackend::writeLedger( - ripple::LedgerInfo const& ledgerInfo, - std::string&& header) +CassandraBackend::writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& header) { makeAndExecuteAsyncWrite( this, @@ -290,8 +258,7 @@ CassandraBackend::writeLedger( } void -CassandraBackend::writeAccountTransactions( - std::vector&& data) +CassandraBackend::writeAccountTransactions(std::vector&& data) { for (auto& record : data) { @@ -299,11 +266,7 @@ CassandraBackend::writeAccountTransactions( { makeAndExecuteAsyncWrite( this, - std::make_tuple( - std::move(account), - record.ledgerSequence, - record.transactionIndex, - record.txHash), + std::make_tuple(std::move(account), record.ledgerSequence, record.transactionIndex, record.txHash), [this](auto& params) { CassandraStatement statement(insertAccountTx_); auto& [account, lgrSeq, txnIdx, hash] = params.data; @@ -324,11 +287,7 @@ CassandraBackend::writeNFTTransactions(std::vector&& data) { makeAndExecuteAsyncWrite( this, - std::make_tuple( - record.tokenID, - record.ledgerSequence, - record.transactionIndex, - record.txHash), + std::make_tuple(record.tokenID, record.ledgerSequence, record.transactionIndex, record.txHash), [this](auto const& params) { CassandraStatement statement(insertNFTTx_); auto const& [tokenID, lgrSeq, txnIdx, txHash] = params.data; @@ -364,12 +323,7 @@ CassandraBackend::writeTransaction( "ledger_transaction"); makeAndExecuteAsyncWrite( this, - std::make_tuple( - std::move(hash), - seq, - date, - std::move(transaction), - std::move(metadata)), + std::make_tuple(std::move(hash), seq, date, std::move(transaction), std::move(metadata)), [this](auto& params) { CassandraStatement statement{insertTransaction_}; auto& [hash, sequence, date, transaction, metadata] = params.data; @@ -390,11 +344,7 @@ CassandraBackend::writeNFTs(std::vector&& data) { makeAndExecuteAsyncWrite( this, - std::make_tuple( - record.tokenID, - record.ledgerSequence, - record.owner, - record.isBurned), + std::make_tuple(record.tokenID, record.ledgerSequence, record.owner, record.isBurned), [this](auto const& params) { CassandraStatement statement{insertNFT_}; auto const& [tokenID, lgrSeq, owner, isBurned] = params.data; @@ -420,8 +370,7 @@ CassandraBackend::writeNFTs(std::vector&& data) CassandraStatement statement{insertIssuerNFT_}; auto const& [tokenID] = params.data; statement.bindNextBytes(ripple::nft::getIssuer(tokenID)); - statement.bindNextInt( - ripple::nft::toUInt32(ripple::nft::getTaxon(tokenID))); + statement.bindNextInt(ripple::nft::toUInt32(ripple::nft::getTaxon(tokenID))); statement.bindNextBytes(tokenID); return statement; }, @@ -429,8 +378,7 @@ CassandraBackend::writeNFTs(std::vector&& data) makeAndExecuteAsyncWrite( this, - std::make_tuple( - record.tokenID, record.ledgerSequence, record.uri.value()), + std::make_tuple(record.tokenID, record.ledgerSequence, record.uri.value()), [this](auto const& params) { CassandraStatement statement{insertNFTURI_}; auto const& [tokenID, lgrSeq, uri] = params.data; @@ -470,9 +418,8 @@ CassandraBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const } std::vector -CassandraBackend::fetchAllTransactionsInLedger( - std::uint32_t const ledgerSequence, 
- boost::asio::yield_context& yield) const +CassandraBackend::fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const { auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield); return fetchTransactions(hashes, yield); @@ -517,26 +464,21 @@ struct ReadCallbackData void resume() { - boost::asio::post( - boost::asio::get_associated_executor(handler), - [handler = std::move(handler)]() mutable { - handler(boost::system::error_code{}); - }); + boost::asio::post(boost::asio::get_associated_executor(handler), [handler = std::move(handler)]() mutable { + handler(boost::system::error_code{}); + }); } }; void processAsyncRead(CassFuture* fut, void* cbData) { - ReadCallbackData& cb = - *static_cast*>(cbData); + ReadCallbackData& cb = *static_cast*>(cbData); cb.finish(fut); } std::vector -CassandraBackend::fetchTransactions( - std::vector const& hashes, - boost::asio::yield_context& yield) const +CassandraBackend::fetchTransactions(std::vector const& hashes, boost::asio::yield_context& yield) const { if (hashes.size() == 0) return {}; @@ -556,14 +498,10 @@ CassandraBackend::fetchTransactions( CassandraStatement statement{selectTransaction_}; statement.bindNextBytes(hashes[i]); - cbs.push_back(std::make_shared>( - numOutstanding, handler, [i, &results](auto& result) { + cbs.push_back( + std::make_shared>(numOutstanding, handler, [i, &results](auto& result) { if (result.hasResult()) - results[i] = { - result.getBytes(), - result.getBytes(), - result.getUInt32(), - result.getUInt32()}; + results[i] = {result.getBytes(), result.getBytes(), result.getUInt32(), result.getUInt32()}; })); executeAsyncRead(statement, processAsyncRead, *cbs[i]); @@ -580,9 +518,7 @@ CassandraBackend::fetchTransactions( throw DatabaseTimeout(); } - log_.debug() << "Fetched " << numHashes - << " transactions from Cassandra in " << timeDiff - << " milliseconds"; + log_.debug() << "Fetched " << numHashes << " transactions from Cassandra in " << timeDiff << " milliseconds"; return results; } @@ -608,12 +544,8 @@ CassandraBackend::fetchAllTransactionHashesInLedger( { hashes.push_back(result.getUInt256()); } while (result.nextRow()); - log_.debug() << "Fetched " << hashes.size() - << " transaction hashes from Cassandra in " - << std::chrono::duration_cast( - end - start) - .count() - << " milliseconds"; + log_.debug() << "Fetched " << hashes.size() << " transaction hashes from Cassandra in " + << std::chrono::duration_cast(end - start).count() << " milliseconds"; return hashes; } @@ -668,29 +600,23 @@ CassandraBackend::fetchNFTTransactions( if (!rng) return {{}, {}}; - CassandraStatement statement = forward - ? CassandraStatement(selectNFTTxForward_) - : CassandraStatement(selectNFTTx_); + CassandraStatement statement = forward ? CassandraStatement(selectNFTTxForward_) : CassandraStatement(selectNFTTx_); statement.bindNextBytes(tokenID); if (cursor) { - statement.bindNextIntTuple( - cursor->ledgerSequence, cursor->transactionIndex); - log_.debug() << "token_id = " << ripple::strHex(tokenID) - << " tuple = " << cursor->ledgerSequence + statement.bindNextIntTuple(cursor->ledgerSequence, cursor->transactionIndex); + log_.debug() << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence << cursor->transactionIndex; } else { int const seq = forward ? rng->minSequence : rng->maxSequence; - int const placeHolder = - forward ? 0 : std::numeric_limits::max(); + int const placeHolder = forward ? 
0 : std::numeric_limits::max(); statement.bindNextIntTuple(placeHolder, placeHolder); - log_.debug() << "token_id = " << ripple::strHex(tokenID) - << " idx = " << seq << " tuple = " << placeHolder; + log_.debug() << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq << " tuple = " << placeHolder; } statement.bindNextUInt(limit); @@ -713,9 +639,7 @@ CassandraBackend::fetchNFTTransactions( { log_.debug() << "Setting cursor"; auto const [lgrSeq, txnIdx] = result.getInt64Tuple(); - cursor = { - static_cast(lgrSeq), - static_cast(txnIdx)}; + cursor = {static_cast(lgrSeq), static_cast(txnIdx)}; // Only modify if forward because forward query // (selectNFTTxForward_) orders by ledger/tx sequence >= whereas @@ -760,21 +684,17 @@ CassandraBackend::fetchAccountTransactions( statement.bindNextBytes(account); if (cursor) { - statement.bindNextIntTuple( - cursor->ledgerSequence, cursor->transactionIndex); - log_.debug() << "account = " << ripple::strHex(account) - << " tuple = " << cursor->ledgerSequence + statement.bindNextIntTuple(cursor->ledgerSequence, cursor->transactionIndex); + log_.debug() << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence << cursor->transactionIndex; } else { int const seq = forward ? rng->minSequence : rng->maxSequence; - int const placeHolder = - forward ? 0 : std::numeric_limits::max(); + int const placeHolder = forward ? 0 : std::numeric_limits::max(); statement.bindNextIntTuple(placeHolder, placeHolder); - log_.debug() << "account = " << ripple::strHex(account) - << " idx = " << seq << " tuple = " << placeHolder; + log_.debug() << "account = " << ripple::strHex(account) << " idx = " << seq << " tuple = " << placeHolder; } statement.bindNextUInt(limit); @@ -796,9 +716,7 @@ CassandraBackend::fetchAccountTransactions( { log_.debug() << "Setting cursor"; auto [lgrSeq, txnIdx] = result.getInt64Tuple(); - cursor = { - static_cast(lgrSeq), - static_cast(txnIdx)}; + cursor = {static_cast(lgrSeq), static_cast(txnIdx)}; // Only modify if forward because forward query // (selectAccountTxForward_) orders by ledger/tx sequence >= whereas @@ -890,8 +808,8 @@ CassandraBackend::doFetchLedgerObjects( cbs.reserve(numKeys); for (std::size_t i = 0; i < keys.size(); ++i) { - cbs.push_back(std::make_shared>( - numOutstanding, handler, [i, &results](auto& result) { + cbs.push_back( + std::make_shared>(numOutstanding, handler, [i, &results](auto& result) { if (result.hasResult()) results[i] = result.getBytes(); })); @@ -917,9 +835,7 @@ CassandraBackend::doFetchLedgerObjects( } std::vector -CassandraBackend::fetchLedgerDiff( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const +CassandraBackend::fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const { CassandraStatement statement{selectDiff_}; statement.bindNextInt(ledgerSequence); @@ -939,29 +855,19 @@ CassandraBackend::fetchLedgerDiff( { keys.push_back(result.getUInt256()); } while (result.nextRow()); - log_.debug() << "Fetched " << keys.size() - << " diff hashes from Cassandra in " - << std::chrono::duration_cast( - end - start) - .count() - << " milliseconds"; + log_.debug() << "Fetched " << keys.size() << " diff hashes from Cassandra in " + << std::chrono::duration_cast(end - start).count() << " milliseconds"; auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); std::vector results; std::transform( - keys.begin(), - keys.end(), - objs.begin(), - std::back_inserter(results), - [](auto const& k, auto const& o) { + keys.begin(), 
keys.end(), objs.begin(), std::back_inserter(results), [](auto const& k, auto const& o) { return LedgerObject{k, o}; }); return results; } bool -CassandraBackend::doOnlineDelete( - std::uint32_t const numLedgersToKeep, - boost::asio::yield_context& yield) const +CassandraBackend::doOnlineDelete(std::uint32_t const numLedgersToKeep, boost::asio::yield_context& yield) const { // calculate TTL // ledgers close roughly every 4 seconds. We double the TTL so that way @@ -994,17 +900,15 @@ CassandraBackend::doOnlineDelete( std::optional cursor; while (true) { - auto [objects, curCursor] = retryOnTimeout([&]() { - return fetchLedgerPage(cursor, minLedger, 256, false, yield); - }); + auto [objects, curCursor] = + retryOnTimeout([&]() { return fetchLedgerPage(cursor, minLedger, 256, false, yield); }); for (auto& obj : objects) { ++numOutstanding; cbs.push_back(makeAndExecuteBulkAsyncWrite( this, - std::make_tuple( - std::move(obj.key), minLedger, std::move(obj.blob)), + std::make_tuple(std::move(obj.key), minLedger, std::move(obj.blob)), bind, numOutstanding, mtx, @@ -1012,9 +916,7 @@ CassandraBackend::doOnlineDelete( std::unique_lock lck(mtx); log_.trace() << "Got the mutex"; - cv.wait(lck, [&numOutstanding, concurrentLimit]() { - return numOutstanding < concurrentLimit; - }); + cv.wait(lck, [&numOutstanding, concurrentLimit]() { return numOutstanding < concurrentLimit; }); } log_.debug() << "Fetched a page"; cursor = curCursor; @@ -1052,15 +954,13 @@ CassandraBackend::open(bool readOnly) if (!cluster) throw std::runtime_error("nodestore:: Failed to create CassCluster"); - std::string secureConnectBundle = - config_.valueOr("secure_connect_bundle", ""); + std::string secureConnectBundle = config_.valueOr("secure_connect_bundle", ""); if (!secureConnectBundle.empty()) { /* Setup driver to connect to the cloud using the secure connection * bundle */ - if (cass_cluster_set_cloud_secure_connection_bundle( - cluster, secureConnectBundle.c_str()) != CASS_OK) + if (cass_cluster_set_cloud_secure_connection_bundle(cluster, secureConnectBundle.c_str()) != CASS_OK) { log_.error() << "Unable to configure cloud using the " "secure connection bundle: " @@ -1074,15 +974,12 @@ CassandraBackend::open(bool readOnly) else { std::string contact_points = config_.valueOrThrow( - "contact_points", - "nodestore: Missing contact_points in Cassandra config"); - CassError rc = - cass_cluster_set_contact_points(cluster, contact_points.c_str()); + "contact_points", "nodestore: Missing contact_points in Cassandra config"); + CassError rc = cass_cluster_set_contact_points(cluster, contact_points.c_str()); if (rc != CASS_OK) { std::stringstream ss; - ss << "nodestore: Error setting Cassandra contact_points: " - << contact_points << ", result: " << rc << ", " + ss << "nodestore: Error setting Cassandra contact_points: " << contact_points << ", result: " << rc << ", " << cass_error_desc(rc); throw std::runtime_error(ss.str()); @@ -1095,16 +992,15 @@ CassandraBackend::open(bool readOnly) if (rc != CASS_OK) { std::stringstream ss; - ss << "nodestore: Error setting Cassandra port: " << *port - << ", result: " << rc << ", " << cass_error_desc(rc); + ss << "nodestore: Error setting Cassandra port: " << *port << ", result: " << rc << ", " + << cass_error_desc(rc); throw std::runtime_error(ss.str()); } } } cass_cluster_set_token_aware_routing(cluster, cass_true); - CassError rc = - cass_cluster_set_protocol_version(cluster, CASS_PROTOCOL_VERSION_V4); + CassError rc = cass_cluster_set_protocol_version(cluster, CASS_PROTOCOL_VERSION_V4); if 
(rc != CASS_OK) { std::stringstream ss; @@ -1119,40 +1015,32 @@ CassandraBackend::open(bool readOnly) { log_.debug() << "user = " << *username; auto password = config_.value("password"); - cass_cluster_set_credentials( - cluster, username->c_str(), password.c_str()); + cass_cluster_set_credentials(cluster, username->c_str(), password.c_str()); } - auto threads = - config_.valueOr("threads", std::thread::hardware_concurrency()); + auto threads = config_.valueOr("threads", std::thread::hardware_concurrency()); rc = cass_cluster_set_num_threads_io(cluster, threads); if (rc != CASS_OK) { std::stringstream ss; - ss << "nodestore: Error setting Cassandra io threads to " << threads - << ", result: " << rc << ", " << cass_error_desc(rc); + ss << "nodestore: Error setting Cassandra io threads to " << threads << ", result: " << rc << ", " + << cass_error_desc(rc); throw std::runtime_error(ss.str()); } - maxWriteRequestsOutstanding = config_.valueOr( - "max_write_requests_outstanding", maxWriteRequestsOutstanding); - maxReadRequestsOutstanding = config_.valueOr( - "max_read_requests_outstanding", maxReadRequestsOutstanding); + maxWriteRequestsOutstanding = config_.valueOr("max_write_requests_outstanding", maxWriteRequestsOutstanding); + maxReadRequestsOutstanding = config_.valueOr("max_read_requests_outstanding", maxReadRequestsOutstanding); syncInterval_ = config_.valueOr("sync_interval", syncInterval_); - log_.info() << "Sync interval is " << syncInterval_ - << ". max write requests outstanding is " - << maxWriteRequestsOutstanding - << ". max read requests outstanding is " - << maxReadRequestsOutstanding; + log_.info() << "Sync interval is " << syncInterval_ << ". max write requests outstanding is " + << maxWriteRequestsOutstanding << ". max read requests outstanding is " << maxReadRequestsOutstanding; cass_cluster_set_request_timeout(cluster, 10000); rc = cass_cluster_set_queue_size_io( cluster, - maxWriteRequestsOutstanding + - maxReadRequestsOutstanding); // This number needs to scale w/ the - // number of request per sec + maxWriteRequestsOutstanding + maxReadRequestsOutstanding); // This number needs to scale w/ the + // number of request per sec if (rc != CASS_OK) { std::stringstream ss; @@ -1165,17 +1053,14 @@ CassandraBackend::open(bool readOnly) if (auto certfile = config_.maybeValue("certfile"); certfile) { - std::ifstream fileStream( - boost::filesystem::path(*certfile).string(), std::ios::in); + std::ifstream fileStream(boost::filesystem::path(*certfile).string(), std::ios::in); if (!fileStream) { std::stringstream ss; ss << "opening config file " << *certfile; throw std::system_error(errno, std::generic_category(), ss.str()); } - std::string cert( - std::istreambuf_iterator{fileStream}, - std::istreambuf_iterator{}); + std::string cert(std::istreambuf_iterator{fileStream}, std::istreambuf_iterator{}); if (fileStream.bad()) { std::stringstream ss; @@ -1189,8 +1074,7 @@ CassandraBackend::open(bool readOnly) if (rc != CASS_OK) { std::stringstream ss; - ss << "nodestore: Error setting Cassandra ssl context: " << rc - << ", " << cass_error_desc(rc); + ss << "nodestore: Error setting Cassandra ssl context: " << rc << ", " << cass_error_desc(rc); throw std::runtime_error(ss.str()); } @@ -1226,8 +1110,8 @@ CassandraBackend::open(bool readOnly) if (rc != CASS_OK && rc != CASS_ERROR_SERVER_INVALID_QUERY) { std::stringstream ss; - ss << "nodestore: Error executing simple statement: " << rc << ", " - << cass_error_desc(rc) << " - " << query; + ss << "nodestore: Error executing simple statement: " 
<< rc << ", " << cass_error_desc(rc) << " - " + << query; log_.error() << ss.str(); return false; } @@ -1241,15 +1125,13 @@ CassandraBackend::open(bool readOnly) session_.reset(cass_session_new()); assert(session_); - fut = cass_session_connect_keyspace( - session_.get(), cluster, keyspace.c_str()); + fut = cass_session_connect_keyspace(session_.get(), cluster, keyspace.c_str()); rc = cass_future_error_code(fut); cass_future_free(fut); if (rc != CASS_OK) { std::stringstream ss; - ss << "nodestore: Error connecting Cassandra session keyspace: " - << rc << ", " << cass_error_desc(rc) + ss << "nodestore: Error connecting Cassandra session keyspace: " << rc << ", " << cass_error_desc(rc) << ", trying to create it ourselves"; log_.error() << ss.str(); // if the keyspace doesn't exist, try to create it @@ -1260,8 +1142,7 @@ CassandraBackend::open(bool readOnly) if (rc != CASS_OK) { std::stringstream ss; - ss << "nodestore: Error connecting Cassandra session at all: " - << rc << ", " << cass_error_desc(rc); + ss << "nodestore: Error connecting Cassandra session at all: " << rc << ", " << cass_error_desc(rc); log_.error() << ss.str(); } else @@ -1298,16 +1179,14 @@ CassandraBackend::open(bool readOnly) continue; query.str(""); - query - << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "transactions" - << " ( hash blob PRIMARY KEY, ledger_sequence bigint, date bigint, " - "transaction blob, metadata blob)" - << " WITH default_time_to_live = " << std::to_string(ttl); + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "transactions" + << " ( hash blob PRIMARY KEY, ledger_sequence bigint, date bigint, " + "transaction blob, metadata blob)" + << " WITH default_time_to_live = " << std::to_string(ttl); if (!executeSimpleStatement(query.str())) continue; query.str(""); - query << "CREATE TABLE IF NOT EXISTS " << tablePrefix - << "ledger_transactions" + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "ledger_transactions" << " ( ledger_sequence bigint, hash blob, PRIMARY " "KEY(ledger_sequence, hash))" << " WITH default_time_to_live = " << std::to_string(ttl); @@ -1429,8 +1308,7 @@ CassandraBackend::open(bool readOnly) continue; query.str(""); - query << "CREATE TABLE IF NOT EXISTS " << tablePrefix - << "issuer_nf_tokens_v2" + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "issuer_nf_tokens_v2" << " (" << " issuer blob," << " taxon bigint," @@ -1466,8 +1344,7 @@ CassandraBackend::open(bool readOnly) continue; query.str(""); - query << "CREATE TABLE IF NOT EXISTS " << tablePrefix - << "nf_token_transactions" + query << "CREATE TABLE IF NOT EXISTS " << tablePrefix << "nf_token_transactions" << " (" << " token_id blob," << " seq_idx tuple," @@ -1546,8 +1423,7 @@ CassandraBackend::open(bool readOnly) continue; query.str(""); - query << "SELECT transaction, metadata, ledger_sequence, date FROM " - << tablePrefix << "transactions" + query << "SELECT transaction, metadata, ledger_sequence, date FROM " << tablePrefix << "transactions" << " WHERE hash = ?"; if (!selectTransaction_.prepareStatement(query, session_.get())) continue; @@ -1555,8 +1431,7 @@ CassandraBackend::open(bool readOnly) query.str(""); query << "SELECT hash FROM " << tablePrefix << "ledger_transactions" << " WHERE ledger_sequence = ?"; - if (!selectAllTransactionHashesInLedger_.prepareStatement( - query, session_.get())) + if (!selectAllTransactionHashesInLedger_.prepareStatement(query, session_.get())) continue; query.str(""); @@ -1701,14 +1576,12 @@ CassandraBackend::open(bool readOnly) continue; query.str(""); - 
query << " select header from " << tablePrefix - << "ledgers where sequence = ?"; + query << " select header from " << tablePrefix << "ledgers where sequence = ?"; if (!selectLedgerBySeq_.prepareStatement(query, session_.get())) continue; query.str(""); - query << " select sequence from " << tablePrefix - << "ledger_range where is_latest = true"; + query << " select sequence from " << tablePrefix << "ledger_range where is_latest = true"; if (!selectLatestLedger_.prepareStatement(query, session_.get())) continue; diff --git a/src/backend/CassandraBackend.h b/src/backend/CassandraBackend.h index ed44c4f4..856068a5 100644 --- a/src/backend/CassandraBackend.h +++ b/src/backend/CassandraBackend.h @@ -85,8 +85,8 @@ public: else { std::stringstream ss; - ss << "nodestore: Error preparing statement : " << rc << ", " - << cass_error_desc(rc) << ". query : " << query; + ss << "nodestore: Error preparing statement : " << rc << ", " << cass_error_desc(rc) + << ". query : " << query; log_.error() << ss.str(); } cass_future_free(prepareFuture); @@ -137,15 +137,12 @@ public: bindNextBoolean(bool val) { if (!statement_) - throw std::runtime_error( - "CassandraStatement::bindNextBoolean - statement_ is null"); - CassError rc = cass_statement_bind_bool( - statement_, curBindingIndex_, static_cast(val)); + throw std::runtime_error("CassandraStatement::bindNextBoolean - statement_ is null"); + CassError rc = cass_statement_bind_bool(statement_, curBindingIndex_, static_cast(val)); if (rc != CASS_OK) { std::stringstream ss; - ss << "Error binding boolean to statement: " << rc << ", " - << cass_error_desc(rc); + ss << "Error binding boolean to statement: " << rc << ", " << cass_error_desc(rc); log_.error() << ss.str(); throw std::runtime_error(ss.str()); } @@ -190,18 +187,13 @@ public: bindNextBytes(const unsigned char* data, std::uint32_t const size) { if (!statement_) - throw std::runtime_error( - "CassandraStatement::bindNextBytes - statement_ is null"); - CassError rc = cass_statement_bind_bytes( - statement_, - curBindingIndex_, - static_cast(data), - size); + throw std::runtime_error("CassandraStatement::bindNextBytes - statement_ is null"); + CassError rc = + cass_statement_bind_bytes(statement_, curBindingIndex_, static_cast(data), size); if (rc != CASS_OK) { std::stringstream ss; - ss << "Error binding bytes to statement: " << rc << ", " - << cass_error_desc(rc); + ss << "Error binding bytes to statement: " << rc << ", " << cass_error_desc(rc); log_.error() << ss.str(); throw std::runtime_error(ss.str()); } @@ -212,17 +204,13 @@ public: bindNextUInt(std::uint32_t const value) { if (!statement_) - throw std::runtime_error( - "CassandraStatement::bindNextUInt - statement_ is null"); - log_.trace() << std::to_string(curBindingIndex_) << " " - << std::to_string(value); - CassError rc = - cass_statement_bind_int32(statement_, curBindingIndex_, value); + throw std::runtime_error("CassandraStatement::bindNextUInt - statement_ is null"); + log_.trace() << std::to_string(curBindingIndex_) << " " << std::to_string(value); + CassError rc = cass_statement_bind_int32(statement_, curBindingIndex_, value); if (rc != CASS_OK) { std::stringstream ss; - ss << "Error binding uint to statement: " << rc << ", " - << cass_error_desc(rc); + ss << "Error binding uint to statement: " << rc << ", " << cass_error_desc(rc); log_.error() << ss.str(); throw std::runtime_error(ss.str()); } @@ -239,15 +227,12 @@ public: bindNextInt(int64_t value) { if (!statement_) - throw std::runtime_error( - "CassandraStatement::bindNextInt - 
statement_ is null"); - CassError rc = - cass_statement_bind_int64(statement_, curBindingIndex_, value); + throw std::runtime_error("CassandraStatement::bindNextInt - statement_ is null"); + CassError rc = cass_statement_bind_int64(statement_, curBindingIndex_, value); if (rc != CASS_OK) { std::stringstream ss; - ss << "Error binding int to statement: " << rc << ", " - << cass_error_desc(rc); + ss << "Error binding int to statement: " << rc << ", " << cass_error_desc(rc); log_.error() << ss.str(); throw std::runtime_error(ss.str()); } @@ -262,8 +247,7 @@ public: if (rc != CASS_OK) { std::stringstream ss; - ss << "Error binding int to tuple: " << rc << ", " - << cass_error_desc(rc); + ss << "Error binding int to tuple: " << rc << ", " << cass_error_desc(rc); log_.error() << ss.str(); throw std::runtime_error(ss.str()); } @@ -271,8 +255,7 @@ public: if (rc != CASS_OK) { std::stringstream ss; - ss << "Error binding int to tuple: " << rc << ", " - << cass_error_desc(rc); + ss << "Error binding int to tuple: " << rc << ", " << cass_error_desc(rc); log_.error() << ss.str(); throw std::runtime_error(ss.str()); } @@ -280,8 +263,7 @@ public: if (rc != CASS_OK) { std::stringstream ss; - ss << "Error binding tuple to statement: " << rc << ", " - << cass_error_desc(rc); + ss << "Error binding tuple to statement: " << rc << ", " << cass_error_desc(rc); log_.error() << ss.str(); throw std::runtime_error(ss.str()); } @@ -382,13 +364,11 @@ public: throw std::runtime_error("CassandraResult::getBytes - no result"); cass_byte_t const* buf; std::size_t bufSize; - CassError rc = cass_value_get_bytes( - cass_row_get_column(row_, curGetIndex_), &buf, &bufSize); + CassError rc = cass_value_get_bytes(cass_row_get_column(row_, curGetIndex_), &buf, &bufSize); if (rc != CASS_OK) { std::stringstream msg; - msg << "CassandraResult::getBytes - error getting value: " << rc - << ", " << cass_error_desc(rc); + msg << "CassandraResult::getBytes - error getting value: " << rc << ", " << cass_error_desc(rc); log_.error() << msg.str(); throw std::runtime_error(msg.str()); } @@ -403,13 +383,11 @@ public: throw std::runtime_error("CassandraResult::uint256 - no result"); cass_byte_t const* buf; std::size_t bufSize; - CassError rc = cass_value_get_bytes( - cass_row_get_column(row_, curGetIndex_), &buf, &bufSize); + CassError rc = cass_value_get_bytes(cass_row_get_column(row_, curGetIndex_), &buf, &bufSize); if (rc != CASS_OK) { std::stringstream msg; - msg << "CassandraResult::getuint256 - error getting value: " << rc - << ", " << cass_error_desc(rc); + msg << "CassandraResult::getuint256 - error getting value: " << rc << ", " << cass_error_desc(rc); log_.error() << msg.str(); throw std::runtime_error(msg.str()); } @@ -423,13 +401,11 @@ public: if (!row_) throw std::runtime_error("CassandraResult::getInt64 - no result"); cass_int64_t val; - CassError rc = - cass_value_get_int64(cass_row_get_column(row_, curGetIndex_), &val); + CassError rc = cass_value_get_int64(cass_row_get_column(row_, curGetIndex_), &val); if (rc != CASS_OK) { std::stringstream msg; - msg << "CassandraResult::getInt64 - error getting value: " << rc - << ", " << cass_error_desc(rc); + msg << "CassandraResult::getInt64 - error getting value: " << rc << ", " << cass_error_desc(rc); log_.error() << msg.str(); throw std::runtime_error(msg.str()); } @@ -447,8 +423,7 @@ public: getInt64Tuple() { if (!row_) - throw std::runtime_error( - "CassandraResult::getInt64Tuple - no result"); + throw std::runtime_error("CassandraResult::getInt64Tuple - no result"); CassValue 
const* tuple = cass_row_get_column(row_, curGetIndex_); CassIterator* tupleIter = cass_iterator_from_tuple(tuple); @@ -456,8 +431,7 @@ public: if (!cass_iterator_next(tupleIter)) { cass_iterator_free(tupleIter); - throw std::runtime_error( - "CassandraResult::getInt64Tuple - failed to iterate tuple"); + throw std::runtime_error("CassandraResult::getInt64Tuple - failed to iterate tuple"); } CassValue const* value = cass_iterator_get_value(tupleIter); @@ -466,8 +440,7 @@ public: if (!cass_iterator_next(tupleIter)) { cass_iterator_free(tupleIter); - throw std::runtime_error( - "CassandraResult::getInt64Tuple - failed to iterate tuple"); + throw std::runtime_error("CassandraResult::getInt64Tuple - failed to iterate tuple"); } value = cass_iterator_get_value(tupleIter); @@ -486,20 +459,17 @@ public: std::size_t bufSize; if (!row_) - throw std::runtime_error( - "CassandraResult::getBytesTuple - no result"); + throw std::runtime_error("CassandraResult::getBytesTuple - no result"); CassValue const* tuple = cass_row_get_column(row_, curGetIndex_); CassIterator* tupleIter = cass_iterator_from_tuple(tuple); if (!cass_iterator_next(tupleIter)) - throw std::runtime_error( - "CassandraResult::getBytesTuple - failed to iterate tuple"); + throw std::runtime_error("CassandraResult::getBytesTuple - failed to iterate tuple"); CassValue const* value = cass_iterator_get_value(tupleIter); cass_value_get_bytes(value, &buf, &bufSize); Blob first{buf, buf + bufSize}; if (!cass_iterator_next(tupleIter)) - throw std::runtime_error( - "CassandraResult::getBytesTuple - failed to iterate tuple"); + throw std::runtime_error("CassandraResult::getBytesTuple - failed to iterate tuple"); value = cass_iterator_get_value(tupleIter); cass_value_get_bytes(value, &buf, &bufSize); Blob second{buf, buf + bufSize}; @@ -519,8 +489,7 @@ public: throw std::runtime_error(msg); } cass_bool_t val; - CassError rc = - cass_value_get_bool(cass_row_get_column(row_, curGetIndex_), &val); + CassError rc = cass_value_get_bool(cass_row_get_column(row_, curGetIndex_), &val); if (rc != CASS_OK) { std::stringstream msg; @@ -544,10 +513,8 @@ public: inline bool isTimeout(CassError rc) { - if (rc == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or - rc == CASS_ERROR_LIB_REQUEST_TIMED_OUT or - rc == CASS_ERROR_SERVER_UNAVAILABLE or - rc == CASS_ERROR_SERVER_OVERLOADED or + if (rc == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or rc == CASS_ERROR_LIB_REQUEST_TIMED_OUT or + rc == CASS_ERROR_SERVER_UNAVAILABLE or rc == CASS_ERROR_SERVER_OVERLOADED or rc == CASS_ERROR_SERVER_READ_TIMEOUT) return true; return false; @@ -558,8 +525,7 @@ CassError cass_future_error_code(CassFuture* fut, CompletionToken&& token) { using function_type = void(boost::system::error_code, CassError); - using result_type = - boost::asio::async_result; + using result_type = boost::asio::async_result; using handler_type = typename result_type::completion_handler_type; handler_type handler(std::forward(token)); @@ -578,12 +544,10 @@ cass_future_error_code(CassFuture* fut, CompletionToken&& token) HandlerWrapper* hw = (HandlerWrapper*)data; boost::asio::post( - boost::asio::get_associated_executor(hw->handler), - [fut, hw, handler = std::move(hw->handler)]() mutable { + boost::asio::get_associated_executor(hw->handler), [fut, hw, handler = std::move(hw->handler)]() mutable { delete hw; - handler( - boost::system::error_code{}, cass_future_error_code(fut)); + handler(boost::system::error_code{}, cass_future_error_code(fut)); }); }; @@ -608,13 +572,12 @@ private: makeStatement(char const* query, std::size_t 
params) { CassStatement* ret = cass_statement_new(query, params); - CassError rc = - cass_statement_set_consistency(ret, CASS_CONSISTENCY_QUORUM); + CassError rc = cass_statement_set_consistency(ret, CASS_CONSISTENCY_QUORUM); if (rc != CASS_OK) { std::stringstream ss; - ss << "nodestore: Error setting query consistency: " << query - << ", result: " << rc << ", " << cass_error_desc(rc); + ss << "nodestore: Error setting query consistency: " << query << ", result: " << rc << ", " + << cass_error_desc(rc); throw std::runtime_error(ss.str()); } return ret; @@ -623,15 +586,13 @@ private: clio::Logger log_{"Backend"}; std::atomic open_{false}; - std::unique_ptr session_{ - nullptr, - [](CassSession* session) { - // Try to disconnect gracefully. - CassFuture* fut = cass_session_close(session); - cass_future_wait(fut); - cass_future_free(fut); - cass_session_free(session); - }}; + std::unique_ptr session_{nullptr, [](CassSession* session) { + // Try to disconnect gracefully. + CassFuture* fut = cass_session_close(session); + cass_future_wait(fut); + cass_future_free(fut); + cass_session_free(session); + }}; // Database statements cached server side. Using these is more efficient // than making a new statement @@ -704,10 +665,7 @@ private: mutable std::uint32_t ledgerSequence_ = 0; public: - CassandraBackend( - boost::asio::io_context& ioc, - clio::Config const& config, - uint32_t ttl) + CassandraBackend(boost::asio::io_context& ioc, clio::Config const& config, uint32_t ttl) : config_(config), ttl_(ttl) { work_.emplace(ioContext_); @@ -777,8 +735,7 @@ public: statement.bindNextInt(ledgerSequence_ - 1); if (!executeSyncUpdate(statement)) { - log_.warn() << "Update failed for ledger " - << std::to_string(ledgerSequence_) << ". Returning"; + log_.warn() << "Update failed for ledger " << std::to_string(ledgerSequence_) << ". Returning"; return false; } log_.info() << "Committed ledger " << std::to_string(ledgerSequence_); @@ -792,8 +749,7 @@ public: // if db is empty, sync. if sync interval is 1, always sync. // if we've never synced, sync. if its been greater than the configured // sync interval since we last synced, sync. - if (!range || lastSync_ == 0 || - ledgerSequence_ - syncInterval_ >= lastSync_) + if (!range || lastSync_ == 0 || ledgerSequence_ - syncInterval_ >= lastSync_) { // wait for all other writes to finish sync(); @@ -815,20 +771,16 @@ public: statement.bindNextInt(lastSync_); if (!executeSyncUpdate(statement)) { - log_.warn() << "Update failed for ledger " - << std::to_string(ledgerSequence_) << ". Returning"; + log_.warn() << "Update failed for ledger " << std::to_string(ledgerSequence_) << ". Returning"; return false; } - log_.info() << "Committed ledger " - << std::to_string(ledgerSequence_); + log_.info() << "Committed ledger " << std::to_string(ledgerSequence_); lastSync_ = ledgerSequence_; } else { - log_.info() << "Skipping commit. sync interval is " - << std::to_string(syncInterval_) << " - last sync is " - << std::to_string(lastSync_) << " - ledger sequence is " - << std::to_string(ledgerSequence_); + log_.info() << "Skipping commit. 
sync interval is " << std::to_string(syncInterval_) << " - last sync is " + << std::to_string(lastSync_) << " - ledger sequence is " << std::to_string(ledgerSequence_); } return true; } @@ -842,8 +794,7 @@ public: return doFinishWritesAsync(); } void - writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& header) - override; + writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& header) override; std::optional fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override @@ -853,17 +804,14 @@ public: CassandraResult result = executeAsyncRead(statement, yield); if (!result.hasResult()) { - log_.error() - << "CassandraBackend::fetchLatestLedgerSequence - no rows"; + log_.error() << "CassandraBackend::fetchLatestLedgerSequence - no rows"; return {}; } return result.getUInt32(); } std::optional - fetchLedgerBySequence( - std::uint32_t const sequence, - boost::asio::yield_context& yield) const override + fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context& yield) const override { log_.trace() << "called"; CassandraStatement statement{selectLedgerBySeq_}; @@ -879,9 +827,7 @@ public: } std::optional - fetchLedgerByHash( - ripple::uint256 const& hash, - boost::asio::yield_context& yield) const override + fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context& yield) const override { CassandraStatement statement{selectLedgerByHash_}; @@ -904,20 +850,15 @@ public: hardFetchLedgerRange(boost::asio::yield_context& yield) const override; std::vector - fetchAllTransactionsInLedger( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override; + fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const override; std::vector - fetchAllTransactionHashesInLedger( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override; + fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const override; std::optional - fetchNFT( - ripple::uint256 const& tokenID, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override; + fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const override; TransactionsAndCursor fetchNFTTransactions( @@ -930,15 +871,11 @@ public: // Synchronously fetch the object with key key, as of ledger with sequence // sequence std::optional - doFetchLedgerObject( - ripple::uint256 const& key, - std::uint32_t const sequence, - boost::asio::yield_context& yield) const override; + doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield) + const override; std::optional - fetchTransaction( - ripple::uint256 const& hash, - boost::asio::yield_context& yield) const override + fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context& yield) const override { log_.trace() << "called"; CassandraStatement statement{selectTransaction_}; @@ -950,23 +887,15 @@ public: log_.error() << "No rows"; return {}; } - return { - {result.getBytes(), - result.getBytes(), - result.getUInt32(), - result.getUInt32()}}; + return {{result.getBytes(), result.getBytes(), result.getUInt32(), result.getUInt32()}}; } std::optional - doFetchSuccessorKey( - ripple::uint256 key, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override; + doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const 
ledgerSequence, boost::asio::yield_context& yield) + const override; std::vector - fetchTransactions( - std::vector const& hashes, - boost::asio::yield_context& yield) const override; + fetchTransactions(std::vector const& hashes, boost::asio::yield_context& yield) const override; std::vector doFetchLedgerObjects( @@ -975,25 +904,16 @@ public: boost::asio::yield_context& yield) const override; std::vector - fetchLedgerDiff( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override; + fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const override; void - doWriteLedgerObject( - std::string&& key, - std::uint32_t const seq, - std::string&& blob) override; + doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override; void - writeSuccessor( - std::string&& key, - std::uint32_t const seq, - std::string&& successor) override; + writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override; void - writeAccountTransactions( - std::vector&& data) override; + writeAccountTransactions(std::vector&& data) override; void writeNFTTransactions(std::vector&& data) override; @@ -1023,9 +943,7 @@ public: } bool - doOnlineDelete( - std::uint32_t const numLedgersToKeep, - boost::asio::yield_context& yield) const override; + doOnlineDelete(std::uint32_t const numLedgersToKeep, boost::asio::yield_context& yield) const override; bool isTooBusy() const override; @@ -1090,26 +1008,18 @@ public: template void - executeAsyncHelper( - CassandraStatement const& statement, - T callback, - S& callbackData) const + executeAsyncHelper(CassandraStatement const& statement, T callback, S& callbackData) const { CassFuture* fut = cass_session_execute(session_.get(), statement.get()); - cass_future_set_callback( - fut, callback, static_cast(&callbackData)); + cass_future_set_callback(fut, callback, static_cast(&callbackData)); cass_future_free(fut); } template void - executeAsyncWrite( - CassandraStatement const& statement, - T callback, - S& callbackData, - bool isRetry) const + executeAsyncWrite(CassandraStatement const& statement, T callback, S& callbackData, bool isRetry) const { if (!isRetry) incrementOutstandingRequestCount(); @@ -1118,10 +1028,7 @@ public: template void - executeAsyncRead( - CassandraStatement const& statement, - T callback, - S& callbackData) const + executeAsyncRead(CassandraStatement const& statement, T callback, S& callbackData) const { executeAsyncHelper(statement, callback, callbackData); } @@ -1184,8 +1091,7 @@ public: if (rc != CASS_OK) { cass_result_free(res); - log_.error() << "executeSyncUpdate - error getting result " << rc - << ", " << cass_error_desc(rc); + log_.error() << "executeSyncUpdate - error getting result " << rc << ", " << cass_error_desc(rc); return false; } cass_result_free(res); @@ -1206,13 +1112,10 @@ public: } CassandraResult - executeAsyncRead( - CassandraStatement const& statement, - boost::asio::yield_context& yield) const + executeAsyncRead(CassandraStatement const& statement, boost::asio::yield_context& yield) const { - using result = boost::asio::async_result< - boost::asio::yield_context, - void(boost::system::error_code, CassError)>; + using result = + boost::asio::async_result; CassFuture* fut; CassError rc; diff --git a/src/backend/CassandraBackendNew.h b/src/backend/CassandraBackendNew.h index feffb0e6..c4366290 100644 --- a/src/backend/CassandraBackendNew.h +++ b/src/backend/CassandraBackendNew.h @@ -42,9 +42,7 @@ namespace 
Backend::Cassandra { * Eventually we should change the interface so that it does not have to know * about yield_context. */ -template < - SomeSettingsProvider SettingsProviderType, - SomeExecutionStrategy ExecutionStrategy> +template class BasicCassandraBackend : public BackendInterface { clio::Logger log_{"Backend"}; @@ -71,11 +69,9 @@ public: , executor_{settingsProvider_.getSettings(), handle_} { if (auto const res = handle_.connect(); not res) - throw std::runtime_error( - "Could not connect to Cassandra: " + res.error()); + throw std::runtime_error("Could not connect to Cassandra: " + res.error()); if (auto const res = handle_.execute(schema_.createKeyspace); not res) - throw std::runtime_error( - "Could not create keyspace: " + res.error()); + throw std::runtime_error("Could not create keyspace: " + res.error()); if (auto const res = handle_.executeEach(schema_.createSchema); not res) throw std::runtime_error("Could not create schema: " + res.error()); @@ -118,19 +114,16 @@ public: if (cursor) { statement.bindAt(1, cursor->asTuple()); - log_.debug() << "account = " << ripple::strHex(account) - << " tuple = " << cursor->ledgerSequence + log_.debug() << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence << cursor->transactionIndex; } else { auto const seq = forward ? rng->minSequence : rng->maxSequence; - auto const placeHolder = - forward ? 0u : std::numeric_limits::max(); + auto const placeHolder = forward ? 0u : std::numeric_limits::max(); statement.bindAt(1, std::make_tuple(placeHolder, placeHolder)); - log_.debug() << "account = " << ripple::strHex(account) - << " idx = " << seq << " tuple = " << placeHolder; + log_.debug() << "account = " << ripple::strHex(account) << " idx = " << seq << " tuple = " << placeHolder; } // FIXME: Limit is a hack to support uint32_t properly for the time @@ -149,8 +142,7 @@ public: auto numRows = results.numRows(); log_.info() << "num_rows = " << numRows; - for (auto [hash, data] : - extract>(results)) + for (auto [hash, data] : extract>(results)) { hashes.push_back(hash); if (--numRows == 0) @@ -185,15 +177,10 @@ public: if (!range) { - executor_.writeSync( - schema_->updateLedgerRange, - ledgerSequence_, - false, - ledgerSequence_); + executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_); } - if (not executeSyncUpdate(schema_->updateLedgerRange.bind( - ledgerSequence_, true, ledgerSequence_ - 1))) + if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) { log_.warn() << "Update failed for ledger " << ledgerSequence_; return false; @@ -204,14 +191,11 @@ public: } void - writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& header) - override + writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& header) override { - executor_.write( - schema_->insertLedgerHeader, ledgerInfo.seq, std::move(header)); + executor_.write(schema_->insertLedgerHeader, ledgerInfo.seq, std::move(header)); - executor_.write( - schema_->insertLedgerHash, ledgerInfo.hash, ledgerInfo.seq); + executor_.write(schema_->insertLedgerHash, ledgerInfo.hash, ledgerInfo.seq); ledgerSequence_ = ledgerInfo.seq; } @@ -219,13 +203,11 @@ public: std::optional fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override { - if (auto const res = executor_.read(yield, schema_->selectLatestLedger); - res) + if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) { if (auto const& result = res.value(); result) { - if (auto const 
maybeValue = result.template get(); - maybeValue) + if (auto const maybeValue = result.template get(); maybeValue) return maybeValue; log_.error() << "Could not fetch latest ledger - no rows"; @@ -243,21 +225,16 @@ public: } std::optional - fetchLedgerBySequence( - std::uint32_t const sequence, - boost::asio::yield_context& yield) const override + fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context& yield) const override { log_.trace() << __func__ << " call for seq " << sequence; - auto const res = - executor_.read(yield, schema_->selectLedgerBySeq, sequence); + auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence); if (res) { if (auto const& result = res.value(); result) { - if (auto const maybeValue = - result.template get>(); - maybeValue) + if (auto const maybeValue = result.template get>(); maybeValue) { return deserializeHeader(ripple::makeSlice(*maybeValue)); } @@ -270,28 +247,22 @@ public: } else { - log_.error() << "Could not fetch ledger by sequence: " - << res.error(); + log_.error() << "Could not fetch ledger by sequence: " << res.error(); } return std::nullopt; } std::optional - fetchLedgerByHash( - ripple::uint256 const& hash, - boost::asio::yield_context& yield) const override + fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context& yield) const override { log_.trace() << __func__ << " call"; - if (auto const res = - executor_.read(yield, schema_->selectLedgerByHash, hash); - res) + if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) { if (auto const& result = res.value(); result) { - if (auto const maybeValue = result.template get(); - maybeValue) + if (auto const maybeValue = result.template get(); maybeValue) return fetchLedgerBySequence(*maybeValue, yield); log_.error() << "Could not fetch ledger by hash - no rows"; @@ -313,8 +284,7 @@ public: { log_.trace() << __func__ << " call"; - if (auto const res = executor_.read(yield, schema_->selectLedgerRange); - res) + if (auto const res = executor_.read(yield, schema_->selectLedgerRange); res) { auto const& results = res.value(); if (not results.hasRows()) @@ -341,8 +311,7 @@ public: if (range.minSequence > range.maxSequence) std::swap(range.minSequence, range.maxSequence); - log_.debug() << "After hardFetchLedgerRange range is " - << range.minSequence << ":" << range.maxSequence; + log_.debug() << "After hardFetchLedgerRange range is " << range.minSequence << ":" << range.maxSequence; return range; } else @@ -354,9 +323,7 @@ public: } std::vector - fetchAllTransactionsInLedger( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override + fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const override { log_.trace() << __func__ << " call"; auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield); @@ -364,28 +331,24 @@ public: } std::vector - fetchAllTransactionHashesInLedger( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override + fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const override { log_.trace() << __func__ << " call"; auto start = std::chrono::system_clock::now(); - auto const res = executor_.read( - yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence); + auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence); if (not res) { - log_.error() << "Could not fetch all 
transaction hashes: " - << res.error(); + log_.error() << "Could not fetch all transaction hashes: " << res.error(); return {}; } auto const& result = res.value(); if (not result.hasRows()) { - log_.error() - << "Could not fetch all transaction hashes - no rows; ledger = " - << std::to_string(ledgerSequence); + log_.error() << "Could not fetch all transaction hashes - no rows; ledger = " + << std::to_string(ledgerSequence); return {}; } @@ -394,36 +357,26 @@ public: hashes.push_back(std::move(hash)); auto end = std::chrono::system_clock::now(); - log_.debug() << "Fetched " << hashes.size() - << " transaction hashes from Cassandra in " - << std::chrono::duration_cast( - end - start) - .count() - << " milliseconds"; + log_.debug() << "Fetched " << hashes.size() << " transaction hashes from Cassandra in " + << std::chrono::duration_cast(end - start).count() << " milliseconds"; return hashes; } std::optional - fetchNFT( - ripple::uint256 const& tokenID, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override + fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const override { log_.trace() << __func__ << " call"; - auto const res = - executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence); + auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence); if (not res) return std::nullopt; - if (auto const maybeRow = - res->template get(); - maybeRow) + if (auto const maybeRow = res->template get(); maybeRow) { auto [seq, owner, isBurned] = *maybeRow; - auto result = - std::make_optional(tokenID, seq, owner, isBurned); + auto result = std::make_optional(tokenID, seq, owner, isBurned); // now fetch URI. Usually we will have the URI even for burned NFTs, // but if the first ledger on this clio included NFTokenBurn @@ -436,12 +389,10 @@ public: // a URI because it was burned in the first ledger) to indicate that // even though we are returning a blank URI, the NFT might have had // one. - auto uriRes = executor_.read( - yield, schema_->selectNFTURI, tokenID, ledgerSequence); + auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence); if (uriRes) { - if (auto const maybeUri = uriRes->template get(); - maybeUri) + if (auto const maybeUri = uriRes->template get(); maybeUri) result->uri = *maybeUri; } @@ -477,19 +428,16 @@ public: if (cursor) { statement.bindAt(1, cursor->asTuple()); - log_.debug() << "token_id = " << ripple::strHex(tokenID) - << " tuple = " << cursor->ledgerSequence + log_.debug() << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence << cursor->transactionIndex; } else { auto const seq = forward ? rng->minSequence : rng->maxSequence; - auto const placeHolder = - forward ? 0 : std::numeric_limits::max(); + auto const placeHolder = forward ? 
0 : std::numeric_limits::max(); statement.bindAt(1, std::make_tuple(placeHolder, placeHolder)); - log_.debug() << "token_id = " << ripple::strHex(tokenID) - << " idx = " << seq << " tuple = " << placeHolder; + log_.debug() << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq << " tuple = " << placeHolder; } statement.bindAt(2, Limit{limit}); @@ -506,8 +454,7 @@ public: auto numRows = results.numRows(); log_.info() << "num_rows = " << numRows; - for (auto [hash, data] : - extract>(results)) + for (auto [hash, data] : extract>(results)) { hashes.push_back(hash); if (--numRows == 0) @@ -535,16 +482,11 @@ public: } std::optional - doFetchLedgerObject( - ripple::uint256 const& key, - std::uint32_t const sequence, - boost::asio::yield_context& yield) const override + doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield) + const override { - log_.debug() << "Fetching ledger object for seq " << sequence - << ", key = " << ripple::to_string(key); - if (auto const res = - executor_.read(yield, schema_->selectObject, key, sequence); - res) + log_.debug() << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key); + if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) { if (auto const result = res->template get(); result) { @@ -565,23 +507,16 @@ public: } std::optional - fetchTransaction( - ripple::uint256 const& hash, - boost::asio::yield_context& yield) const override + fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context& yield) const override { log_.trace() << __func__ << " call"; - if (auto const res = - executor_.read(yield, schema_->selectTransaction, hash); - res) + if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) { - if (auto const maybeValue = - res->template get(); - maybeValue) + if (auto const maybeValue = res->template get(); maybeValue) { auto [transaction, meta, seq, date] = *maybeValue; - return std::make_optional( - transaction, meta, seq, date); + return std::make_optional(transaction, meta, seq, date); } else { @@ -597,19 +532,14 @@ public: } std::optional - doFetchSuccessorKey( - ripple::uint256 key, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override + doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) + const override { log_.trace() << __func__ << " call"; - if (auto const res = executor_.read( - yield, schema_->selectSuccessor, key, ledgerSequence); - res) + if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) { - if (auto const result = res->template get(); - result) + if (auto const result = res->template get(); result) { if (*result == lastKey) return std::nullopt; @@ -629,9 +559,7 @@ public: } std::vector - fetchTransactions( - std::vector const& hashes, - boost::asio::yield_context& yield) const override + fetchTransactions(std::vector const& hashes, boost::asio::yield_context& yield) const override { log_.trace() << __func__ << " call"; @@ -645,17 +573,10 @@ public: std::vector statements; statements.reserve(numHashes); - auto const timeDiff = util::timed([this, - &yield, - &results, - &hashes, - &statements]() { + auto const timeDiff = util::timed([this, &yield, &results, &hashes, &statements]() { // TODO: seems like a job for "hash IN (list of hashes)" instead? 
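// ------------------------------------------------------------------------------------------------
// [Illustrative sketch; not part of the diff above] The surrounding hunk reformats
// fetchTransactions(), which fans out one prepared selectTransaction statement per hash, runs the
// batch via executor_.readEach(yield, statements), and maps every returned row (or an empty
// fallback) into the result vector. The self-contained snippet below mirrors only that
// fan-out/gather shape; Statement, TransactionAndMetadata and the empty std::optional are
// stand-ins for the real driver types and for the actual database read, i.e. assumptions made
// purely for illustration.
#include <algorithm>
#include <iterator>
#include <optional>
#include <string>
#include <vector>

struct Statement
{
    std::string boundHash;  // stands in for schema_->selectTransaction.bind(hash)
};

struct TransactionAndMetadata
{
    std::string transaction, metadata;
};

std::vector<TransactionAndMetadata>
fetchTransactionsSketch(std::vector<std::string> const& hashes)
{
    // 1) build one bound statement per hash, exactly like the std::transform in the hunk above
    std::vector<Statement> statements;
    statements.reserve(hashes.size());
    std::transform(std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [](auto const& hash) {
        return Statement{hash};
    });

    // 2) "execute" each statement and keep either the decoded row or a default-constructed element,
    //    mirroring the maybeRow-or-empty handling applied to the readEach results
    std::vector<TransactionAndMetadata> results;
    results.reserve(statements.size());
    std::transform(
        std::cbegin(statements), std::cend(statements), std::back_inserter(results), [](auto const& /*stmt*/) {
            std::optional<TransactionAndMetadata> maybeRow;  // the real code decodes this from the DB response
            return maybeRow ? *maybeRow : TransactionAndMetadata{};
        });
    return results;
}
// ------------------------------------------------------------------------------------------------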
std::transform( - std::cbegin(hashes), - std::cend(hashes), - std::back_inserter(statements), - [this](auto const& hash) { + std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) { return schema_->selectTransaction.bind(hash); }); @@ -665,9 +586,7 @@ public: std::cend(entries), std::back_inserter(results), [](auto const& res) -> TransactionAndMetadata { - if (auto const maybeRow = - res.template get(); - maybeRow) + if (auto const maybeRow = res.template get(); maybeRow) return *maybeRow; else return {}; @@ -675,9 +594,7 @@ public: }); assert(numHashes == results.size()); - log_.debug() << "Fetched " << numHashes - << " transactions from Cassandra in " << timeDiff - << " milliseconds"; + log_.debug() << "Fetched " << numHashes << " transactions from Cassandra in " << timeDiff << " milliseconds"; return results; } @@ -703,21 +620,14 @@ public: // TODO: seems like a job for "key IN (list of keys)" instead? std::transform( - std::cbegin(keys), - std::cend(keys), - std::back_inserter(statements), - [this, &sequence](auto const& key) { + std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }); auto const entries = executor_.readEach(yield, statements); std::transform( - std::cbegin(entries), - std::cend(entries), - std::back_inserter(results), - [](auto const& res) -> Blob { - if (auto const maybeValue = res.template get(); - maybeValue) + std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob { + if (auto const maybeValue = res.template get(); maybeValue) return *maybeValue; else return {}; @@ -728,47 +638,37 @@ public: } std::vector - fetchLedgerDiff( - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yield) const override + fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const override { log_.trace() << __func__ << " call"; - auto const [keys, timeDiff] = util::timed( - [this, &ledgerSequence, &yield]() -> std::vector { - auto const res = - executor_.read(yield, schema_->selectDiff, ledgerSequence); - if (not res) - { - log_.error() - << "Could not fetch ledger diff: " << res.error() - << "; ledger = " << ledgerSequence; - return {}; - } + auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, &yield]() -> std::vector { + auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence); + if (not res) + { + log_.error() << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence; + return {}; + } - auto const& results = res.value(); - if (not results) - { - log_.error() - << "Could not fetch ledger diff - no rows; ledger = " - << ledgerSequence; - return {}; - } + auto const& results = res.value(); + if (not results) + { + log_.error() << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence; + return {}; + } - std::vector keys; - for (auto [key] : extract(results)) - keys.push_back(key); + std::vector keys; + for (auto [key] : extract(results)) + keys.push_back(key); - return keys; - }); + return keys; + }); // one of the above errors must have happened if (keys.empty()) return {}; - log_.debug() << "Fetched " << keys.size() - << " diff hashes from Cassandra in " << timeDiff - << " milliseconds"; + log_.debug() << "Fetched " << keys.size() << " diff hashes from Cassandra in " << timeDiff << " milliseconds"; auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield); std::vector 
results; @@ -787,43 +687,29 @@ public: } void - doWriteLedgerObject( - std::string&& key, - std::uint32_t const seq, - std::string&& blob) override + doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override { - log_.trace() << " Writing ledger object " << key.size() << ":" << seq - << " [" << blob.size() << " bytes]"; + log_.trace() << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]"; if (range) executor_.write(schema_->insertDiff, seq, key); - executor_.write( - schema_->insertObject, std::move(key), seq, std::move(blob)); + executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob)); } void - writeSuccessor( - std::string&& key, - std::uint32_t const seq, - std::string&& successor) override + writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override { log_.trace() << "Writing successor. key = " << key.size() << " bytes. " - << " seq = " << std::to_string(seq) - << " successor = " << successor.size() << " bytes."; + << " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes."; assert(key.size() != 0); assert(successor.size() != 0); - executor_.write( - schema_->insertSuccessor, - std::move(key), - seq, - std::move(successor)); + executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor)); } void - writeAccountTransactions( - std::vector&& data) override + writeAccountTransactions(std::vector&& data) override { std::vector statements; statements.reserve(data.size() * 10); // assume 10 transactions avg @@ -837,8 +723,7 @@ public: [this, &record](auto&& account) { return schema_->insertAccountTx.bind( std::move(account), - std::make_tuple( - record.ledgerSequence, record.transactionIndex), + std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash); }); } @@ -852,17 +737,10 @@ public: std::vector statements; statements.reserve(data.size()); - std::transform( - std::cbegin(data), - std::cend(data), - std::back_inserter(statements), - [this](auto const& record) { - return schema_->insertNFTTx.bind( - record.tokenID, - std::make_tuple( - record.ledgerSequence, record.transactionIndex), - record.txHash); - }); + std::transform(std::cbegin(data), std::cend(data), std::back_inserter(statements), [this](auto const& record) { + return schema_->insertNFTTx.bind( + record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash); + }); executor_.write(statements); } @@ -879,12 +757,7 @@ public: executor_.write(schema_->insertLedgerTransaction, seq, hash); executor_.write( - schema_->insertTransaction, - std::move(hash), - seq, - date, - std::move(transaction), - std::move(metadata)); + schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)); } void @@ -895,11 +768,8 @@ public: for (NFTsData const& record : data) { - statements.push_back(schema_->insertNFT.bind( - record.tokenID, - record.ledgerSequence, - record.owner, - record.isBurned)); + statements.push_back( + schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)); // If `uri` is set (and it can be set to an empty uri), we know this // is a net-new NFT. 
That is, this NFT has not been seen before by @@ -910,11 +780,10 @@ public: { statements.push_back(schema_->insertIssuerNFT.bind( ripple::nft::getIssuer(record.tokenID), - static_cast( - ripple::nft::getTaxon(record.tokenID)), + static_cast(ripple::nft::getTaxon(record.tokenID)), record.tokenID)); - statements.push_back(schema_->insertNFTURI.bind( - record.tokenID, record.ledgerSequence, record.uri.value())); + statements.push_back( + schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())); } } @@ -930,9 +799,7 @@ public: /*! Unused in this implementation */ bool - doOnlineDelete( - std::uint32_t const numLedgersToKeep, - boost::asio::yield_context& yield) const override + doOnlineDelete(std::uint32_t const numLedgersToKeep, boost::asio::yield_context& yield) const override { log_.trace() << __func__ << " call"; return true; @@ -958,8 +825,7 @@ private: if (not maybeSuccess.value()) { - log_.warn() - << "Update failed. Checking if DB state is what we expect"; + log_.warn() << "Update failed. Checking if DB state is what we expect"; // error may indicate that another writer wrote something. // in this case let's just compare the current state of things @@ -973,7 +839,6 @@ private: } }; -using CassandraBackend = - BasicCassandraBackend>; +using CassandraBackend = BasicCassandraBackend>; } // namespace Backend::Cassandra diff --git a/src/backend/DBHelpers.h b/src/backend/DBHelpers.h index 6dbcfb7e..d36c3712 100644 --- a/src/backend/DBHelpers.h +++ b/src/backend/DBHelpers.h @@ -39,10 +39,7 @@ struct AccountTransactionsData std::uint32_t transactionIndex; ripple::uint256 txHash; - AccountTransactionsData( - ripple::TxMeta& meta, - ripple::uint256 const& txHash, - beast::Journal& j) + AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash, beast::Journal& j) : accounts(meta.getAffectedAccounts()) , ledgerSequence(meta.getLgrSeq()) , transactionIndex(meta.getIndex()) @@ -62,14 +59,8 @@ struct NFTTransactionsData std::uint32_t transactionIndex; ripple::uint256 txHash; - NFTTransactionsData( - ripple::uint256 const& tokenID, - ripple::TxMeta const& meta, - ripple::uint256 const& txHash) - : tokenID(tokenID) - , ledgerSequence(meta.getLgrSeq()) - , transactionIndex(meta.getIndex()) - , txHash(txHash) + NFTTransactionsData(ripple::uint256 const& tokenID, ripple::TxMeta const& meta, ripple::uint256 const& txHash) + : tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), txHash(txHash) { } }; @@ -110,21 +101,13 @@ struct NFTsData ripple::AccountID const& owner, ripple::Blob const& uri, ripple::TxMeta const& meta) - : tokenID(tokenID) - , ledgerSequence(meta.getLgrSeq()) - , transactionIndex(meta.getIndex()) - , owner(owner) - , uri(uri) + : tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), owner(owner), uri(uri) { } // This constructor is used when parsing an NFTokenBurn or // NFTokenAcceptOffer tx - NFTsData( - ripple::uint256 const& tokenID, - ripple::AccountID const& owner, - ripple::TxMeta const& meta, - bool isBurned) + NFTsData(ripple::uint256 const& tokenID, ripple::AccountID const& owner, ripple::TxMeta const& meta, bool isBurned) : tokenID(tokenID) , ledgerSequence(meta.getLgrSeq()) , transactionIndex(meta.getIndex()) @@ -144,10 +127,7 @@ struct NFTsData std::uint32_t const ledgerSequence, ripple::AccountID const& owner, ripple::Blob const& uri) - : tokenID(tokenID) - , ledgerSequence(ledgerSequence) - , owner(owner) - , uri(uri) + : tokenID(tokenID), 
ledgerSequence(ledgerSequence), owner(owner), uri(uri) { } }; @@ -188,8 +168,7 @@ isBookDir(T const& key, R const& object) if (!isDirNode(object)) return false; - ripple::STLedgerEntry const sle{ - ripple::SerialIter{object.data(), object.size()}, key}; + ripple::STLedgerEntry const sle{ripple::SerialIter{object.data(), object.size()}, key}; return !sle[~ripple::sfOwner].has_value(); } @@ -228,10 +207,8 @@ deserializeHeader(ripple::Slice data) info.parentHash = sit.get256(); info.txHash = sit.get256(); info.accountHash = sit.get256(); - info.parentCloseTime = - ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}}; - info.closeTime = - ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}}; + info.parentCloseTime = ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}}; + info.closeTime = ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}}; info.closeTimeResolution = ripple::NetClock::duration{sit.get8()}; info.closeFlags = sit.get8(); diff --git a/src/backend/SimpleCache.cpp b/src/backend/SimpleCache.cpp index 6284f460..6337a22b 100644 --- a/src/backend/SimpleCache.cpp +++ b/src/backend/SimpleCache.cpp @@ -28,10 +28,7 @@ SimpleCache::latestLedgerSequence() const } void -SimpleCache::update( - std::vector const& objs, - uint32_t seq, - bool isBackground) +SimpleCache::update(std::vector const& objs, uint32_t seq, bool isBackground) { if (disabled_) return; diff --git a/src/backend/SimpleCache.h b/src/backend/SimpleCache.h index 85d2ab88..699c9ba5 100644 --- a/src/backend/SimpleCache.h +++ b/src/backend/SimpleCache.h @@ -56,10 +56,7 @@ public: // Update the cache with new ledger objects // set isBackground to true when writing old data from a background thread void - update( - std::vector const& blobs, - uint32_t seq, - bool isBackground = false); + update(std::vector const& blobs, uint32_t seq, bool isBackground = false); std::optional get(ripple::uint256 const& key, uint32_t seq) const; diff --git a/src/backend/Types.h b/src/backend/Types.h index 8400b572..406846b6 100644 --- a/src/backend/Types.h +++ b/src/backend/Types.h @@ -65,15 +65,11 @@ struct TransactionAndMetadata Blob const& metadata, std::uint32_t ledgerSequence, std::uint32_t date) - : transaction{transaction} - , metadata{metadata} - , ledgerSequence{ledgerSequence} - , date{date} + : transaction{transaction}, metadata{metadata}, ledgerSequence{ledgerSequence}, date{date} { } - TransactionAndMetadata( - std::tuple data) + TransactionAndMetadata(std::tuple data) : transaction{std::get<0>(data)} , metadata{std::get<1>(data)} , ledgerSequence{std::get<2>(data)} @@ -95,9 +91,7 @@ struct TransactionsCursor std::uint32_t transactionIndex; TransactionsCursor() = default; - TransactionsCursor( - std::uint32_t ledgerSequence, - std::uint32_t transactionIndex) + TransactionsCursor(std::uint32_t ledgerSequence, std::uint32_t transactionIndex) : ledgerSequence{ledgerSequence}, transactionIndex{transactionIndex} { } @@ -140,18 +134,11 @@ struct NFT ripple::AccountID const& owner, Blob const& uri, bool isBurned) - : tokenID{tokenID} - , ledgerSequence{ledgerSequence} - , owner{owner} - , uri{uri} - , isBurned{isBurned} + : tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{uri}, isBurned{isBurned} { } - NFT(ripple::uint256 const& tokenID, - std::uint32_t ledgerSequence, - ripple::AccountID const& owner, - bool isBurned) + NFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, ripple::AccountID const& owner, bool isBurned) : NFT(tokenID, 
ledgerSequence, owner, {}, isBurned) { } @@ -162,8 +149,7 @@ struct NFT bool operator==(NFT const& other) const { - return tokenID == other.tokenID && - ledgerSequence == other.ledgerSequence; + return tokenID == other.tokenID && ledgerSequence == other.ledgerSequence; } }; @@ -172,10 +158,7 @@ struct LedgerRange std::uint32_t minSequence; std::uint32_t maxSequence; }; -constexpr ripple::uint256 firstKey{ - "0000000000000000000000000000000000000000000000000000000000000000"}; -constexpr ripple::uint256 lastKey{ - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"}; -constexpr ripple::uint256 hi192{ - "0000000000000000000000000000000000000000000000001111111111111111"}; +constexpr ripple::uint256 firstKey{"0000000000000000000000000000000000000000000000000000000000000000"}; +constexpr ripple::uint256 lastKey{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"}; +constexpr ripple::uint256 hi192{"0000000000000000000000000000000000000000000000001111111111111111"}; } // namespace Backend diff --git a/src/backend/cassandra/Error.h b/src/backend/cassandra/Error.h index a9f13a14..98ea00a1 100644 --- a/src/backend/cassandra/Error.h +++ b/src/backend/cassandra/Error.h @@ -35,27 +35,20 @@ class CassandraError public: CassandraError() = default; // default constructible required by Expected - CassandraError(std::string message, uint32_t code) - : message_{message}, code_{code} + CassandraError(std::string message, uint32_t code) : message_{message}, code_{code} { } template friend std::string - operator+( - T const& lhs, - CassandraError const& - rhs) requires std::is_convertible_v + operator+(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v { return lhs + rhs.message(); } template friend bool - operator==( - T const& lhs, - CassandraError const& - rhs) requires std::is_convertible_v + operator==(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v { return lhs == rhs.message(); } @@ -89,10 +82,8 @@ public: bool isTimeout() const { - if (code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or - code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or - code_ == CASS_ERROR_SERVER_UNAVAILABLE or - code_ == CASS_ERROR_SERVER_OVERLOADED or + if (code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or + code_ == CASS_ERROR_SERVER_UNAVAILABLE or code_ == CASS_ERROR_SERVER_OVERLOADED or code_ == CASS_ERROR_SERVER_READ_TIMEOUT) return true; return false; diff --git a/src/backend/cassandra/Handle.cpp b/src/backend/cassandra/Handle.cpp index b21e6b6f..8cf193eb 100644 --- a/src/backend/cassandra/Handle.cpp +++ b/src/backend/cassandra/Handle.cpp @@ -25,8 +25,7 @@ Handle::Handle(Settings clusterSettings) : cluster_{clusterSettings} { } -Handle::Handle(std::string_view contactPoints) - : Handle{Settings::defaultSettings().withContactPoints(contactPoints)} +Handle::Handle(std::string_view contactPoints) : Handle{Settings::defaultSettings().withContactPoints(contactPoints)} { } @@ -75,9 +74,7 @@ Handle::FutureType Handle::asyncReconnect(std::string_view keyspace) const { if (auto rc = asyncDisconnect().await(); not rc) // sync - throw std::logic_error( - "Reconnect to keyspace '" + std::string{keyspace} + - "' failed: " + rc.error()); + throw std::logic_error("Reconnect to keyspace '" + std::string{keyspace} + "' failed: " + rc.error()); return asyncConnect(keyspace); } @@ -99,8 +96,7 @@ Handle::asyncExecuteEach(std::vector const& statements) const Handle::MaybeErrorType Handle::executeEach(std::vector const& statements) const { - for 
(auto futures = asyncExecuteEach(statements); - auto const& future : futures) + for (auto futures = asyncExecuteEach(statements); auto const& future : futures) { if (auto const rc = future.await(); not rc) return rc; @@ -116,12 +112,9 @@ Handle::asyncExecute(Statement const& statement) const } Handle::FutureWithCallbackType -Handle::asyncExecute( - Statement const& statement, - std::function&& cb) const +Handle::asyncExecute(Statement const& statement, std::function&& cb) const { - return Handle::FutureWithCallbackType{ - cass_session_execute(session_, statement), std::move(cb)}; + return Handle::FutureWithCallbackType{cass_session_execute(session_, statement), std::move(cb)}; } Handle::ResultOrErrorType @@ -143,12 +136,10 @@ Handle::execute(std::vector const& statements) const } Handle::FutureWithCallbackType -Handle::asyncExecute( - std::vector const& statements, - std::function&& cb) const +Handle::asyncExecute(std::vector const& statements, std::function&& cb) + const { - return Handle::FutureWithCallbackType{ - cass_session_execute_batch(session_, Batch{statements}), std::move(cb)}; + return Handle::FutureWithCallbackType{cass_session_execute_batch(session_, Batch{statements}), std::move(cb)}; } Handle::PreparedStatementType diff --git a/src/backend/cassandra/Handle.h b/src/backend/cassandra/Handle.h index 5fc5618b..2f7577cd 100644 --- a/src/backend/cassandra/Handle.h +++ b/src/backend/cassandra/Handle.h @@ -212,8 +212,7 @@ public: [[maybe_unused]] ResultOrErrorType execute(PreparedStatementType const& statement, Args&&... args) const { - return asyncExecute(statement, std::forward(args)...) - .get(); + return asyncExecute(statement, std::forward(args)...).get(); } /** @@ -231,9 +230,7 @@ public: * @return A future that holds onto the callback provided */ [[nodiscard]] FutureWithCallbackType - asyncExecute( - StatementType const& statement, - std::function&& cb) const; + asyncExecute(StatementType const& statement, std::function&& cb) const; /** * @brief Synchonous version of the above @@ -268,9 +265,7 @@ public: * @return A future that holds onto the callback provided */ [[nodiscard]] FutureWithCallbackType - asyncExecute( - std::vector const& statements, - std::function&& cb) const; + asyncExecute(std::vector const& statements, std::function&& cb) const; /** * @brief Prepare a statement diff --git a/src/backend/cassandra/Schema.h b/src/backend/cassandra/Schema.h index 340388b0..23322110 100644 --- a/src/backend/cassandra/Schema.h +++ b/src/backend/cassandra/Schema.h @@ -32,15 +32,9 @@ namespace Backend::Cassandra { template -[[nodiscard]] std::string inline qualifiedTableName( - SettingsProviderType const& provider, - std::string_view name) +[[nodiscard]] std::string inline qualifiedTableName(SettingsProviderType const& provider, std::string_view name) { - return fmt::format( - "{}.{}{}", - provider.getKeyspace(), - provider.getTablePrefix().value_or(""), - name); + return fmt::format("{}.{}{}", provider.getKeyspace(), provider.getTablePrefix().value_or(""), name); } /** @@ -58,8 +52,7 @@ class Schema std::reference_wrapper settingsProvider_; public: - explicit Schema(SettingsProviderType const& settingsProvider) - : settingsProvider_{std::cref(settingsProvider)} + explicit Schema(SettingsProviderType const& settingsProvider) : settingsProvider_{std::cref(settingsProvider)} { } @@ -229,8 +222,7 @@ public: PRIMARY KEY (issuer, taxon, token_id) ) )", - qualifiedTableName( - settingsProvider_.get(), "issuer_nf_tokens_v2"))); + qualifiedTableName(settingsProvider_.get(), 
"issuer_nf_tokens_v2"))); statements.emplace_back(fmt::format( R"( @@ -259,8 +251,7 @@ public: WITH CLUSTERING ORDER BY (seq_idx DESC) AND default_time_to_live = {} )", - qualifiedTableName( - settingsProvider_.get(), "nf_token_transactions"), + qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"), settingsProvider_.get().getTtl())); return statements; @@ -275,9 +266,7 @@ public: std::reference_wrapper handle_; public: - Statements( - SettingsProviderType const& settingsProvider, - Handle const& handle) + Statements(SettingsProviderType const& settingsProvider, Handle const& handle) : settingsProvider_{settingsProvider}, handle_{std::cref(handle)} { } @@ -313,8 +302,7 @@ public: (ledger_sequence, hash) VALUES (?, ?) )", - qualifiedTableName( - settingsProvider_.get(), "ledger_transactions"))); + qualifiedTableName(settingsProvider_.get(), "ledger_transactions"))); }(); PreparedStatement insertSuccessor = [this]() { @@ -364,8 +352,7 @@ public: (issuer, taxon, token_id) VALUES (?, ?, ?) )", - qualifiedTableName( - settingsProvider_.get(), "issuer_nf_tokens_v2"))); + qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2"))); }(); PreparedStatement insertNFTURI = [this]() { @@ -385,8 +372,7 @@ public: (token_id, seq_idx, hash) VALUES (?, ?, ?) )", - qualifiedTableName( - settingsProvider_.get(), "nf_token_transactions"))); + qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"))); }(); PreparedStatement insertLedgerHeader = [this]() { @@ -491,8 +477,7 @@ public: FROM {} WHERE ledger_sequence = ? )", - qualifiedTableName( - settingsProvider_.get(), "ledger_transactions"))); + qualifiedTableName(settingsProvider_.get(), "ledger_transactions"))); }(); PreparedStatement selectLedgerPageKeys = [this]() { @@ -595,8 +580,7 @@ public: ORDER BY seq_idx DESC LIMIT ? )", - qualifiedTableName( - settingsProvider_.get(), "nf_token_transactions"))); + qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"))); }(); PreparedStatement selectNFTTxForward = [this]() { @@ -609,8 +593,7 @@ public: ORDER BY seq_idx ASC LIMIT ? 
)", - qualifiedTableName( - settingsProvider_.get(), "nf_token_transactions"))); + qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"))); }(); PreparedStatement selectLedgerByHash = [this]() { diff --git a/src/backend/cassandra/SettingsProvider.cpp b/src/backend/cassandra/SettingsProvider.cpp index 68b05a0b..53cda430 100644 --- a/src/backend/cassandra/SettingsProvider.cpp +++ b/src/backend/cassandra/SettingsProvider.cpp @@ -31,9 +31,7 @@ namespace Backend::Cassandra { namespace detail { inline Settings::ContactPoints -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& value) +tag_invoke(boost::json::value_to_tag, boost::json::value const& value) { if (not value.is_object()) throw std::runtime_error( @@ -43,17 +41,14 @@ tag_invoke( clio::Config obj{value}; Settings::ContactPoints out; - out.contactPoints = obj.valueOrThrow( - "contact_points", "`contact_points` must be a string"); + out.contactPoints = obj.valueOrThrow("contact_points", "`contact_points` must be a string"); out.port = obj.maybeValue("port"); return out; } inline Settings::SecureConnectionBundle -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& value) +tag_invoke(boost::json::value_to_tag, boost::json::value const& value) { if (not value.is_string()) throw std::runtime_error("`secure_connect_bundle` must be a string"); @@ -80,28 +75,19 @@ SettingsProvider::getSettings() const std::optional SettingsProvider::parseOptionalCertificate() const { - if (auto const certPath = config_.maybeValue("certfile"); - certPath) + if (auto const certPath = config_.maybeValue("certfile"); certPath) { auto const path = std::filesystem::path(*certPath); std::ifstream fileStream(path.string(), std::ios::in); if (!fileStream) { - throw std::system_error( - errno, - std::generic_category(), - "Opening certificate " + path.string()); + throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string()); } - std::string contents( - std::istreambuf_iterator{fileStream}, - std::istreambuf_iterator{}); + std::string contents(std::istreambuf_iterator{fileStream}, std::istreambuf_iterator{}); if (fileStream.bad()) { - throw std::system_error( - errno, - std::generic_category(), - "Reading certificate " + path.string()); + throw std::system_error(errno, std::generic_category(), "Reading certificate " + path.string()); } return contents; @@ -114,24 +100,21 @@ Settings SettingsProvider::parseSettings() const { auto settings = Settings::defaultSettings(); - if (auto const bundle = - config_.maybeValue( - "secure_connect_bundle"); - bundle) + if (auto const bundle = config_.maybeValue("secure_connect_bundle"); bundle) { settings.connectionInfo = *bundle; } else { - settings.connectionInfo = config_.valueOrThrow( - "Missing contact_points in Cassandra config"); + settings.connectionInfo = + config_.valueOrThrow("Missing contact_points in Cassandra config"); } settings.threads = config_.valueOr("threads", settings.threads); - settings.maxWriteRequestsOutstanding = config_.valueOr( - "max_write_requests_outstanding", settings.maxWriteRequestsOutstanding); - settings.maxReadRequestsOutstanding = config_.valueOr( - "max_read_requests_outstanding", settings.maxReadRequestsOutstanding); + settings.maxWriteRequestsOutstanding = + config_.valueOr("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding); + settings.maxReadRequestsOutstanding = + config_.valueOr("max_read_requests_outstanding", settings.maxReadRequestsOutstanding); settings.certificate = 
parseOptionalCertificate(); settings.username = config_.maybeValue("username"); settings.password = config_.maybeValue("password"); diff --git a/src/backend/cassandra/impl/AsyncExecutor.h b/src/backend/cassandra/impl/AsyncExecutor.h index b11f0ac3..d04bb281 100644 --- a/src/backend/cassandra/impl/AsyncExecutor.h +++ b/src/backend/cassandra/impl/AsyncExecutor.h @@ -48,13 +48,10 @@ template < typename StatementType, typename HandleType = Handle, SomeRetryPolicy RetryPolicyType = ExponentialBackoffRetryPolicy> -class AsyncExecutor - : public std::enable_shared_from_this< - AsyncExecutor> +class AsyncExecutor : public std::enable_shared_from_this> { using FutureWithCallbackType = typename HandleType::FutureWithCallbackType; - using CallbackType = - std::function; + using CallbackType = std::function; clio::Logger log_{"Backend"}; @@ -71,37 +68,24 @@ public: * @brief Create a new instance of the AsyncExecutor and execute it. */ static void - run(boost::asio::io_context& ioc, - HandleType const& handle, - StatementType data, - CallbackType&& onComplete) + run(boost::asio::io_context& ioc, HandleType const& handle, StatementType data, CallbackType&& onComplete) { // this is a helper that allows us to use std::make_shared below - struct EnableMakeShared - : public AsyncExecutor + struct EnableMakeShared : public AsyncExecutor { - EnableMakeShared( - boost::asio::io_context& ioc, - StatementType&& data, - CallbackType&& onComplete) + EnableMakeShared(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete) : AsyncExecutor(ioc, std::move(data), std::move(onComplete)) { } }; - auto ptr = std::make_shared( - ioc, std::move(data), std::move(onComplete)); + auto ptr = std::make_shared(ioc, std::move(data), std::move(onComplete)); ptr->execute(handle); } private: - AsyncExecutor( - boost::asio::io_context& ioc, - StatementType&& data, - CallbackType&& onComplete) - : data_{std::move(data)} - , retryPolicy_{ioc} - , onComplete_{std::move(onComplete)} + AsyncExecutor(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete) + : data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)} { } @@ -119,8 +103,7 @@ private: else { if (retryPolicy_.shouldRetry(res.error())) - retryPolicy_.retry( - [self, &handle]() { self->execute(handle); }); + retryPolicy_.retry([self, &handle]() { self->execute(handle); }); else onComplete_(std::move(res)); // report error } diff --git a/src/backend/cassandra/impl/Batch.cpp b/src/backend/cassandra/impl/Batch.cpp index e9a918eb..751124fb 100644 --- a/src/backend/cassandra/impl/Batch.cpp +++ b/src/backend/cassandra/impl/Batch.cpp @@ -26,9 +26,7 @@ #include namespace { -static constexpr auto batchDeleter = [](CassBatch* ptr) { - cass_batch_free(ptr); -}; +static constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); }; }; namespace Backend::Cassandra::detail { @@ -42,15 +40,13 @@ Batch::Batch(std::vector const& statements) for (auto const& statement : statements) if (auto const res = add(statement); not res) - throw std::runtime_error( - "Failed to add statement to batch: " + res.error()); + throw std::runtime_error("Failed to add statement to batch: " + res.error()); } MaybeError Batch::add(Statement const& statement) { - if (auto const rc = cass_batch_add_statement(*this, statement); - rc != CASS_OK) + if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK) { return Error{CassandraError{cass_error_desc(rc), rc}}; } diff --git a/src/backend/cassandra/impl/Cluster.cpp 
b/src/backend/cassandra/impl/Cluster.cpp index c1519771..d716b39a 100644 --- a/src/backend/cassandra/impl/Cluster.cpp +++ b/src/backend/cassandra/impl/Cluster.cpp @@ -26,9 +26,7 @@ #include namespace { -static constexpr auto clusterDeleter = [](CassCluster* ptr) { - cass_cluster_free(ptr); -}; +static constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); }; template struct overloadSet : Ts... @@ -43,28 +41,21 @@ overloadSet(Ts...) -> overloadSet; namespace Backend::Cassandra::detail { -Cluster::Cluster(Settings const& settings) - : ManagedObject{cass_cluster_new(), clusterDeleter} +Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), clusterDeleter} { using std::to_string; cass_cluster_set_token_aware_routing(*this, cass_true); - if (auto const rc = - cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); - rc != CASS_OK) + if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK) { - throw std::runtime_error( - std::string{"Error setting cassandra protocol version to v4: "} + - cass_error_desc(rc)); + throw std::runtime_error(std::string{"Error setting cassandra protocol version to v4: "} + cass_error_desc(rc)); } - if (auto const rc = - cass_cluster_set_num_threads_io(*this, settings.threads); - rc != CASS_OK) + if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK) { throw std::runtime_error( - std::string{"Error setting cassandra io threads to "} + - to_string(settings.threads) + ": " + cass_error_desc(rc)); + std::string{"Error setting cassandra io threads to "} + to_string(settings.threads) + ": " + + cass_error_desc(rc)); } cass_log_set_level(settings.enableLog ? CASS_LOG_TRACE : CASS_LOG_DISABLED); @@ -75,28 +66,21 @@ Cluster::Cluster(Settings const& settings) // cass_cluster_set_max_concurrent_requests_threshold(*this, 10000); // cass_cluster_set_queue_size_event(*this, 100000); // cass_cluster_set_queue_size_io(*this, 100000); - // cass_cluster_set_write_bytes_high_water_mark( - // *this, 16 * 1024 * 1024); // 16mb - // cass_cluster_set_write_bytes_low_water_mark( - // *this, 8 * 1024 * 1024); // half of allowance + // cass_cluster_set_write_bytes_high_water_mark(*this, 16 * 1024 * 1024); // 16mb + // cass_cluster_set_write_bytes_low_water_mark(*this, 8 * 1024 * 1024); // half of allowance // cass_cluster_set_pending_requests_high_water_mark(*this, 5000); // cass_cluster_set_pending_requests_low_water_mark(*this, 2500); // half // cass_cluster_set_max_requests_per_flush(*this, 1000); // cass_cluster_set_max_concurrent_creation(*this, 8); // cass_cluster_set_max_connections_per_host(*this, 6); // cass_cluster_set_core_connections_per_host(*this, 4); - // cass_cluster_set_constant_speculative_execution_policy(*this, 1000, - // 1024); + // cass_cluster_set_constant_speculative_execution_policy(*this, 1000, 1024); if (auto const rc = cass_cluster_set_queue_size_io( - *this, - settings.maxWriteRequestsOutstanding + - settings.maxReadRequestsOutstanding); + *this, settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding); rc != CASS_OK) { - throw std::runtime_error( - std::string{"Could not set queue size for IO per host: "} + - cass_error_desc(rc)); + throw std::runtime_error(std::string{"Could not set queue size for IO per host: "} + cass_error_desc(rc)); } setupConnection(settings); @@ -109,12 +93,8 @@ Cluster::setupConnection(Settings const& settings) { std::visit( overloadSet{ - [this](Settings::ContactPoints const& 
points) { - setupContactPoints(points); - }, - [this](Settings::SecureConnectionBundle const& bundle) { - setupSecureBundle(bundle); - }}, + [this](Settings::ContactPoints const& points) { setupContactPoints(points); }, + [this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }}, settings.connectionInfo); } @@ -122,17 +102,13 @@ void Cluster::setupContactPoints(Settings::ContactPoints const& points) { using std::to_string; - auto throwErrorIfNeeded = - [](CassError rc, std::string const label, std::string const value) { - if (rc != CASS_OK) - throw std::runtime_error( - "Cassandra: Error setting " + label + " [" + value + - "]: " + cass_error_desc(rc)); - }; + auto throwErrorIfNeeded = [](CassError rc, std::string const label, std::string const value) { + if (rc != CASS_OK) + throw std::runtime_error("Cassandra: Error setting " + label + " [" + value + "]: " + cass_error_desc(rc)); + }; { - auto const rc = - cass_cluster_set_contact_points(*this, points.contactPoints.data()); + auto const rc = cass_cluster_set_contact_points(*this, points.contactPoints.data()); throwErrorIfNeeded(rc, "contact_points", points.contactPoints); } @@ -146,12 +122,9 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points) void Cluster::setupSecureBundle(Settings::SecureConnectionBundle const& bundle) { - if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle( - *this, bundle.bundle.data()); - rc != CASS_OK) + if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK) { - throw std::runtime_error( - "Failed to connect using secure connection bundle" + bundle.bundle); + throw std::runtime_error("Failed to connect using secure connection bundle" + bundle.bundle); } } @@ -171,10 +144,7 @@ Cluster::setupCredentials(Settings const& settings) if (not settings.username || not settings.password) return; - cass_cluster_set_credentials( - *this, - settings.username.value().c_str(), - settings.password.value().c_str()); + cass_cluster_set_credentials(*this, settings.username.value().c_str(), settings.password.value().c_str()); } } // namespace Backend::Cassandra::detail diff --git a/src/backend/cassandra/impl/Cluster.h b/src/backend/cassandra/impl/Cluster.h index fb8bb5aa..7cba6f6d 100644 --- a/src/backend/cassandra/impl/Cluster.h +++ b/src/backend/cassandra/impl/Cluster.h @@ -46,12 +46,9 @@ struct Settings }; bool enableLog = false; - std::chrono::milliseconds connectionTimeout = - std::chrono::milliseconds{1000}; - std::chrono::milliseconds requestTimeout = - std::chrono::milliseconds{0}; // no timeout at all - std::variant connectionInfo = - ContactPoints{}; + std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{1000}; + std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all + std::variant connectionInfo = ContactPoints{}; uint32_t threads = std::thread::hardware_concurrency(); uint32_t maxWriteRequestsOutstanding = 10'000; uint32_t maxReadRequestsOutstanding = 100'000; diff --git a/src/backend/cassandra/impl/ExecutionStrategy.h b/src/backend/cassandra/impl/ExecutionStrategy.h index 8aec621d..69c38fc8 100644 --- a/src/backend/cassandra/impl/ExecutionStrategy.h +++ b/src/backend/cassandra/impl/ExecutionStrategy.h @@ -78,8 +78,7 @@ public: using CompletionTokenType = boost::asio::yield_context; using FunctionType = void(boost::system::error_code); - using AsyncResultType = - boost::asio::async_result; + using AsyncResultType = boost::asio::async_result; using 
HandlerType = typename AsyncResultType::completion_handler_type; DefaultExecutionStrategy(Settings settings, HandleType const& handle) @@ -89,10 +88,8 @@ public: , handle_{std::cref(handle)} , thread_{[this]() { ioc_.run(); }} { - log_.info() << "Max write requests outstanding is " - << maxWriteRequestsOutstanding_ - << "; Max read requests outstanding is " - << maxReadRequestsOutstanding_; + log_.info() << "Max write requests outstanding is " << maxWriteRequestsOutstanding_ + << "; Max read requests outstanding is " << maxReadRequestsOutstanding_; } ~DefaultExecutionStrategy() @@ -136,8 +133,7 @@ public: } else { - log_.warn() - << "Cassandra sync write error, retrying: " << res.error(); + log_.warn() << "Cassandra sync write error, retrying: " << res.error(); std::this_thread::sleep_for(std::chrono::milliseconds(5)); } } @@ -173,9 +169,7 @@ public: // Note: lifetime is controlled by std::shared_from_this internally AsyncExecutor::run( - ioc_, handle_.get(), std::move(statement), [this](auto const&) { - decrementOutstandingRequestCount(); - }); + ioc_, handle_.get(), std::move(statement), [this](auto const&) { decrementOutstandingRequestCount(); }); } /** @@ -193,9 +187,7 @@ public: // Note: lifetime is controlled by std::shared_from_this internally AsyncExecutor::run( - ioc_, handle_.get(), statements, [this](auto const&) { - decrementOutstandingRequestCount(); - }); + ioc_, handle_.get(), statements, [this](auto const&) { decrementOutstandingRequestCount(); }); } /** @@ -211,10 +203,7 @@ public: */ template [[maybe_unused]] ResultOrErrorType - read( - CompletionTokenType token, - PreparedStatementType const& preparedStatement, - Args&&... args) + read(CompletionTokenType token, PreparedStatementType const& preparedStatement, Args&&... args) { return read(token, preparedStatement.bind(std::forward(args)...)); } @@ -230,9 +219,7 @@ public: * @return ResultType or error wrapped in Expected */ [[maybe_unused]] ResultOrErrorType - read( - CompletionTokenType token, - std::vector const& statements) + read(CompletionTokenType token, std::vector const& statements) { auto handler = HandlerType{token}; auto result = AsyncResultType{handler}; @@ -242,14 +229,11 @@ public: { numReadRequestsOutstanding_ += statements.size(); - auto const future = handle_.get().asyncExecute( - statements, [handler](auto&&) mutable { - boost::asio::post( - boost::asio::get_associated_executor(handler), - [handler]() mutable { - handler(boost::system::error_code{}); - }); + auto const future = handle_.get().asyncExecute(statements, [handler](auto&&) mutable { + boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable { + handler(boost::system::error_code{}); }); + }); // suspend coroutine until completion handler is called result.get(); @@ -264,8 +248,7 @@ public: } else { - log_.error() - << "Failed batch read in coroutine: " << res.error(); + log_.error() << "Failed batch read in coroutine: " << res.error(); throwErrorIfNeeded(res.error()); } } @@ -292,14 +275,11 @@ public: { ++numReadRequestsOutstanding_; - auto const future = handle_.get().asyncExecute( - statement, [handler](auto const&) mutable { - boost::asio::post( - boost::asio::get_associated_executor(handler), - [handler]() mutable { - handler(boost::system::error_code{}); - }); + auto const future = handle_.get().asyncExecute(statement, [handler](auto const&) mutable { + boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable { + handler(boost::system::error_code{}); }); + }); // suspend coroutine 
until completion handler is called result.get(); @@ -332,9 +312,7 @@ public: * @return Vector of results */ std::vector - readEach( - CompletionTokenType token, - std::vector const& statements) + readEach(CompletionTokenType token, std::vector const& statements) { auto handler = HandlerType{token}; auto result = AsyncResultType{handler}; @@ -347,19 +325,16 @@ public: futures.reserve(numOutstanding); // used as the handler for each async statement individually - auto executionHandler = - [handler, &hadError, &numOutstanding](auto const& res) mutable { - if (not res) - hadError = true; + auto executionHandler = [handler, &hadError, &numOutstanding](auto const& res) mutable { + if (not res) + hadError = true; - // when all async operations complete unblock the result - if (--numOutstanding == 0) - boost::asio::post( - boost::asio::get_associated_executor(handler), - [handler]() mutable { - handler(boost::system::error_code{}); - }); - }; + // when all async operations complete unblock the result + if (--numOutstanding == 0) + boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable { + handler(boost::system::error_code{}); + }); + }; std::transform( std::cbegin(statements), @@ -407,8 +382,7 @@ private: { log_.trace() << "Max outstanding requests reached. " << "Waiting for other requests to finish"; - throttleCv_.wait( - lck, [this]() { return canAddWriteRequest(); }); + throttleCv_.wait(lck, [this]() { return canAddWriteRequest(); }); } } ++numWriteRequestsOutstanding_; diff --git a/src/backend/cassandra/impl/Future.cpp b/src/backend/cassandra/impl/Future.cpp index 1ecdb792..3f227992 100644 --- a/src/backend/cassandra/impl/Future.cpp +++ b/src/backend/cassandra/impl/Future.cpp @@ -25,15 +25,12 @@ #include namespace { -static constexpr auto futureDeleter = [](CassFuture* ptr) { - cass_future_free(ptr); -}; +static constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); }; } // namespace namespace Backend::Cassandra::detail { -/* implicit */ Future::Future(CassFuture* ptr) - : ManagedObject{ptr, futureDeleter} +/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, futureDeleter} { } @@ -62,9 +59,7 @@ Future::get() const char const* message; std::size_t len; cass_future_error_message(*this, &message, &len); - return std::make_pair( - label + ": " + std::string{message, len}, - cass_future_error_code(*this)); + return std::make_pair(label + ": " + std::string{message, len}, cass_future_error_code(*this)); }("future::get()"); return Error{CassandraError{errMsg, code}}; } @@ -85,9 +80,7 @@ invokeHelper(CassFuture* ptr, void* cbPtr) char const* message; std::size_t len; cass_future_error_message(ptr, &message, &len); - return std::make_pair( - label + ": " + std::string{message, len}, - cass_future_error_code(ptr)); + return std::make_pair(label + ": " + std::string{message, len}, cass_future_error_code(ptr)); }("invokeHelper"); (*cb)(Error{CassandraError{errMsg, code}}); } @@ -97,9 +90,7 @@ invokeHelper(CassFuture* ptr, void* cbPtr) } } -/* implicit */ FutureWithCallback::FutureWithCallback( - CassFuture* ptr, - fn_t&& cb) +/* implicit */ FutureWithCallback::FutureWithCallback(CassFuture* ptr, fn_t&& cb) : Future{ptr}, cb_{std::make_unique(std::move(cb))} { // Instead of passing `this` as the userdata void*, we pass the address of diff --git a/src/backend/cassandra/impl/ManagedObject.h b/src/backend/cassandra/impl/ManagedObject.h index 30ea39a9..565529d6 100644 --- a/src/backend/cassandra/impl/ManagedObject.h +++ 
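DefaultExecutionStrategy above applies back-pressure to writers with a counter of outstanding requests plus a condition variable (throttleCv_.wait(lck, ...) guarded by canAddWriteRequest). A stripped-down sketch of just that counting-and-blocking idea, with invented names and no io_context or Cassandra handle involved:

#include <condition_variable>
#include <cstdint>
#include <mutex>

// Minimal back-pressure gate: block producers once `limit` requests are in flight.
class RequestThrottle
{
    std::mutex mtx_;
    std::condition_variable cv_;
    std::uint32_t outstanding_ = 0;
    std::uint32_t const limit_;

public:
    explicit RequestThrottle(std::uint32_t limit) : limit_{limit}
    {
    }

    void
    acquire()  // call before issuing a request
    {
        std::unique_lock lck{mtx_};
        cv_.wait(lck, [this]() { return outstanding_ < limit_; });
        ++outstanding_;
    }

    void
    release()  // call from the completion handler
    {
        {
            std::lock_guard lck{mtx_};
            --outstanding_;
        }
        cv_.notify_all();
    }
};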
b/src/backend/cassandra/impl/ManagedObject.h @@ -31,12 +31,10 @@ protected: public: template - ManagedObject(Managed* rawPtr, deleterCallable deleter) - : ptr_{rawPtr, deleter} + ManagedObject(Managed* rawPtr, deleterCallable deleter) : ptr_{rawPtr, deleter} { if (rawPtr == nullptr) - throw std::runtime_error( - "Could not create DB object - got nullptr"); + throw std::runtime_error("Could not create DB object - got nullptr"); } ManagedObject(ManagedObject&&) = default; diff --git a/src/backend/cassandra/impl/Result.cpp b/src/backend/cassandra/impl/Result.cpp index 78a72259..2596ceba 100644 --- a/src/backend/cassandra/impl/Result.cpp +++ b/src/backend/cassandra/impl/Result.cpp @@ -20,18 +20,13 @@ #include namespace { -static constexpr auto resultDeleter = [](CassResult const* ptr) { - cass_result_free(ptr); -}; -static constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { - cass_iterator_free(ptr); -}; +static constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); }; +static constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); }; } // namespace namespace Backend::Cassandra::detail { -/* implicit */ Result::Result(CassResult const* ptr) - : ManagedObject{ptr, resultDeleter} +/* implicit */ Result::Result(CassResult const* ptr) : ManagedObject{ptr, resultDeleter} { } @@ -48,8 +43,7 @@ Result::hasRows() const } /* implicit */ ResultIterator::ResultIterator(CassIterator* ptr) - : ManagedObject{ptr, resultIteratorDeleter} - , hasMore_{cass_iterator_next(ptr)} + : ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr)} { } diff --git a/src/backend/cassandra/impl/Result.h b/src/backend/cassandra/impl/Result.h index 67ae8aed..b870ac18 100644 --- a/src/backend/cassandra/impl/Result.h +++ b/src/backend/cassandra/impl/Result.h @@ -59,8 +59,7 @@ extractColumn(CassRow const* row, std::size_t idx) { cass_byte_t const* buf; std::size_t bufSize; - auto const rc = - cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize); + auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize); throwErrorIfNeeded(rc, "Extract ripple::uint256"); output = ripple::uint256::fromVoid(buf); } @@ -68,8 +67,7 @@ extractColumn(CassRow const* row, std::size_t idx) { cass_byte_t const* buf; std::size_t bufSize; - auto const rc = - cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize); + auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize); throwErrorIfNeeded(rc, "Extract ripple::AccountID"); output = ripple::AccountID::fromVoid(buf); } @@ -77,8 +75,7 @@ extractColumn(CassRow const* row, std::size_t idx) { cass_byte_t const* buf; std::size_t bufSize; - auto const rc = - cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize); + auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize); throwErrorIfNeeded(rc, "Extract vector"); output = uchar_vector_t{buf, buf + bufSize}; } @@ -91,16 +88,14 @@ extractColumn(CassRow const* row, std::size_t idx) { char const* value; std::size_t len; - auto const rc = - cass_value_get_string(cass_row_get_column(row, idx), &value, &len); + auto const rc = cass_value_get_string(cass_row_get_column(row, idx), &value, &len); throwErrorIfNeeded(rc, "Extract string"); output = std::string{value, len}; } else if constexpr (std::is_same_v) { cass_bool_t flag; - auto const rc = - cass_value_get_bool(cass_row_get_column(row, idx), &flag); + auto const rc = cass_value_get_bool(cass_row_get_column(row, 
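ManagedObject and its users above (Result, Session, Statement, ...) all wrap a raw driver pointer together with a deleter and throw if the pointer is null; the call sites appear to pass *this where the C API expects the raw pointer, implying an implicit conversion. A generic sketch of that RAII idiom built on std::unique_ptr with a custom deleter; FakeHandle and fake_free are invented stand-ins, and a plain function pointer replaces the constexpr deleter lambdas used in the real code:

#include <memory>
#include <stdexcept>

struct FakeHandle { int value; };                       // stand-in for a driver handle type
inline void fake_free(FakeHandle* ptr) { delete ptr; }  // stand-in for the matching *_free function

template <typename Managed>
class Handle
{
    using DeleterFn = void (*)(Managed*);
    std::unique_ptr<Managed, DeleterFn> ptr_;

public:
    Handle(Managed* rawPtr, DeleterFn deleter) : ptr_{rawPtr, deleter}
    {
        if (rawPtr == nullptr)
            throw std::runtime_error("Could not create DB object - got nullptr");
    }

    // Lets the wrapper be passed where the C API expects the raw pointer.
    operator Managed*() const
    {
        return ptr_.get();
    }
};

int main()
{
    Handle<FakeHandle> h{new FakeHandle{42}, &fake_free};  // released automatically via fake_free
}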
idx), &flag); throwErrorIfNeeded(rc, "Extract bool"); output = flag ? true : false; } @@ -108,8 +103,7 @@ extractColumn(CassRow const* row, std::size_t idx) else if constexpr (std::is_convertible_v) { int64_t out; - auto const rc = - cass_value_get_int64(cass_row_get_column(row, idx), &out); + auto const rc = cass_value_get_int64(cass_row_get_column(row, idx), &out); throwErrorIfNeeded(rc, "Extract int64"); output = static_cast(out); } @@ -144,8 +138,7 @@ struct Result : public ManagedObject std::size_t idx = 0; auto advanceId = [&idx]() { return idx++; }; - return std::make_optional>( - {extractColumn(row, advanceId())...}); + return std::make_optional>({extractColumn(row, advanceId())...}); } template @@ -207,8 +200,7 @@ public: using difference_type = std::size_t; // rows count using value_type = std::tuple; - /* implicit */ Iterator(ResultIterator iterator) - : iterator_{std::move(iterator)} + /* implicit */ Iterator(ResultIterator iterator) : iterator_{std::move(iterator)} { } diff --git a/src/backend/cassandra/impl/RetryPolicy.h b/src/backend/cassandra/impl/RetryPolicy.h index d90001fa..388ba17a 100644 --- a/src/backend/cassandra/impl/RetryPolicy.h +++ b/src/backend/cassandra/impl/RetryPolicy.h @@ -59,9 +59,8 @@ public: shouldRetry([[maybe_unused]] CassandraError err) { auto const delay = calculateDelay(attempt_); - log_.error() << "Cassandra write error: " << err << ", current retries " - << attempt_ << ", retrying in " << delay.count() - << " milliseconds"; + log_.error() << "Cassandra write error: " << err << ", current retries " << attempt_ << ", retrying in " + << delay.count() << " milliseconds"; return true; // keep retrying forever } @@ -76,11 +75,10 @@ public: retry(Fn&& fn) { timer_.expires_after(calculateDelay(attempt_++)); - timer_.async_wait( - [fn = std::move(fn)]([[maybe_unused]] const auto& err) { - // todo: deal with cancellation (thru err) - fn(); - }); + timer_.async_wait([fn = std::move(fn)]([[maybe_unused]] const auto& err) { + // todo: deal with cancellation (thru err) + fn(); + }); } /** @@ -89,8 +87,7 @@ public: std::chrono::milliseconds calculateDelay(uint32_t attempt) { - return std::chrono::milliseconds{ - lround(std::pow(2, std::min(10u, attempt)))}; + return std::chrono::milliseconds{lround(std::pow(2, std::min(10u, attempt)))}; } }; diff --git a/src/backend/cassandra/impl/Session.h b/src/backend/cassandra/impl/Session.h index c87503fc..9becb1ab 100644 --- a/src/backend/cassandra/impl/Session.h +++ b/src/backend/cassandra/impl/Session.h @@ -27,9 +27,7 @@ namespace Backend::Cassandra::detail { class Session : public ManagedObject { - static constexpr auto deleter = [](CassSession* ptr) { - cass_session_free(ptr); - }; + static constexpr auto deleter = [](CassSession* ptr) { cass_session_free(ptr); }; public: Session() : ManagedObject{cass_session_new(), deleter} diff --git a/src/backend/cassandra/impl/SslContext.cpp b/src/backend/cassandra/impl/SslContext.cpp index 78f57d54..baadb619 100644 --- a/src/backend/cassandra/impl/SslContext.cpp +++ b/src/backend/cassandra/impl/SslContext.cpp @@ -25,16 +25,12 @@ static constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); }; namespace Backend::Cassandra::detail { -SslContext::SslContext(std::string const& certificate) - : ManagedObject{cass_ssl_new(), contextDeleter} +SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), contextDeleter} { cass_ssl_set_verify_flags(*this, CASS_SSL_VERIFY_NONE); - if (auto const rc = cass_ssl_add_trusted_cert(*this, 
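The retry policy's calculateDelay above caps the exponent at 10, so the wait between attempts doubles from 1 ms up to 1024 ms and then stays flat. A tiny standalone check of that formula (fully qualified names added, otherwise the same expression):

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <iostream>

// Same formula as calculateDelay above: 2^min(10, attempt) milliseconds.
std::chrono::milliseconds
calculateDelay(std::uint32_t attempt)
{
    return std::chrono::milliseconds{std::lround(std::pow(2, std::min(10u, attempt)))};
}

int main()
{
    for (std::uint32_t attempt = 0; attempt <= 12; ++attempt)
        std::cout << "attempt " << attempt << " -> " << calculateDelay(attempt).count() << " ms\n";
    // prints 1, 2, 4, ..., 1024, 1024, 1024
}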
certificate.c_str()); - rc != CASS_OK) + if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK) { - throw std::runtime_error( - std::string{"Error setting Cassandra SSL Context: "} + - cass_error_desc(rc)); + throw std::runtime_error(std::string{"Error setting Cassandra SSL Context: "} + cass_error_desc(rc)); } } diff --git a/src/backend/cassandra/impl/Statement.h b/src/backend/cassandra/impl/Statement.h index b54a1450..1f8d2895 100644 --- a/src/backend/cassandra/impl/Statement.h +++ b/src/backend/cassandra/impl/Statement.h @@ -37,9 +37,7 @@ namespace Backend::Cassandra::detail { class Statement : public ManagedObject { - static constexpr auto deleter = [](CassStatement* ptr) { - cass_statement_free(ptr); - }; + static constexpr auto deleter = [](CassStatement* ptr) { cass_statement_free(ptr); }; template static constexpr bool unsupported_v = false; @@ -53,9 +51,7 @@ public: */ template explicit Statement(std::string_view query, Args&&... args) - : ManagedObject{ - cass_statement_new(query.data(), sizeof...(args)), - deleter} + : ManagedObject{cass_statement_new(query.data(), sizeof...(args)), deleter} { cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM); cass_statement_set_is_idempotent(*this, cass_true); @@ -85,13 +81,11 @@ public: using std::to_string; auto throwErrorIfNeeded = [idx](CassError rc, std::string_view label) { if (rc != CASS_OK) - throw std::logic_error(fmt::format( - "[{}] at idx {}: {}", label, idx, cass_error_desc(rc))); + throw std::logic_error(fmt::format("[{}] at idx {}: {}", label, idx, cass_error_desc(rc))); }; auto bindBytes = [this, idx](auto const* data, size_t size) { - return cass_statement_bind_bytes( - *this, idx, static_cast(data), size); + return cass_statement_bind_bytes(*this, idx, static_cast(data), size); }; using decayed_t = std::decay_t; @@ -116,21 +110,17 @@ public: else if constexpr (std::is_convertible_v) { // reinterpret_cast is needed here :'( - auto const rc = bindBytes( - reinterpret_cast(value.data()), - value.size()); + auto const rc = bindBytes(reinterpret_cast(value.data()), value.size()); throwErrorIfNeeded(rc, "Bind string (as bytes)"); } else if constexpr (std::is_same_v) { - auto const rc = - cass_statement_bind_tuple(*this, idx, Tuple{std::move(value)}); + auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::move(value)}); throwErrorIfNeeded(rc, "Bind tuple"); } else if constexpr (std::is_same_v) { - auto const rc = cass_statement_bind_bool( - *this, idx, value ? cass_true : cass_false); + auto const rc = cass_statement_bind_bool(*this, idx, value ? 
cass_true : cass_false); throwErrorIfNeeded(rc, "Bind bool"); } else if constexpr (std::is_same_v) @@ -154,13 +144,10 @@ public: class PreparedStatement : public ManagedObject { - static constexpr auto deleter = [](CassPrepared const* ptr) { - cass_prepared_free(ptr); - }; + static constexpr auto deleter = [](CassPrepared const* ptr) { cass_prepared_free(ptr); }; public: - /* implicit */ PreparedStatement(CassPrepared const* ptr) - : ManagedObject{ptr, deleter} + /* implicit */ PreparedStatement(CassPrepared const* ptr) : ManagedObject{ptr, deleter} { } diff --git a/src/backend/cassandra/impl/Tuple.cpp b/src/backend/cassandra/impl/Tuple.cpp index b19bb21b..990903b2 100644 --- a/src/backend/cassandra/impl/Tuple.cpp +++ b/src/backend/cassandra/impl/Tuple.cpp @@ -20,12 +20,8 @@ #include namespace { -static constexpr auto tupleDeleter = [](CassTuple* ptr) { - cass_tuple_free(ptr); -}; -static constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { - cass_iterator_free(ptr); -}; +static constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); }; +static constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); }; } // namespace namespace Backend::Cassandra::detail { @@ -34,8 +30,7 @@ namespace Backend::Cassandra::detail { { } -/* implicit */ TupleIterator::TupleIterator(CassIterator* ptr) - : ManagedObject{ptr, tupleIteratorDeleter} +/* implicit */ TupleIterator::TupleIterator(CassIterator* ptr) : ManagedObject{ptr, tupleIteratorDeleter} { } diff --git a/src/backend/cassandra/impl/Tuple.h b/src/backend/cassandra/impl/Tuple.h index c36496dd..cf24bfe1 100644 --- a/src/backend/cassandra/impl/Tuple.h +++ b/src/backend/cassandra/impl/Tuple.h @@ -32,9 +32,7 @@ namespace Backend::Cassandra::detail { class Tuple : public ManagedObject { - static constexpr auto deleter = [](CassTuple* ptr) { - cass_tuple_free(ptr); - }; + static constexpr auto deleter = [](CassTuple* ptr) { cass_tuple_free(ptr); }; template static constexpr bool unsupported_v = false; @@ -44,12 +42,9 @@ public: template explicit Tuple(std::tuple&& value) - : ManagedObject{ - cass_tuple_new(std::tuple_size>{}), - deleter} + : ManagedObject{cass_tuple_new(std::tuple_size>{}), deleter} { - std::apply( - std::bind_front(&Tuple::bind, this), std::move(value)); + std::apply(std::bind_front(&Tuple::bind, this), std::move(value)); } template @@ -69,9 +64,7 @@ public: if (rc != CASS_OK) { auto const tag = '[' + std::string{label} + ']'; - throw std::logic_error( - tag + " at idx " + to_string(idx) + ": " + - cass_error_desc(rc)); + throw std::logic_error(tag + " at idx " + to_string(idx) + ": " + cass_error_desc(rc)); } }; @@ -79,8 +72,7 @@ public: if constexpr (std::is_same_v) { - auto const rc = - cass_tuple_set_bool(*this, idx, value ? cass_true : cass_false); + auto const rc = cass_tuple_set_bool(*this, idx, value ? 
cass_true : cass_false); throwErrorIfNeeded(rc, "Bind bool"); } // clio only uses bigint (int64_t) so we convert any incoming type @@ -124,8 +116,7 @@ private: Type output; if (not cass_iterator_next(*this)) - throw std::logic_error( - "Could not extract next value from tuple iterator"); + throw std::logic_error("Could not extract next value from tuple iterator"); auto throwErrorIfNeeded = [](CassError rc, std::string_view label) { if (rc != CASS_OK) @@ -141,8 +132,7 @@ private: if constexpr (std::is_convertible_v) { int64_t out; - auto const rc = - cass_value_get_int64(cass_iterator_get_value(*this), &out); + auto const rc = cass_value_get_int64(cass_iterator_get_value(*this), &out); throwErrorIfNeeded(rc, "Extract int64 from tuple"); output = static_cast(out); } diff --git a/src/config/Config.cpp b/src/config/Config.cpp index e5d90cdf..0c7b8262 100644 --- a/src/config/Config.cpp +++ b/src/config/Config.cpp @@ -62,8 +62,7 @@ Config::lookup(key_type key) const if (not hasBrokenPath) { if (not cur.get().is_object()) - throw detail::StoreException( - "Not an object at '" + subkey + "'"); + throw detail::StoreException("Not an object at '" + subkey + "'"); if (not cur.get().as_object().contains(section)) hasBrokenPath = true; else @@ -91,11 +90,9 @@ Config::maybeArray(key_type key) const array_type out; out.reserve(arr.size()); - std::transform( - std::begin(arr), - std::end(arr), - std::back_inserter(out), - [](auto&& element) { return Config{std::move(element)}; }); + std::transform(std::begin(arr), std::end(arr), std::back_inserter(out), [](auto&& element) { + return Config{std::move(element)}; + }); return std::make_optional(std::move(out)); } } @@ -156,10 +153,7 @@ Config::array() const out.reserve(arr.size()); std::transform( - std::cbegin(arr), - std::cend(arr), - std::back_inserter(out), - [](auto const& element) { return Config{element}; }); + std::cbegin(arr), std::cend(arr), std::back_inserter(out), [](auto const& element) { return Config{element}; }); return out; } @@ -180,8 +174,7 @@ ConfigReader::open(std::filesystem::path path) } catch (std::exception const& e) { - LogService::error() << "Could not read configuration file from '" - << path.string() << "': " << e.what(); + LogService::error() << "Could not read configuration file from '" << path.string() << "': " << e.what(); } return Config{}; diff --git a/src/config/Config.h b/src/config/Config.h index 1793d372..6b6e8eac 100644 --- a/src/config/Config.h +++ b/src/config/Config.h @@ -43,9 +43,7 @@ class Config final public: using key_type = std::string; /*! The type of key used */ using array_type = std::vector; /*! The type of array used */ - using write_cursor_type = std::pair< - std::optional>, - key_type>; + using write_cursor_type = std::pair>, key_type>; /** * @brief Construct a new Config object. 
@@ -101,8 +99,7 @@ public: { auto maybe_element = lookup(key); if (maybe_element) - return std::make_optional( - checkedAs(key, *maybe_element)); + return std::make_optional(checkedAs(key, *maybe_element)); return std::nullopt; } @@ -367,9 +364,7 @@ private: if (not value.is_number()) has_error = true; } - else if constexpr ( - std::is_convertible_v || - std::is_convertible_v) + else if constexpr (std::is_convertible_v || std::is_convertible_v) { if (not value.is_int64() && not value.is_uint64()) has_error = true; @@ -377,9 +372,8 @@ private: if (has_error) throw std::runtime_error( - "Type for key '" + key + "' is '" + - std::string{to_string(value.kind())} + - "' in JSON but requested '" + detail::typeName() + "'"); + "Type for key '" + key + "' is '" + std::string{to_string(value.kind())} + "' in JSON but requested '" + + detail::typeName() + "'"); return value_to(value); } diff --git a/src/etl/ETLHelpers.h b/src/etl/ETLHelpers.h index 736cf9f2..1ce6a609 100644 --- a/src/etl/ETLHelpers.h +++ b/src/etl/ETLHelpers.h @@ -77,14 +77,10 @@ public: /// @return true if sequence was validated, false otherwise /// a return value of false means the datastructure has been stopped bool - waitUntilValidatedByNetwork( - uint32_t sequence, - std::optional maxWaitMs = {}) + waitUntilValidatedByNetwork(uint32_t sequence, std::optional maxWaitMs = {}) { std::unique_lock lck(m_); - auto pred = [sequence, this]() -> bool { - return (max_ && sequence <= *max_); - }; + auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); }; if (maxWaitMs) cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs)); else diff --git a/src/etl/ETLSource.cpp b/src/etl/ETLSource.cpp index af9aa872..7409d787 100644 --- a/src/etl/ETLSource.cpp +++ b/src/etl/ETLSource.cpp @@ -43,15 +43,12 @@ ForwardCache::freshen() { log_.trace() << "Freshening ForwardCache"; - auto numOutstanding = - std::make_shared(latestForwarded_.size()); + auto numOutstanding = std::make_shared(latestForwarded_.size()); for (auto const& cacheEntry : latestForwarded_) { boost::asio::spawn( - strand_, - [this, numOutstanding, command = cacheEntry.first]( - boost::asio::yield_context yield) { + strand_, [this, numOutstanding, command = cacheEntry.first](boost::asio::yield_context yield) { boost::json::object request = {{"command", command}}; auto resp = source_.requestFromRippled(request, {}, yield); @@ -78,12 +75,9 @@ std::optional ForwardCache::get(boost::json::object const& request) const { std::optional command = {}; - if (request.contains("command") && !request.contains("method") && - request.at("command").is_string()) + if (request.contains("command") && !request.contains("method") && request.at("command").is_string()) command = request.at("command").as_string().c_str(); - else if ( - request.contains("method") && !request.contains("command") && - request.at("method").is_string()) + else if (request.contains("method") && !request.contains("command") && request.at("method").is_string()) command = request.at("method").as_string().c_str(); if (!command) @@ -116,8 +110,7 @@ make_TimeoutOption() } else { - return boost::beast::websocket::stream_base::timeout::suggested( - boost::beast::role_type::client); + return boost::beast::websocket::stream_base::timeout::suggested(boost::beast::role_type::client); } } @@ -138,8 +131,7 @@ ETLSourceImpl::reconnect(boost::beast::error_code ec) // if we cannot connect to the transaction processing process if (ec.category() == boost::asio::error::get_ssl_category()) { - err = std::string(" (") + - 
boost::lexical_cast(ERR_GET_LIB(ec.value())) + "," + + err = std::string(" (") + boost::lexical_cast(ERR_GET_LIB(ec.value())) + "," + boost::lexical_cast(ERR_GET_REASON(ec.value())) + ") "; // ERR_PACK /* crypto/err/err.h */ char buf[128]; @@ -149,8 +141,7 @@ ETLSourceImpl::reconnect(boost::beast::error_code ec) std::cout << err << std::endl; } - if (ec != boost::asio::error::operation_aborted && - ec != boost::asio::error::connection_refused) + if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused) { log_.error() << "error code = " << ec << " - " << toString(); } @@ -184,30 +175,25 @@ PlainETLSource::close(bool startAgain) // an assertion fails. Using closing_ makes sure async_close is only // called once closing_ = true; - derived().ws().async_close( - boost::beast::websocket::close_code::normal, - [this, startAgain](auto ec) { - if (ec) - { - log_.error() - << " async_close : " - << "error code = " << ec << " - " << toString(); - } - closing_ = false; - if (startAgain) - { - ws_ = std::make_unique>( - boost::asio::make_strand(ioc_)); + derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) { + if (ec) + { + log_.error() << " async_close : " + << "error code = " << ec << " - " << toString(); + } + closing_ = false; + if (startAgain) + { + ws_ = std::make_unique>( + boost::asio::make_strand(ioc_)); - run(); - } - }); + run(); + } + }); } else if (startAgain) { - ws_ = std::make_unique< - boost::beast::websocket::stream>( + ws_ = std::make_unique>( boost::asio::make_strand(ioc_)); run(); @@ -229,31 +215,26 @@ SslETLSource::close(bool startAgain) // an assertion fails. Using closing_ makes sure async_close is only // called once closing_ = true; - derived().ws().async_close( - boost::beast::websocket::close_code::normal, - [this, startAgain](auto ec) { - if (ec) - { - log_.error() - << " async_close : " - << "error code = " << ec << " - " << toString(); - } - closing_ = false; - if (startAgain) - { - ws_ = std::make_unique>>( - boost::asio::make_strand(ioc_), *sslCtx_); + derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) { + if (ec) + { + log_.error() << " async_close : " + << "error code = " << ec << " - " << toString(); + } + closing_ = false; + if (startAgain) + { + ws_ = std::make_unique< + boost::beast::websocket::stream>>( + boost::asio::make_strand(ioc_), *sslCtx_); - run(); - } - }); + run(); + } + }); } else if (startAgain) { - ws_ = std::make_unique>>( + ws_ = std::make_unique>>( boost::asio::make_strand(ioc_), *sslCtx_); run(); @@ -263,9 +244,7 @@ SslETLSource::close(bool startAgain) template void -ETLSourceImpl::onResolve( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type results) +ETLSourceImpl::onResolve(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results) { log_.trace() << "ec = " << ec << " - " << toString(); if (ec) @@ -275,12 +254,10 @@ ETLSourceImpl::onResolve( } else { - boost::beast::get_lowest_layer(derived().ws()) - .expires_after(std::chrono::seconds(30)); - boost::beast::get_lowest_layer(derived().ws()) - .async_connect(results, [this](auto ec, auto ep) { - derived().onConnect(ec, ep); - }); + boost::beast::get_lowest_layer(derived().ws()).expires_after(std::chrono::seconds(30)); + boost::beast::get_lowest_layer(derived().ws()).async_connect(results, [this](auto ec, auto ep) { + derived().onConnect(ec, ep); + }); } } @@ -307,21 +284,18 @@ PlainETLSource::onConnect( // Set a 
decorator to change the User-Agent of the handshake derived().ws().set_option( - boost::beast::websocket::stream_base::decorator( - [](boost::beast::websocket::request_type& req) { - req.set( - boost::beast::http::field::user_agent, "clio-client"); + boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) { + req.set(boost::beast::http::field::user_agent, "clio-client"); - req.set("X-User", "clio-client"); - })); + req.set("X-User", "clio-client"); + })); // Update the host_ string. This will provide the value of the // Host HTTP header during the WebSocket handshake. // See https://tools.ietf.org/html/rfc7230#section-5.4 auto host = ip_ + ':' + std::to_string(endpoint.port()); // Perform the websocket handshake - derived().ws().async_handshake( - host, "/", [this](auto ec) { onHandshake(ec); }); + derived().ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); }); } } @@ -348,13 +322,11 @@ SslETLSource::onConnect( // Set a decorator to change the User-Agent of the handshake derived().ws().set_option( - boost::beast::websocket::stream_base::decorator( - [](boost::beast::websocket::request_type& req) { - req.set( - boost::beast::http::field::user_agent, "clio-client"); + boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) { + req.set(boost::beast::http::field::user_agent, "clio-client"); - req.set("X-User", "clio-client"); - })); + req.set("X-User", "clio-client"); + })); // Update the host_ string. This will provide the value of the // Host HTTP header during the WebSocket handshake. @@ -362,8 +334,7 @@ SslETLSource::onConnect( auto host = ip_ + ':' + std::to_string(endpoint.port()); // Perform the websocket handshake ws().next_layer().async_handshake( - boost::asio::ssl::stream_base::client, - [this, endpoint](auto ec) { onSslHandshake(ec, endpoint); }); + boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) { onSslHandshake(ec, endpoint); }); } } @@ -390,8 +361,7 @@ void ETLSourceImpl::onHandshake(boost::beast::error_code ec) { log_.trace() << "ec = " << ec << " - " << toString(); - if (auto action = hooks_.onConnected(ec); - action == ETLSourceHooks::Action::STOP) + if (auto action = hooks_.onConnected(ec); action == ETLSourceHooks::Action::STOP) return; if (ec) @@ -402,35 +372,26 @@ ETLSourceImpl::onHandshake(boost::beast::error_code ec) else { boost::json::object jv{ - {"command", "subscribe"}, - {"streams", - {"ledger", "manifests", "validations", "transactions_proposed"}}}; + {"command", "subscribe"}, {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}}}; std::string s = boost::json::serialize(jv); log_.trace() << "Sending subscribe stream message"; derived().ws().set_option( - boost::beast::websocket::stream_base::decorator( - [](boost::beast::websocket::request_type& req) { - req.set( - boost::beast::http::field::user_agent, - std::string(BOOST_BEAST_VERSION_STRING) + - " clio-client"); + boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) { + req.set( + boost::beast::http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " clio-client"); - req.set("X-User", "coro-client"); - })); + req.set("X-User", "coro-client"); + })); // Send the message - derived().ws().async_write( - boost::asio::buffer(s), - [this](auto ec, size_t size) { onWrite(ec, size); }); + derived().ws().async_write(boost::asio::buffer(s), [this](auto ec, size_t size) { onWrite(ec, size); }); } } template void 
-ETLSourceImpl::onWrite( - boost::beast::error_code ec, - size_t bytesWritten) +ETLSourceImpl::onWrite(boost::beast::error_code ec, size_t bytesWritten) { log_.trace() << "ec = " << ec << " - " << toString(); if (ec) @@ -440,8 +401,7 @@ ETLSourceImpl::onWrite( } else { - derived().ws().async_read( - readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); + derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); } } @@ -462,8 +422,7 @@ ETLSourceImpl::onRead(boost::beast::error_code ec, size_t size) swap(readBuffer_, buffer); log_.trace() << "calling async_read - " << toString(); - derived().ws().async_read( - readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); + derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); } } @@ -477,9 +436,7 @@ ETLSourceImpl::handleMessage() connected_ = true; try { - std::string msg{ - static_cast(readBuffer_.data().data()), - readBuffer_.size()}; + std::string msg{static_cast(readBuffer_.data().data()), readBuffer_.size()}; log_.trace() << msg; boost::json::value raw = boost::json::parse(msg); log_.trace() << "parsed"; @@ -495,32 +452,25 @@ ETLSourceImpl::handleMessage() } if (result.contains("validated_ledgers")) { - boost::json::string const& validatedLedgers = - result["validated_ledgers"].as_string(); + boost::json::string const& validatedLedgers = result["validated_ledgers"].as_string(); - setValidatedRange( - {validatedLedgers.c_str(), validatedLedgers.size()}); + setValidatedRange({validatedLedgers.c_str(), validatedLedgers.size()}); } log_.info() << "Received a message on ledger " - << " subscription stream. Message : " << response - << " - " << toString(); + << " subscription stream. Message : " << response << " - " << toString(); } - else if ( - response.contains("type") && response["type"] == "ledgerClosed") + else if (response.contains("type") && response["type"] == "ledgerClosed") { log_.info() << "Received a message on ledger " - << " subscription stream. Message : " << response - << " - " << toString(); + << " subscription stream. 
Message : " << response << " - " << toString(); if (response.contains("ledger_index")) { ledgerIndex = response["ledger_index"].as_int64(); } if (response.contains("validated_ledgers")) { - boost::json::string const& validatedLedgers = - response["validated_ledgers"].as_string(); - setValidatedRange( - {validatedLedgers.c_str(), validatedLedgers.size()}); + boost::json::string const& validatedLedgers = response["validated_ledgers"].as_string(); + setValidatedRange({validatedLedgers.c_str(), validatedLedgers.size()}); } } else @@ -532,15 +482,11 @@ ETLSourceImpl::handleMessage() forwardCache_.freshen(); subscriptions_->forwardProposedTransaction(response); } - else if ( - response.contains("type") && - response["type"] == "validationReceived") + else if (response.contains("type") && response["type"] == "validationReceived") { subscriptions_->forwardValidation(response); } - else if ( - response.contains("type") && - response["type"] == "manifestReceived") + else if (response.contains("type") && response["type"] == "manifestReceived") { subscriptions_->forwardManifest(response); } @@ -549,8 +495,7 @@ ETLSourceImpl::handleMessage() if (ledgerIndex != 0) { - log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - " - << toString(); + log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - " << toString(); networkValidatedLedgers_->push(ledgerIndex); } return true; @@ -578,10 +523,7 @@ class AsyncCallData std::string lastKey_; public: - AsyncCallData( - uint32_t seq, - ripple::uint256 const& marker, - std::optional const& nextMarker) + AsyncCallData(uint32_t seq, ripple::uint256 const& marker, std::optional const& nextMarker) { request_.mutable_ledger()->set_sequence(seq); if (marker.isNonZero()) @@ -595,11 +537,9 @@ public: unsigned char prefix = marker.data()[0]; - log_.debug() << "Setting up AsyncCallData. marker = " - << ripple::strHex(marker) + log_.debug() << "Setting up AsyncCallData. marker = " << ripple::strHex(marker) << " . prefix = " << ripple::strHex(std::string(1, prefix)) - << " . nextPrefix_ = " - << ripple::strHex(std::string(1, nextPrefix_)); + << " . nextPrefix_ = " << ripple::strHex(std::string(1, nextPrefix_)); assert(nextPrefix_ > prefix || nextPrefix_ == 0x00); @@ -629,8 +569,7 @@ public: if (!status_.ok()) { log_.error() << "AsyncCallData status_ not ok: " - << " code = " << status_.error_code() - << " message = " << status_.error_message(); + << " code = " << status_.error_code() << " message = " << status_.error_message(); return CallStatus::ERRORED; } if (!next_->is_unlimited()) @@ -679,37 +618,26 @@ public: if (!cacheOnly) { if (lastKey_.size()) - backend.writeSuccessor( - std::move(lastKey_), - request_.ledger().sequence(), - std::string{obj.key()}); + backend.writeSuccessor(std::move(lastKey_), request_.ledger().sequence(), std::string{obj.key()}); lastKey_ = obj.key(); - backend.writeNFTs(getNFTDataFromObj( - request_.ledger().sequence(), obj.key(), obj.data())); + backend.writeNFTs(getNFTDataFromObj(request_.ledger().sequence(), obj.key(), obj.data())); backend.writeLedgerObject( - std::move(*obj.mutable_key()), - request_.ledger().sequence(), - std::move(*obj.mutable_data())); + std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data())); } } - backend.cache().update( - cacheUpdates, request_.ledger().sequence(), cacheOnly); - log_.debug() << "Wrote " << numObjects - << " objects. Got more: " << (more ? 
"YES" : "NO"); + backend.cache().update(cacheUpdates, request_.ledger().sequence(), cacheOnly); + log_.debug() << "Wrote " << numObjects << " objects. Got more: " << (more ? "YES" : "NO"); return more ? CallStatus::MORE : CallStatus::DONE; } void - call( - std::unique_ptr& stub, - grpc::CompletionQueue& cq) + call(std::unique_ptr& stub, grpc::CompletionQueue& cq) { context_ = std::make_unique(); - std::unique_ptr> - rpc(stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq)); + std::unique_ptr> rpc( + stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq)); rpc->StartCall(); @@ -734,10 +662,7 @@ public: template bool -ETLSourceImpl::loadInitialLedger( - uint32_t sequence, - uint32_t numMarkers, - bool cacheOnly) +ETLSourceImpl::loadInitialLedger(uint32_t sequence, uint32_t numMarkers, bool cacheOnly) { if (!stub_) return false; @@ -759,8 +684,7 @@ ETLSourceImpl::loadInitialLedger( calls.emplace_back(sequence, markers[i], nextMarker); } - log_.debug() << "Starting data download for ledger " << sequence - << ". Using source = " << toString(); + log_.debug() << "Starting data download for ledger " << sequence << ". Using source = " << toString(); for (auto& c : calls) c.call(stub_, cq); @@ -801,14 +725,12 @@ ETLSourceImpl::loadInitialLedger( } if (backend_->cache().size() > progress) { - log_.info() << "Downloaded " << backend_->cache().size() - << " records from rippled"; + log_.info() << "Downloaded " << backend_->cache().size() << " records from rippled"; progress += incr; } } } - log_.info() << "Finished loadInitialLedger. cache size = " - << backend_->cache().size(); + log_.info() << "Finished loadInitialLedger. cache size = " << backend_->cache().size(); size_t numWrites = 0; if (!abort) { @@ -818,27 +740,18 @@ ETLSourceImpl::loadInitialLedger( auto seconds = util::timed([&]() { for (auto& key : edgeKeys) { - log_.debug() - << "Writing edge key = " << ripple::strHex(key); - auto succ = backend_->cache().getSuccessor( - *ripple::uint256::fromVoidChecked(key), sequence); + log_.debug() << "Writing edge key = " << ripple::strHex(key); + auto succ = backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence); if (succ) - backend_->writeSuccessor( - std::move(key), - sequence, - uint256ToString(succ->key)); + backend_->writeSuccessor(std::move(key), sequence, uint256ToString(succ->key)); } ripple::uint256 prev = Backend::firstKey; - while (auto cur = - backend_->cache().getSuccessor(prev, sequence)) + while (auto cur = backend_->cache().getSuccessor(prev, sequence)) { assert(cur); if (prev == Backend::firstKey) { - backend_->writeSuccessor( - uint256ToString(prev), - sequence, - uint256ToString(cur->key)); + backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key)); } if (isBookDir(cur->key, cur->blob)) @@ -847,40 +760,29 @@ ETLSourceImpl::loadInitialLedger( // make sure the base is not an actual object if (!backend_->cache().get(cur->key, sequence)) { - auto succ = - backend_->cache().getSuccessor(base, sequence); + auto succ = backend_->cache().getSuccessor(base, sequence); assert(succ); if (succ->key == cur->key) { - log_.debug() << "Writing book successor = " - << ripple::strHex(base) << " - " + log_.debug() << "Writing book successor = " << ripple::strHex(base) << " - " << ripple::strHex(cur->key); - backend_->writeSuccessor( - uint256ToString(base), - sequence, - uint256ToString(cur->key)); + backend_->writeSuccessor(uint256ToString(base), sequence, uint256ToString(cur->key)); } } ++numWrites; } prev = std::move(cur->key); 
if (numWrites % 100000 == 0 && numWrites != 0) - log_.info() - << "Wrote " << numWrites << " book successors"; + log_.info() << "Wrote " << numWrites << " book successors"; } - backend_->writeSuccessor( - uint256ToString(prev), - sequence, - uint256ToString(Backend::lastKey)); + backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(Backend::lastKey)); ++numWrites; }); - log_.info() - << "Looping through cache and submitting all writes took " - << seconds - << " seconds. numWrites = " << std::to_string(numWrites); + log_.info() << "Looping through cache and submitting all writes took " << seconds + << " seconds. numWrites = " << std::to_string(numWrites); } } return !abort; @@ -888,10 +790,7 @@ ETLSourceImpl::loadInitialLedger( template std::pair -ETLSourceImpl::fetchLedger( - uint32_t ledgerSequence, - bool getObjects, - bool getObjectNeighbors) +ETLSourceImpl::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors) { org::xrpl::rpc::v1::GetLedgerResponse response; if (!stub_) @@ -927,12 +826,7 @@ make_ETLSource( ETLLoadBalancer& balancer) { auto src = std::make_unique( - config, - ioContext, - backend, - subscriptions, - networkValidatedLedgers, - balancer); + config, ioContext, backend, subscriptions, networkValidatedLedgers, balancer); src->run(); @@ -953,8 +847,7 @@ ETLLoadBalancer::ETLLoadBalancer( for (auto const& entry : config.array("etl_sources")) { - std::unique_ptr source = make_ETLSource( - entry, ioContext, backend, subscriptions, nwvl, *this); + std::unique_ptr source = make_ETLSource(entry, ioContext, backend, subscriptions, nwvl, *this); sources_.push_back(std::move(source)); log_.info() << "Added etl source - " << sources_.back()->toString(); @@ -966,13 +859,11 @@ ETLLoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly) { execute( [this, &sequence, cacheOnly](auto& source) { - bool res = - source->loadInitialLedger(sequence, downloadRanges_, cacheOnly); + bool res = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly); if (!res) { log_.error() << "Failed to download initial ledger." 
- << " Sequence = " << sequence - << " source = " << source->toString(); + << " Sequence = " << sequence << " source = " << source->toString(); } return res; }, @@ -980,17 +871,12 @@ ETLLoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly) } std::optional -ETLLoadBalancer::fetchLedger( - uint32_t ledgerSequence, - bool getObjects, - bool getObjectNeighbors) +ETLLoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors) { org::xrpl::rpc::v1::GetLedgerResponse response; bool success = execute( - [&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_]( - auto& source) { - auto [status, data] = source->fetchLedger( - ledgerSequence, getObjects, getObjectNeighbors); + [&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](auto& source) { + auto [status, data] = source->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors); response = std::move(data); if (status.ok() && response.validated()) { @@ -1000,10 +886,8 @@ ETLLoadBalancer::fetchLedger( } else { - log.warn() << "Could not fetch ledger " << ledgerSequence - << ", Reply: " << response.DebugString() - << ", error_code: " << status.error_code() - << ", error_msg: " << status.error_message() + log.warn() << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString() + << ", error_code: " << status.error_code() << ", error_msg: " << status.error_message() << ", source = " << source->toString(); return false; } @@ -1026,8 +910,7 @@ ETLLoadBalancer::forwardToRippled( auto numAttempts = 0; while (numAttempts < sources_.size()) { - if (auto res = - sources_[sourceIdx]->forwardToRippled(request, clientIp, yield)) + if (auto res = sources_[sourceIdx]->forwardToRippled(request, clientIp, yield)) return res; sourceIdx = (sourceIdx + 1) % sources_.size(); @@ -1100,14 +983,10 @@ ETLSourceImpl::requestFromRippled( // resources. See "secure_gateway" in // // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg - ws->set_option(websocket::stream_base::decorator( - [&clientIp](websocket::request_type& req) { - req.set( - http::field::user_agent, - std::string(BOOST_BEAST_VERSION_STRING) + - " websocket-client-coro"); - req.set(http::field::forwarded, "for=" + clientIp); - })); + ws->set_option(websocket::stream_base::decorator([&clientIp](websocket::request_type& req) { + req.set(http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " websocket-client-coro"); + req.set(http::field::forwarded, "for=" + clientIp); + })); log_.trace() << "client ip: " << clientIp; log_.trace() << "Performing websocket handshake"; @@ -1118,8 +997,7 @@ ETLSourceImpl::requestFromRippled( log_.trace() << "Sending request"; // Send the message - ws->async_write( - net::buffer(boost::json::serialize(request)), yield[ec]); + ws->async_write(net::buffer(boost::json::serialize(request)), yield[ec]); if (ec) return {}; @@ -1134,8 +1012,7 @@ ETLSourceImpl::requestFromRippled( if (!parsed.is_object()) { - log_.error() << "Error parsing response: " - << std::string{begin, end}; + log_.error() << "Error parsing response: " << std::string{begin, end}; return {}; } log_.trace() << "Successfully forward request"; @@ -1164,8 +1041,8 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence) { auto& source = sources_[sourceIdx]; - log_.debug() << "Attempting to execute func. ledger sequence = " - << ledgerSequence << " - source = " << source->toString(); + log_.debug() << "Attempting to execute func. 
ledger sequence = " << ledgerSequence + << " - source = " << source->toString(); // Originally, it was (source->hasLedger(ledgerSequence) || true) /* Sometimes rippled has ledger but doesn't actually know. However, but this does NOT happen in the normal case and is safe to remove @@ -1175,30 +1052,26 @@ ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence) bool res = f(source); if (res) { - log_.debug() << "Successfully executed func at source = " - << source->toString() + log_.debug() << "Successfully executed func at source = " << source->toString() << " - ledger sequence = " << ledgerSequence; break; } else { - log_.warn() << "Failed to execute func at source = " - << source->toString() + log_.warn() << "Failed to execute func at source = " << source->toString() << " - ledger sequence = " << ledgerSequence; } } else { - log_.warn() << "Ledger not present at source = " - << source->toString() + log_.warn() << "Ledger not present at source = " << source->toString() << " - ledger sequence = " << ledgerSequence; } sourceIdx = (sourceIdx + 1) % sources_.size(); numAttempts++; if (numAttempts % sources_.size() == 0) { - log_.info() << "Ledger sequence " << ledgerSequence - << " is not yet available from any configured sources. " + log_.info() << "Ledger sequence " << ledgerSequence << " is not yet available from any configured sources. " << "Sleeping and trying again"; std::this_thread::sleep_for(std::chrono::seconds(2)); } diff --git a/src/etl/ETLSource.h b/src/etl/ETLSource.h index 67dba270..199ccb27 100644 --- a/src/etl/ETLSource.h +++ b/src/etl/ETLSource.h @@ -67,26 +67,20 @@ class ForwardCache clear(); public: - ForwardCache( - clio::Config const& config, - boost::asio::io_context& ioc, - ETLSource const& source) + ForwardCache(clio::Config const& config, boost::asio::io_context& ioc, ETLSource const& source) : strand_(ioc), timer_(strand_), source_(source) { if (config.contains("cache")) { - auto commands = - config.arrayOrThrow("cache", "ETLSource cache must be array"); + auto commands = config.arrayOrThrow("cache", "ETLSource cache must be array"); if (config.contains("cache_duration")) - duration_ = config.valueOrThrow( - "cache_duration", - "ETLSource cache_duration must be a number"); + duration_ = + config.valueOrThrow("cache_duration", "ETLSource cache_duration must be a number"); for (auto const& command : commands) { - auto key = command.valueOrThrow( - "ETLSource forward command must be array of strings"); + auto key = command.valueOrThrow("ETLSource forward command must be array of strings"); latestForwarded_[key] = {}; } } @@ -128,22 +122,14 @@ public: hasLedger(uint32_t sequence) const = 0; virtual std::pair - fetchLedger( - uint32_t ledgerSequence, - bool getObjects = true, - bool getObjectNeighbors = false) = 0; + fetchLedger(uint32_t ledgerSequence, bool getObjects = true, bool getObjectNeighbors = false) = 0; virtual bool - loadInitialLedger( - uint32_t sequence, - std::uint32_t numMarkers, - bool cacheOnly = false) = 0; + loadInitialLedger(uint32_t sequence, std::uint32_t numMarkers, bool cacheOnly = false) = 0; virtual std::optional - forwardToRippled( - boost::json::object const& request, - std::string const& clientIp, - boost::asio::yield_context& yield) const = 0; + forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield) + const = 0; virtual boost::uuids::uuid token() const = 0; @@ -258,9 +244,7 @@ protected: auto const host = ip_; auto const port = wsPort_; - resolver_.async_resolve(host, port, 
[this](auto ec, auto results) { - onResolve(ec, results); - }); + resolver_.async_resolve(host, port, [this](auto ec, auto results) { onResolve(ec, results); }); } public: @@ -327,21 +311,18 @@ public: grpcPort_ = *value; try { - boost::asio::ip::tcp::endpoint endpoint{ - boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)}; + boost::asio::ip::tcp::endpoint endpoint{boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)}; std::stringstream ss; ss << endpoint; grpc::ChannelArguments chArgs; chArgs.SetMaxReceiveMessageSize(-1); stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub( - grpc::CreateCustomChannel( - ss.str(), grpc::InsecureChannelCredentials(), chArgs)); + grpc::CreateCustomChannel(ss.str(), grpc::InsecureChannelCredentials(), chArgs)); log_.debug() << "Made stub for remote = " << toString(); } catch (std::exception const& e) { - log_.debug() << "Exception while creating stub = " << e.what() - << " . Remote = " << toString(); + log_.debug() << "Exception while creating stub = " << e.what() << " . Remote = " << toString(); } } } @@ -397,9 +378,7 @@ public: pairs.push_back(std::make_pair(min, max)); } } - std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { - return left.first < right.first; - }); + std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { return left.first < right.first; }); // we only hold the lock here, to avoid blocking while string processing std::lock_guard lck(mtx_); @@ -422,16 +401,13 @@ public: /// and the prior one /// @return the extracted data and the result status std::pair - fetchLedger( - uint32_t ledgerSequence, - bool getObjects = true, - bool getObjectNeighbors = false) override; + fetchLedger(uint32_t ledgerSequence, bool getObjects = true, bool getObjectNeighbors = false) override; std::string toString() const override { - return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ + - ", web socket port: " + wsPort_ + ", grpc port: " + grpcPort_ + "}"; + return "{validated_ledger: " + getValidatedRange() + ", ip: " + ip_ + ", web socket port: " + wsPort_ + + ", grpc port: " + grpcPort_ + "}"; } boost::json::object @@ -446,8 +422,7 @@ public: auto last = getLastMsgTime(); if (last.time_since_epoch().count() != 0) res["last_msg_age_seconds"] = std::to_string( - std::chrono::duration_cast( - std::chrono::system_clock::now() - getLastMsgTime()) + std::chrono::duration_cast(std::chrono::system_clock::now() - getLastMsgTime()) .count()); return res; } @@ -457,10 +432,7 @@ public: /// @param writeQueue queue to push downloaded ledger objects /// @return true if the download was successful bool - loadInitialLedger( - std::uint32_t ledgerSequence, - std::uint32_t numMarkers, - bool cacheOnly = false) override; + loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly = false) override; /// Attempt to reconnect to the ETL source void @@ -484,16 +456,11 @@ public: /// Callback void - onResolve( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type results); + onResolve(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results); /// Callback virtual void - onConnect( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type::endpoint_type - endpoint) = 0; + onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) = 0; /// Callback void @@ -513,16 +480,13 @@ public: handleMessage(); std::optional - forwardToRippled( - boost::json::object const& request, - std::string 
const& clientIp, - boost::asio::yield_context& yield) const override; + forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield) + const override; }; class PlainETLSource : public ETLSourceImpl { - std::unique_ptr> - ws_; + std::unique_ptr> ws_; public: PlainETLSource( @@ -533,24 +497,14 @@ public: std::shared_ptr nwvl, ETLLoadBalancer& balancer, ETLSourceHooks hooks) - : ETLSourceImpl( - config, - ioc, - backend, - subscriptions, - nwvl, - balancer, - std::move(hooks)) - , ws_(std::make_unique< - boost::beast::websocket::stream>( + : ETLSourceImpl(config, ioc, backend, subscriptions, nwvl, balancer, std::move(hooks)) + , ws_(std::make_unique>( boost::asio::make_strand(ioc))) { } void - onConnect( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) + onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) override; /// Close the websocket @@ -569,9 +523,7 @@ class SslETLSource : public ETLSourceImpl { std::optional> sslCtx_; - std::unique_ptr>> - ws_; + std::unique_ptr>> ws_; public: SslETLSource( @@ -583,40 +535,27 @@ public: std::shared_ptr nwvl, ETLLoadBalancer& balancer, ETLSourceHooks hooks) - : ETLSourceImpl( - config, - ioc, - backend, - subscriptions, - nwvl, - balancer, - std::move(hooks)) + : ETLSourceImpl(config, ioc, backend, subscriptions, nwvl, balancer, std::move(hooks)) , sslCtx_(sslCtx) - , ws_(std::make_unique>>( + , ws_(std::make_unique>>( boost::asio::make_strand(ioc_), *sslCtx_)) { } void - onConnect( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) + onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) override; void - onSslHandshake( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint); + onSslHandshake(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint); /// Close the websocket /// @param startAgain whether to reconnect void close(bool startAgain); - boost::beast::websocket::stream< - boost::beast::ssl_stream>& + boost::beast::websocket::stream>& ws() { return *ws_; @@ -652,8 +591,7 @@ public: std::shared_ptr subscriptions, std::shared_ptr validatedLedgers) { - return std::make_shared( - config, ioc, backend, subscriptions, validatedLedgers); + return std::make_shared(config, ioc, backend, subscriptions, validatedLedgers); } ~ETLLoadBalancer() @@ -676,10 +614,7 @@ public: /// was found in the database or the server is shutting down, the optional /// will be empty std::optional - fetchLedger( - uint32_t ledgerSequence, - bool getObjects, - bool getObjectNeighbors); + fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors); /// Determine whether messages received on the transactions_proposed stream /// should be forwarded to subscribing clients. 
The server subscribes to @@ -720,10 +655,8 @@ public: /// @param request JSON-RPC request /// @return response received from rippled node std::optional - forwardToRippled( - boost::json::object const& request, - std::string const& clientIp, - boost::asio::yield_context& yield) const; + forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield) + const; private: /// f is a function that takes an ETLSource as an argument and returns a diff --git a/src/etl/NFTHelpers.cpp b/src/etl/NFTHelpers.cpp index 68e812b7..d87b28f7 100644 --- a/src/etl/NFTHelpers.cpp +++ b/src/etl/NFTHelpers.cpp @@ -44,25 +44,18 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) for (ripple::STObject const& node : txMeta.getNodes()) { - if (node.getFieldU16(ripple::sfLedgerEntryType) != - ripple::ltNFTOKEN_PAGE) + if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE) continue; if (!owner) - owner = ripple::AccountID::fromVoid( - node.getFieldH256(ripple::sfLedgerIndex).data()); + owner = ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()); if (node.getFName() == ripple::sfCreatedNode) { ripple::STArray const& toAddNFTs = - node.peekAtField(ripple::sfNewFields) - .downcast() - .getFieldArray(ripple::sfNFTokens); + node.peekAtField(ripple::sfNewFields).downcast().getFieldArray(ripple::sfNFTokens); std::transform( - toAddNFTs.begin(), - toAddNFTs.end(), - std::back_inserter(finalIDs), - [](ripple::STObject const& nft) { + toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(finalIDs), [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }); } @@ -80,32 +73,23 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) // as rippled outputs all fields in final fields even if they were // not changed. 
ripple::STObject const& previousFields = - node.peekAtField(ripple::sfPreviousFields) - .downcast(); + node.peekAtField(ripple::sfPreviousFields).downcast(); if (!previousFields.isFieldPresent(ripple::sfNFTokens)) continue; - ripple::STArray const& toAddNFTs = - previousFields.getFieldArray(ripple::sfNFTokens); + ripple::STArray const& toAddNFTs = previousFields.getFieldArray(ripple::sfNFTokens); std::transform( - toAddNFTs.begin(), - toAddNFTs.end(), - std::back_inserter(prevIDs), - [](ripple::STObject const& nft) { + toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(prevIDs), [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }); ripple::STArray const& toAddFinalNFTs = - node.peekAtField(ripple::sfFinalFields) - .downcast() - .getFieldArray(ripple::sfNFTokens); + node.peekAtField(ripple::sfFinalFields).downcast().getFieldArray(ripple::sfNFTokens); std::transform( toAddFinalNFTs.begin(), toAddFinalNFTs.end(), std::back_inserter(finalIDs), - [](ripple::STObject const& nft) { - return nft.getFieldH256(ripple::sfNFTokenID); - }); + [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }); } } @@ -120,13 +104,8 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) std::inserter(tokenIDResult, tokenIDResult.begin())); if (tokenIDResult.size() == 1 && owner) return { - {NFTTransactionsData( - tokenIDResult.front(), txMeta, sttx.getTransactionID())}, - NFTsData( - tokenIDResult.front(), - *owner, - sttx.getFieldVL(ripple::sfURI), - txMeta)}; + {NFTTransactionsData(tokenIDResult.front(), txMeta, sttx.getTransactionID())}, + NFTsData(tokenIDResult.front(), *owner, sttx.getFieldVL(ripple::sfURI), txMeta)}; std::stringstream msg; msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID(); @@ -137,16 +116,14 @@ std::pair, std::optional> getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { ripple::uint256 const tokenID = sttx.getFieldH256(ripple::sfNFTokenID); - std::vector const txs = { - NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}; + std::vector const txs = {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}; // Determine who owned the token when it was burned by finding an // NFTokenPage that was deleted or modified that contains this // tokenID. 
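The loop that follows locates the deleted or modified NFTokenPage whose previous (or final) NFTokens array still lists the burned token and takes the owner from that page. The search itself is a plain std::find_if by token ID; a reduced sketch with invented stand-in types in place of the ripple metadata structures:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Invented stand-ins for the metadata structures used above.
struct NFT { std::uint64_t tokenID; };
struct NFTokenPage { std::uint64_t owner; std::vector<NFT> previousNFTs; };

// Return the owner of the page that still listed `tokenID` before the burn, if any.
std::optional<std::uint64_t>
findOwnerAtBurn(std::vector<NFTokenPage> const& pages, std::uint64_t tokenID)
{
    for (auto const& page : pages)
    {
        auto const nft = std::find_if(page.previousNFTs.begin(), page.previousNFTs.end(), [&](NFT const& candidate) {
            return candidate.tokenID == tokenID;
        });
        if (nft != page.previousNFTs.end())
            return page.owner;
    }
    return std::nullopt;
}

int main()
{
    std::vector<NFTokenPage> pages = {{11, {{1}, {2}}}, {22, {{7}, {9}}}};
    if (auto owner = findOwnerAtBurn(pages, 9))
        std::cout << "owner at burn: " << *owner << '\n';
}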
     for (ripple::STObject const& node : txMeta.getNodes())
     {
-        if (node.getFieldU16(ripple::sfLedgerEntryType) !=
-            ripple::ltNFTOKEN_PAGE ||
+        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
             node.getFName() == ripple::sfCreatedNode)
             continue;
 
@@ -163,23 +140,19 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
         if (node.isFieldPresent(ripple::sfPreviousFields))
         {
             ripple::STObject const& previousFields =
-                node.peekAtField(ripple::sfPreviousFields)
-                    .downcast<ripple::STObject>();
+                node.peekAtField(ripple::sfPreviousFields).downcast<ripple::STObject>();
             if (previousFields.isFieldPresent(ripple::sfNFTokens))
                 prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
         }
         else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
-            prevNFTs = node.peekAtField(ripple::sfFinalFields)
-                           .downcast<ripple::STObject>()
-                           .getFieldArray(ripple::sfNFTokens);
+            prevNFTs =
+                node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
 
         if (!prevNFTs)
             continue;
 
-        auto const nft = std::find_if(
-            prevNFTs->begin(),
-            prevNFTs->end(),
-            [&tokenID](ripple::STObject const& candidate) {
+        auto const nft =
+            std::find_if(prevNFTs->begin(), prevNFTs->end(), [&tokenID](ripple::STObject const& candidate) {
                 return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
             });
         if (nft != prevNFTs->end())
@@ -187,92 +160,74 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
                 txs,
                 NFTsData(
                     tokenID,
-                    ripple::AccountID::fromVoid(
-                        node.getFieldH256(ripple::sfLedgerIndex).data()),
+                    ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()),
                     txMeta,
                     true));
     }
 
     std::stringstream msg;
-    msg << " - could not determine owner at burntime for tx "
-        << sttx.getTransactionID();
+    msg << " - could not determine owner at burntime for tx " << sttx.getTransactionID();
     throw std::runtime_error(msg.str());
 }
 
 std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
-getNFTokenAcceptOfferData(
-    ripple::TxMeta const& txMeta,
-    ripple::STTx const& sttx)
+getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
 {
     // If we have the buy offer from this tx, we can determine the owner
     // more easily by just looking at the owner of the accepted NFTokenOffer
     // object.
if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer)) { - auto const affectedBuyOffer = std::find_if( - txMeta.getNodes().begin(), - txMeta.getNodes().end(), - [&sttx](ripple::STObject const& node) { - return node.getFieldH256(ripple::sfLedgerIndex) == - sttx.getFieldH256(ripple::sfNFTokenBuyOffer); + auto const affectedBuyOffer = + std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) { + return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenBuyOffer); }); if (affectedBuyOffer == txMeta.getNodes().end()) { std::stringstream msg; - msg << " - unexpected NFTokenAcceptOffer data in tx " - << sttx.getTransactionID(); + msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID(); throw std::runtime_error(msg.str()); } - ripple::uint256 const tokenID = - affectedBuyOffer->peekAtField(ripple::sfFinalFields) - .downcast() - .getFieldH256(ripple::sfNFTokenID); + ripple::uint256 const tokenID = affectedBuyOffer->peekAtField(ripple::sfFinalFields) + .downcast() + .getFieldH256(ripple::sfNFTokenID); - ripple::AccountID const owner = - affectedBuyOffer->peekAtField(ripple::sfFinalFields) - .downcast() - .getAccountID(ripple::sfOwner); + ripple::AccountID const owner = affectedBuyOffer->peekAtField(ripple::sfFinalFields) + .downcast() + .getAccountID(ripple::sfOwner); return { - {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}, - NFTsData(tokenID, owner, txMeta, false)}; + {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}, NFTsData(tokenID, owner, txMeta, false)}; } // Otherwise we have to infer the new owner from the affected nodes. - auto const affectedSellOffer = std::find_if( - txMeta.getNodes().begin(), - txMeta.getNodes().end(), - [&sttx](ripple::STObject const& node) { - return node.getFieldH256(ripple::sfLedgerIndex) == - sttx.getFieldH256(ripple::sfNFTokenSellOffer); + auto const affectedSellOffer = + std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) { + return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenSellOffer); }); if (affectedSellOffer == txMeta.getNodes().end()) { std::stringstream msg; - msg << " - unexpected NFTokenAcceptOffer data in tx " - << sttx.getTransactionID(); + msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID(); throw std::runtime_error(msg.str()); } - ripple::uint256 const tokenID = - affectedSellOffer->peekAtField(ripple::sfFinalFields) - .downcast() - .getFieldH256(ripple::sfNFTokenID); + ripple::uint256 const tokenID = affectedSellOffer->peekAtField(ripple::sfFinalFields) + .downcast() + .getFieldH256(ripple::sfNFTokenID); - ripple::AccountID const seller = - affectedSellOffer->peekAtField(ripple::sfFinalFields) - .downcast() - .getAccountID(ripple::sfOwner); + ripple::AccountID const seller = affectedSellOffer->peekAtField(ripple::sfFinalFields) + .downcast() + .getAccountID(ripple::sfOwner); for (ripple::STObject const& node : txMeta.getNodes()) { - if (node.getFieldU16(ripple::sfLedgerEntryType) != - ripple::ltNFTOKEN_PAGE || + if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE || node.getFName() == ripple::sfDeletedNode) continue; - ripple::AccountID const nodeOwner = ripple::AccountID::fromVoid( - node.getFieldH256(ripple::sfLedgerIndex).data()); + ripple::AccountID const nodeOwner = + ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()); if (nodeOwner == seller) continue; @@ 
-286,12 +241,9 @@ getNFTokenAcceptOfferData( .getFieldArray(ripple::sfNFTokens); }(); - auto const nft = std::find_if( - nfts.begin(), - nfts.end(), - [&tokenID](ripple::STObject const& candidate) { - return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID; - }); + auto const nft = std::find_if(nfts.begin(), nfts.end(), [&tokenID](ripple::STObject const& candidate) { + return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID; + }); if (nft != nfts.end()) return { {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}, @@ -299,8 +251,7 @@ getNFTokenAcceptOfferData( } std::stringstream msg; - msg << " - unexpected NFTokenAcceptOffer data in tx " - << sttx.getTransactionID(); + msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID(); throw std::runtime_error(msg.str()); } @@ -309,40 +260,28 @@ getNFTokenAcceptOfferData( // transaction using this feature. This transaction also never returns an // NFTsData because it does not change the state of an NFT itself. std::pair, std::optional> -getNFTokenCancelOfferData( - ripple::TxMeta const& txMeta, - ripple::STTx const& sttx) +getNFTokenCancelOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { std::vector txs; for (ripple::STObject const& node : txMeta.getNodes()) { - if (node.getFieldU16(ripple::sfLedgerEntryType) != - ripple::ltNFTOKEN_OFFER) + if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_OFFER) continue; - ripple::uint256 const tokenID = node.peekAtField(ripple::sfFinalFields) - .downcast() - .getFieldH256(ripple::sfNFTokenID); + ripple::uint256 const tokenID = + node.peekAtField(ripple::sfFinalFields).downcast().getFieldH256(ripple::sfNFTokenID); txs.emplace_back(tokenID, txMeta, sttx.getTransactionID()); } // Deduplicate any transactions based on tokenID/txIdx combo. Can't just // use txIdx because in this case one tx can cancel offers for several // NFTs. - std::sort( - txs.begin(), - txs.end(), - [](NFTTransactionsData const& a, NFTTransactionsData const& b) { - return a.tokenID < b.tokenID && - a.transactionIndex < b.transactionIndex; - }); - auto last = std::unique( - txs.begin(), - txs.end(), - [](NFTTransactionsData const& a, NFTTransactionsData const& b) { - return a.tokenID == b.tokenID && - a.transactionIndex == b.transactionIndex; - }); + std::sort(txs.begin(), txs.end(), [](NFTTransactionsData const& a, NFTTransactionsData const& b) { + return a.tokenID < b.tokenID && a.transactionIndex < b.transactionIndex; + }); + auto last = std::unique(txs.begin(), txs.end(), [](NFTTransactionsData const& a, NFTTransactionsData const& b) { + return a.tokenID == b.tokenID && a.transactionIndex == b.transactionIndex; + }); txs.erase(last, txs.end()); return {txs, {}}; } @@ -350,16 +289,9 @@ getNFTokenCancelOfferData( // This transaction never returns an NFTokensData because it does not // change the state of an NFT itself. 
std::pair, std::optional> -getNFTokenCreateOfferData( - ripple::TxMeta const& txMeta, - ripple::STTx const& sttx) +getNFTokenCreateOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { - return { - {NFTTransactionsData( - sttx.getFieldH256(ripple::sfNFTokenID), - txMeta, - sttx.getTransactionID())}, - {}}; + return {{NFTTransactionsData(sttx.getFieldH256(ripple::sfNFTokenID), txMeta, sttx.getTransactionID())}, {}}; } std::pair, std::optional> @@ -391,26 +323,18 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) } std::vector -getNFTDataFromObj( - std::uint32_t const seq, - std::string const& key, - std::string const& blob) +getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob) { std::vector nfts; - ripple::STLedgerEntry const sle = ripple::STLedgerEntry( - ripple::SerialIter{blob.data(), blob.size()}, - ripple::uint256::fromVoid(key.data())); + ripple::STLedgerEntry const sle = + ripple::STLedgerEntry(ripple::SerialIter{blob.data(), blob.size()}, ripple::uint256::fromVoid(key.data())); if (sle.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE) return nfts; auto const owner = ripple::AccountID::fromVoid(key.data()); for (ripple::STObject const& node : sle.getFieldArray(ripple::sfNFTokens)) - nfts.emplace_back( - node.getFieldH256(ripple::sfNFTokenID), - seq, - owner, - node.getFieldVL(ripple::sfURI)); + nfts.emplace_back(node.getFieldH256(ripple::sfNFTokenID), seq, owner, node.getFieldVL(ripple::sfURI)); return nfts; } diff --git a/src/etl/NFTHelpers.h b/src/etl/NFTHelpers.h index e0e64cfc..597ff272 100644 --- a/src/etl/NFTHelpers.h +++ b/src/etl/NFTHelpers.h @@ -30,7 +30,4 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx); // Pulling from ledger object via loadInitialLedger std::vector -getNFTDataFromObj( - std::uint32_t const seq, - std::string const& key, - std::string const& blob); +getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob); diff --git a/src/etl/ProbingETLSource.cpp b/src/etl/ProbingETLSource.cpp index 77c9efbe..ec08f36c 100644 --- a/src/etl/ProbingETLSource.cpp +++ b/src/etl/ProbingETLSource.cpp @@ -31,23 +31,9 @@ ProbingETLSource::ProbingETLSource( ETLLoadBalancer& balancer, boost::asio::ssl::context sslCtx) : sslCtx_{std::move(sslCtx)} - , sslSrc_{make_shared( - config, - ioc, - std::ref(sslCtx_), - backend, - subscriptions, - nwvl, - balancer, - make_SSLHooks())} - , plainSrc_{make_shared( - config, - ioc, - backend, - subscriptions, - nwvl, - balancer, - make_PlainHooks())} + , sslSrc_{make_shared< + SslETLSource>(config, ioc, std::ref(sslCtx_), backend, subscriptions, nwvl, balancer, make_SSLHooks())} + , plainSrc_{make_shared(config, ioc, backend, subscriptions, nwvl, balancer, make_PlainHooks())} { } @@ -107,8 +93,7 @@ std::string ProbingETLSource::toString() const { if (!currentSrc_) - return "{probing... ws: " + plainSrc_->toString() + - ", wss: " + sslSrc_->toString() + "}"; + return "{probing... 
ws: " + plainSrc_->toString() + ", wss: " + sslSrc_->toString() + "}"; return currentSrc_->toString(); } @@ -121,27 +106,19 @@ ProbingETLSource::token() const } bool -ProbingETLSource::loadInitialLedger( - std::uint32_t ledgerSequence, - std::uint32_t numMarkers, - bool cacheOnly) +ProbingETLSource::loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly) { if (!currentSrc_) return false; - return currentSrc_->loadInitialLedger( - ledgerSequence, numMarkers, cacheOnly); + return currentSrc_->loadInitialLedger(ledgerSequence, numMarkers, cacheOnly); } std::pair -ProbingETLSource::fetchLedger( - uint32_t ledgerSequence, - bool getObjects, - bool getObjectNeighbors) +ProbingETLSource::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors) { if (!currentSrc_) return {}; - return currentSrc_->fetchLedger( - ledgerSequence, getObjects, getObjectNeighbors); + return currentSrc_->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors); } std::optional @@ -179,8 +156,7 @@ ProbingETLSource::make_SSLHooks() noexcept { plainSrc_->pause(); currentSrc_ = sslSrc_; - log_.info() << "Selected WSS as the main source: " - << currentSrc_->toString(); + log_.info() << "Selected WSS as the main source: " << currentSrc_->toString(); } return ETLSourceHooks::Action::PROCEED; }, @@ -209,8 +185,7 @@ ProbingETLSource::make_PlainHooks() noexcept { sslSrc_->pause(); currentSrc_ = plainSrc_; - log_.info() << "Selected Plain WS as the main source: " - << currentSrc_->toString(); + log_.info() << "Selected Plain WS as the main source: " << currentSrc_->toString(); } return ETLSourceHooks::Action::PROCEED; }, diff --git a/src/etl/ProbingETLSource.h b/src/etl/ProbingETLSource.h index ad936535..89cc2854 100644 --- a/src/etl/ProbingETLSource.h +++ b/src/etl/ProbingETLSource.h @@ -53,8 +53,7 @@ public: std::shared_ptr subscriptions, std::shared_ptr nwvl, ETLLoadBalancer& balancer, - boost::asio::ssl::context sslCtx = boost::asio::ssl::context{ - boost::asio::ssl::context::tlsv12}); + boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12}); ~ProbingETLSource() = default; @@ -80,22 +79,14 @@ public: toString() const override; bool - loadInitialLedger( - std::uint32_t ledgerSequence, - std::uint32_t numMarkers, - bool cacheOnly = false) override; + loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly = false) override; std::pair - fetchLedger( - uint32_t ledgerSequence, - bool getObjects = true, - bool getObjectNeighbors = false) override; + fetchLedger(uint32_t ledgerSequence, bool getObjects = true, bool getObjectNeighbors = false) override; std::optional - forwardToRippled( - boost::json::object const& request, - std::string const& clientIp, - boost::asio::yield_context& yield) const override; + forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield) + const override; boost::uuids::uuid token() const override; diff --git a/src/etl/ReportingETL.cpp b/src/etl/ReportingETL.cpp index 2ecd4e2b..d7194f4e 100644 --- a/src/etl/ReportingETL.cpp +++ b/src/etl/ReportingETL.cpp @@ -47,23 +47,19 @@ std::string toString(ripple::LedgerInfo const& info) { std::stringstream ss; - ss << "LedgerInfo { Sequence : " << info.seq - << " Hash : " << strHex(info.hash) << " TxHash : " << strHex(info.txHash) - << " AccountHash : " << strHex(info.accountHash) + ss << "LedgerInfo { Sequence : " << info.seq << " Hash : " << strHex(info.hash) + << " TxHash : " 
<< strHex(info.txHash) << " AccountHash : " << strHex(info.accountHash) << " ParentHash : " << strHex(info.parentHash) << " }"; return ss.str(); } } // namespace clio::detail FormattedTransactionsData -ReportingETL::insertTransactions( - ripple::LedgerInfo const& ledger, - org::xrpl::rpc::v1::GetLedgerResponse& data) +ReportingETL::insertTransactions(ripple::LedgerInfo const& ledger, org::xrpl::rpc::v1::GetLedgerResponse& data) { FormattedTransactionsData result; - for (auto& txn : - *(data.mutable_transactions_list()->mutable_transactions())) + for (auto& txn : *(data.mutable_transactions_list()->mutable_transactions())) { std::string* raw = txn.mutable_transaction_blob(); @@ -72,18 +68,15 @@ ReportingETL::insertTransactions( log_.trace() << "Inserting transaction = " << sttx.getTransactionID(); - ripple::TxMeta txMeta{ - sttx.getTransactionID(), ledger.seq, txn.metadata_blob()}; + ripple::TxMeta txMeta{sttx.getTransactionID(), ledger.seq, txn.metadata_blob()}; auto const [nftTxs, maybeNFT] = getNFTDataFromTx(txMeta, sttx); - result.nfTokenTxData.insert( - result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end()); + result.nfTokenTxData.insert(result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end()); if (maybeNFT) result.nfTokensData.push_back(*maybeNFT); auto journal = ripple::debugLog(); - result.accountTxData.emplace_back( - txMeta, sttx.getTransactionID(), journal); + result.accountTxData.emplace_back(txMeta, sttx.getTransactionID(), journal); std::string keyStr{(const char*)sttx.getTransactionID().data(), 32}; backend_->writeTransaction( std::move(keyStr), @@ -96,18 +89,12 @@ ReportingETL::insertTransactions( // Remove all but the last NFTsData for each id. unique removes all // but the first of a group, so we want to reverse sort by transaction // index - std::sort( - result.nfTokensData.begin(), - result.nfTokensData.end(), - [](NFTsData const& a, NFTsData const& b) { - return a.tokenID > b.tokenID && - a.transactionIndex > b.transactionIndex; - }); + std::sort(result.nfTokensData.begin(), result.nfTokensData.end(), [](NFTsData const& a, NFTsData const& b) { + return a.tokenID > b.tokenID && a.transactionIndex > b.transactionIndex; + }); // Now we can unique the NFTs by tokenID. - auto last = std::unique( - result.nfTokensData.begin(), - result.nfTokensData.end(), - [](NFTsData const& a, NFTsData const& b) { + auto last = + std::unique(result.nfTokensData.begin(), result.nfTokensData.end(), [](NFTsData const& a, NFTsData const& b) { return a.tokenID == b.tokenID; }); result.nfTokensData.erase(last, result.nfTokensData.end()); @@ -130,13 +117,11 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) // fetch the ledger from the network. This function will not return until // either the fetch is successful, or the server is being shutdown. This // only fetches the ledger header and the transactions+metadata - std::optional ledgerData{ - fetchLedgerData(startingSequence)}; + std::optional ledgerData{fetchLedgerData(startingSequence)}; if (!ledgerData) return {}; - ripple::LedgerInfo lgrInfo = - deserializeHeader(ripple::makeSlice(ledgerData->ledger_header())); + ripple::LedgerInfo lgrInfo = deserializeHeader(ripple::makeSlice(ledgerData->ledger_header())); log_.debug() << "Deserialized ledger header. 
" << detail::toString(lgrInfo); @@ -145,12 +130,10 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) log_.debug() << "Started writes"; - backend_->writeLedger( - lgrInfo, std::move(*ledgerData->mutable_ledger_header())); + backend_->writeLedger(lgrInfo, std::move(*ledgerData->mutable_ledger_header())); log_.debug() << "Wrote ledger"; - FormattedTransactionsData insertTxResult = - insertTransactions(lgrInfo, *ledgerData); + FormattedTransactionsData insertTxResult = insertTransactions(lgrInfo, *ledgerData); log_.debug() << "Inserted txns"; // download the full account state map. This function downloads full @@ -164,11 +147,9 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) if (!stopping_) { - backend_->writeAccountTransactions( - std::move(insertTxResult.accountTxData)); + backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData)); backend_->writeNFTs(std::move(insertTxResult.nfTokensData)); - backend_->writeNFTTransactions( - std::move(insertTxResult.nfTokenTxData)); + backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData)); } backend_->finishWrites(startingSequence); }); @@ -185,10 +166,8 @@ ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo) { log_.info() << "Updating cache"; - std::vector diff = - Backend::synchronousAndRetryOnTimeout([&](auto yield) { - return backend_->fetchLedgerDiff(lgrInfo.seq, yield); - }); + std::vector diff = Backend::synchronousAndRetryOnTimeout( + [&](auto yield) { return backend_->fetchLedgerDiff(lgrInfo.seq, yield); }); backend_->cache().update(diff, lgrInfo.seq); backend_->updateRange(lgrInfo.seq); @@ -201,22 +180,16 @@ ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo) if (age < 600) { std::optional fees = - Backend::synchronousAndRetryOnTimeout([&](auto yield) { - return backend_->fetchFees(lgrInfo.seq, yield); - }); + Backend::synchronousAndRetryOnTimeout([&](auto yield) { return backend_->fetchFees(lgrInfo.seq, yield); }); - std::vector transactions = - Backend::synchronousAndRetryOnTimeout([&](auto yield) { - return backend_->fetchAllTransactionsInLedger( - lgrInfo.seq, yield); - }); + std::vector transactions = Backend::synchronousAndRetryOnTimeout( + [&](auto yield) { return backend_->fetchAllTransactionsInLedger(lgrInfo.seq, yield); }); auto ledgerRange = backend_->fetchLedgerRange(); assert(ledgerRange); assert(fees); - std::string range = std::to_string(ledgerRange->minSequence) + "-" + - std::to_string(ledgerRange->maxSequence); + std::string range = std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence); subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size()); @@ -228,15 +201,12 @@ ReportingETL::publishLedger(ripple::LedgerInfo const& lgrInfo) log_.info() << "Published ledger " << std::to_string(lgrInfo.seq); } else - log_.info() << "Skipping publishing ledger " - << std::to_string(lgrInfo.seq); + log_.info() << "Skipping publishing ledger " << std::to_string(lgrInfo.seq); setLastPublish(); } bool -ReportingETL::publishLedger( - uint32_t ledgerSequence, - std::optional maxAttempts) +ReportingETL::publishLedger(uint32_t ledgerSequence, std::optional maxAttempts) { log_.info() << "Attempting to publish ledger = " << ledgerSequence; size_t numAttempts = 0; @@ -253,8 +223,7 @@ ReportingETL::publishLedger( // second in between each attempt. 
if (maxAttempts && numAttempts >= maxAttempts) { - log_.debug() << "Failed to publish ledger after " << numAttempts - << " attempts."; + log_.debug() << "Failed to publish ledger after " << numAttempts << " attempts."; return false; } std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -263,9 +232,8 @@ ReportingETL::publishLedger( } else { - auto lgr = Backend::synchronousAndRetryOnTimeout([&](auto yield) { - return backend_->fetchLedgerBySequence(ledgerSequence, yield); - }); + auto lgr = Backend::synchronousAndRetryOnTimeout( + [&](auto yield) { return backend_->fetchLedgerBySequence(ledgerSequence, yield); }); assert(lgr); publishLedger(*lgr); @@ -281,8 +249,7 @@ ReportingETL::fetchLedgerData(uint32_t seq) { log_.debug() << "Attempting to fetch ledger with sequence = " << seq; - std::optional response = - loadBalancer_->fetchLedger(seq, false, false); + std::optional response = loadBalancer_->fetchLedger(seq, false, false); if (response) log_.trace() << "GetLedger reply = " << response->DebugString(); return response; @@ -293,12 +260,8 @@ ReportingETL::fetchLedgerDataAndDiff(uint32_t seq) { log_.debug() << "Attempting to fetch ledger with sequence = " << seq; - std::optional response = - loadBalancer_->fetchLedger( - seq, - true, - !backend_->cache().isFull() || - backend_->cache().latestLedgerSequence() >= seq); + std::optional response = loadBalancer_->fetchLedger( + seq, true, !backend_->cache().isFull() || backend_->cache().latestLedgerSequence() >= seq); if (response) log_.trace() << "GetLedger reply = " << response->DebugString(); return response; @@ -308,8 +271,7 @@ std::pair ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) { log_.debug() << "Beginning ledger update"; - ripple::LedgerInfo lgrInfo = - deserializeHeader(ripple::makeSlice(rawData.ledger_header())); + ripple::LedgerInfo lgrInfo = deserializeHeader(ripple::makeSlice(rawData.ledger_header())); log_.debug() << "Deserialized ledger header. 
" << detail::toString(lgrInfo); backend_->startWrites(); @@ -327,14 +289,10 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) auto firstBook = std::move(*obj.mutable_first_book()); if (!firstBook.size()) firstBook = uint256ToString(Backend::lastKey); - log_.debug() << "writing book successor " - << ripple::strHex(obj.book_base()) << " - " + log_.debug() << "writing book successor " << ripple::strHex(obj.book_base()) << " - " << ripple::strHex(firstBook); - backend_->writeSuccessor( - std::move(*obj.mutable_book_base()), - lgrInfo.seq, - std::move(firstBook)); + backend_->writeSuccessor(std::move(*obj.mutable_book_base()), lgrInfo.seq, std::move(firstBook)); } for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects())) { @@ -347,32 +305,20 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) if (!succPtr->size()) *succPtr = uint256ToString(Backend::lastKey); - if (obj.mod_type() == - org::xrpl::rpc::v1::RawLedgerObject::DELETED) + if (obj.mod_type() == org::xrpl::rpc::v1::RawLedgerObject::DELETED) { - log_.debug() << "Modifying successors for deleted object " - << ripple::strHex(obj.key()) << " - " - << ripple::strHex(*predPtr) << " - " - << ripple::strHex(*succPtr); + log_.debug() << "Modifying successors for deleted object " << ripple::strHex(obj.key()) << " - " + << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr); - backend_->writeSuccessor( - std::move(*predPtr), lgrInfo.seq, std::move(*succPtr)); + backend_->writeSuccessor(std::move(*predPtr), lgrInfo.seq, std::move(*succPtr)); } else { - log_.debug() << "adding successor for new object " - << ripple::strHex(obj.key()) << " - " - << ripple::strHex(*predPtr) << " - " - << ripple::strHex(*succPtr); + log_.debug() << "adding successor for new object " << ripple::strHex(obj.key()) << " - " + << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr); - backend_->writeSuccessor( - std::move(*predPtr), - lgrInfo.seq, - std::string{obj.key()}); - backend_->writeSuccessor( - std::string{obj.key()}, - lgrInfo.seq, - std::move(*succPtr)); + backend_->writeSuccessor(std::move(*predPtr), lgrInfo.seq, std::string{obj.key()}); + backend_->writeSuccessor(std::string{obj.key()}, lgrInfo.seq, std::move(*succPtr)); } } else @@ -388,17 +334,13 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) { auto key = ripple::uint256::fromVoidChecked(obj.key()); assert(key); - cacheUpdates.push_back( - {*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}}); - log_.debug() << "key = " << ripple::strHex(*key) - << " - mod type = " << obj.mod_type(); + cacheUpdates.push_back({*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}}); + log_.debug() << "key = " << ripple::strHex(*key) << " - mod type = " << obj.mod_type(); - if (obj.mod_type() != org::xrpl::rpc::v1::RawLedgerObject::MODIFIED && - !rawData.object_neighbors_included()) + if (obj.mod_type() != org::xrpl::rpc::v1::RawLedgerObject::MODIFIED && !rawData.object_neighbors_included()) { log_.debug() << "object neighbors not included. using cache"; - if (!backend_->cache().isFull() || - backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1) + if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1) throw std::runtime_error( "Cache is not full, but object neighbors were not " "included"); @@ -417,20 +359,15 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) { log_.debug() << "Is book dir. 
key = " << ripple::strHex(*key); auto bookBase = getBookBase(*key); - auto oldFirstDir = - backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1); + auto oldFirstDir = backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1); assert(oldFirstDir); // We deleted the first directory, or we added a directory prior // to the old first directory - if ((isDeleted && key == oldFirstDir->key) || - (!isDeleted && key < oldFirstDir->key)) + if ((isDeleted && key == oldFirstDir->key) || (!isDeleted && key < oldFirstDir->key)) { - log_.debug() - << "Need to recalculate book base successor. base = " - << ripple::strHex(bookBase) - << " - key = " << ripple::strHex(*key) - << " - isDeleted = " << isDeleted - << " - seq = " << lgrInfo.seq; + log_.debug() << "Need to recalculate book base successor. base = " << ripple::strHex(bookBase) + << " - key = " << ripple::strHex(*key) << " - isDeleted = " << isDeleted + << " - seq = " << lgrInfo.seq; bookSuccessorsToCalculate.insert(bookBase); } } @@ -438,18 +375,14 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) if (obj.mod_type() == org::xrpl::rpc::v1::RawLedgerObject::MODIFIED) modified.insert(*key); - backend_->writeLedgerObject( - std::move(*obj.mutable_key()), - lgrInfo.seq, - std::move(*obj.mutable_data())); + backend_->writeLedgerObject(std::move(*obj.mutable_key()), lgrInfo.seq, std::move(*obj.mutable_data())); } backend_->cache().update(cacheUpdates, lgrInfo.seq); // rippled didn't send successor information, so use our cache if (!rawData.object_neighbors_included()) { log_.debug() << "object neighbors not included. using cache"; - if (!backend_->cache().isFull() || - backend_->cache().latestLedgerSequence() != lgrInfo.seq) + if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq) throw std::runtime_error( "Cache is not full, but object neighbors were not " "included"); @@ -465,31 +398,18 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) ub = {Backend::lastKey, {}}; if (obj.blob.size() == 0) { - log_.debug() << "writing successor for deleted object " - << ripple::strHex(obj.key) << " - " - << ripple::strHex(lb->key) << " - " - << ripple::strHex(ub->key); + log_.debug() << "writing successor for deleted object " << ripple::strHex(obj.key) << " - " + << ripple::strHex(lb->key) << " - " << ripple::strHex(ub->key); - backend_->writeSuccessor( - uint256ToString(lb->key), - lgrInfo.seq, - uint256ToString(ub->key)); + backend_->writeSuccessor(uint256ToString(lb->key), lgrInfo.seq, uint256ToString(ub->key)); } else { - backend_->writeSuccessor( - uint256ToString(lb->key), - lgrInfo.seq, - uint256ToString(obj.key)); - backend_->writeSuccessor( - uint256ToString(obj.key), - lgrInfo.seq, - uint256ToString(ub->key)); + backend_->writeSuccessor(uint256ToString(lb->key), lgrInfo.seq, uint256ToString(obj.key)); + backend_->writeSuccessor(uint256ToString(obj.key), lgrInfo.seq, uint256ToString(ub->key)); - log_.debug() << "writing successor for new object " - << ripple::strHex(lb->key) << " - " - << ripple::strHex(obj.key) << " - " - << ripple::strHex(ub->key); + log_.debug() << "writing successor for new object " << ripple::strHex(lb->key) << " - " + << ripple::strHex(obj.key) << " - " << ripple::strHex(ub->key); } } for (auto const& base : bookSuccessorsToCalculate) @@ -497,34 +417,24 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) auto succ = backend_->cache().getSuccessor(base, lgrInfo.seq); if (succ) { - backend_->writeSuccessor( - 
uint256ToString(base), - lgrInfo.seq, - uint256ToString(succ->key)); + backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(succ->key)); - log_.debug() - << "Updating book successor " << ripple::strHex(base) - << " - " << ripple::strHex(succ->key); + log_.debug() << "Updating book successor " << ripple::strHex(base) << " - " + << ripple::strHex(succ->key); } else { - backend_->writeSuccessor( - uint256ToString(base), - lgrInfo.seq, - uint256ToString(Backend::lastKey)); + backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(Backend::lastKey)); - log_.debug() - << "Updating book successor " << ripple::strHex(base) - << " - " << ripple::strHex(Backend::lastKey); + log_.debug() << "Updating book successor " << ripple::strHex(base) << " - " + << ripple::strHex(Backend::lastKey); } } } - log_.debug() - << "Inserted/modified/deleted all objects. Number of objects = " - << rawData.ledger_objects().objects_size(); - FormattedTransactionsData insertTxResult = - insertTransactions(lgrInfo, rawData); + log_.debug() << "Inserted/modified/deleted all objects. Number of objects = " + << rawData.ledger_objects().objects_size(); + FormattedTransactionsData insertTxResult = insertTransactions(lgrInfo, rawData); log_.debug() << "Inserted all transactions. Number of transactions = " << rawData.transactions_list().transactions_size(); backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData)); @@ -532,8 +442,8 @@ ReportingETL::buildNextLedger(org::xrpl::rpc::v1::GetLedgerResponse& rawData) backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData)); log_.debug() << "wrote account_tx"; - auto [success, duration] = util::timed>( - [&]() { return backend_->finishWrites(lgrInfo.seq); }); + auto [success, duration] = + util::timed>([&]() { return backend_->finishWrites(lgrInfo.seq); }); log_.debug() << "Finished writes. took " << std::to_string(duration); log_.debug() << "Finished ledger update. " << detail::toString(lgrInfo); @@ -585,12 +495,10 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) std::optional lastPublishedSequence; uint32_t maxQueueSize = 1000 / numExtractors; auto begin = std::chrono::system_clock::now(); - using QueueType = - ThreadSafeQueue>; + using QueueType = ThreadSafeQueue>; std::vector> queues; - auto getNext = [&queues, &startSequence, &numExtractors]( - uint32_t sequence) -> std::shared_ptr { + auto getNext = [&queues, &startSequence, &numExtractors](uint32_t sequence) -> std::shared_ptr { return queues[(sequence - startSequence) % numExtractors]; }; std::vector extractors; @@ -599,12 +507,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) auto transformQueue = std::make_shared(maxQueueSize); queues.push_back(transformQueue); - extractors.emplace_back([this, - &startSequence, - &writeConflict, - transformQueue, - i, - numExtractors]() { + extractors.emplace_back([this, &startSequence, &writeConflict, transformQueue, i, numExtractors]() { beast::setCurrentThreadName("rippled: ReportingETL extract"); uint32_t currentSequence = startSequence + i; @@ -616,14 +519,11 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) // the entire server is shutting down. This can be detected in a // variety of ways. 
See the comment at the top of the function while ((!finishSequence_ || currentSequence <= *finishSequence_) && - networkValidatedLedgers_->waitUntilValidatedByNetwork( - currentSequence) && - !writeConflict && !isStopping()) + networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence) && !writeConflict && + !isStopping()) { - auto [fetchResponse, time] = - util::timed>([&]() { - return fetchLedgerDataAndDiff(currentSequence); - }); + auto [fetchResponse, time] = util::timed>( + [&]() { return fetchLedgerDataAndDiff(currentSequence); }); totalTime += time; // if the fetch is unsuccessful, stop. fetchLedger only @@ -637,16 +537,11 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) { break; } - auto tps = - fetchResponse->transactions_list().transactions_size() / - time; + auto tps = fetchResponse->transactions_list().transactions_size() / time; - log_.info() << "Extract phase time = " << time - << " . Extract phase tps = " << tps - << " . Avg extract time = " - << totalTime / (currentSequence - startSequence + 1) - << " . thread num = " << i - << " . seq = " << currentSequence; + log_.info() << "Extract phase time = " << time << " . Extract phase tps = " << tps + << " . Avg extract time = " << totalTime / (currentSequence - startSequence + 1) + << " . thread num = " << i << " . seq = " << currentSequence; transformQueue->push(std::move(fetchResponse)); currentSequence += numExtractors; @@ -658,19 +553,13 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) }); } - std::thread transformer{[this, - &minSequence, - &writeConflict, - &startSequence, - &getNext, - &lastPublishedSequence]() { + std::thread transformer{[this, &minSequence, &writeConflict, &startSequence, &getNext, &lastPublishedSequence]() { beast::setCurrentThreadName("rippled: ReportingETL transform"); uint32_t currentSequence = startSequence; while (!writeConflict) { - std::optional fetchResponse{ - getNext(currentSequence)->pop()}; + std::optional fetchResponse{getNext(currentSequence)->pop()}; ++currentSequence; // if fetchResponse is an empty optional, the extracter thread // has stopped and the transformer should stop as well @@ -681,8 +570,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) if (isStopping()) continue; - auto numTxns = - fetchResponse->transactions_list().transactions_size(); + auto numTxns = fetchResponse->transactions_list().transactions_size(); auto numObjects = fetchResponse->ledger_objects().objects_size(); auto start = std::chrono::system_clock::now(); auto [lgrInfo, success] = buildNextLedger(*fetchResponse); @@ -690,40 +578,31 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) auto duration = ((end - start).count()) / 1000000000.0; if (success) - log_.info() - << "Load phase of etl : " - << "Successfully wrote ledger! Ledger info: " - << detail::toString(lgrInfo) << ". txn count = " << numTxns - << ". object count = " << numObjects - << ". load time = " << duration - << ". load txns per second = " << numTxns / duration - << ". load objs per second = " << numObjects / duration; + log_.info() << "Load phase of etl : " + << "Successfully wrote ledger! Ledger info: " << detail::toString(lgrInfo) + << ". txn count = " << numTxns << ". object count = " << numObjects + << ". load time = " << duration << ". load txns per second = " << numTxns / duration + << ". load objs per second = " << numObjects / duration; else - log_.error() - << "Error writing ledger. 
" << detail::toString(lgrInfo); + log_.error() << "Error writing ledger. " << detail::toString(lgrInfo); // success is false if the ledger was already written if (success) { - boost::asio::post(publishStrand_, [this, lgrInfo = lgrInfo]() { - publishLedger(lgrInfo); - }); + boost::asio::post(publishStrand_, [this, lgrInfo = lgrInfo]() { publishLedger(lgrInfo); }); lastPublishedSequence = lgrInfo.seq; } writeConflict = !success; // TODO move online delete logic to an admin RPC call - if (onlineDeleteInterval_ && !deleting_ && - lgrInfo.seq - minSequence > *onlineDeleteInterval_) + if (onlineDeleteInterval_ && !deleting_ && lgrInfo.seq - minSequence > *onlineDeleteInterval_) { deleting_ = true; ioContext_.post([this, &minSequence]() { log_.info() << "Running online delete"; - Backend::synchronous( - [&](boost::asio::yield_context& yield) { - backend_->doOnlineDelete( - *onlineDeleteInterval_, yield); - }); + Backend::synchronous([&](boost::asio::yield_context& yield) { + backend_->doOnlineDelete(*onlineDeleteInterval_, yield); + }); log_.info() << "Finished online delete"; auto rng = backend_->fetchLedgerRange(); @@ -744,8 +623,7 @@ ReportingETL::runETLPipeline(uint32_t startSequence, int numExtractors) for (auto& t : extractors) t.join(); auto end = std::chrono::system_clock::now(); - log_.debug() << "Extracted and wrote " - << *lastPublishedSequence - startSequence << " in " + log_.debug() << "Extracted and wrote " << *lastPublishedSequence - startSequence << " in " << ((end - begin).count()) / 1000000000.0; writing_ = false; @@ -776,20 +654,16 @@ ReportingETL::monitor() if (startSequence_) { log_.info() << "ledger sequence specified in config. " - << "Will begin ETL process starting with ledger " - << *startSequence_; + << "Will begin ETL process starting with ledger " << *startSequence_; ledger = loadInitialLedger(*startSequence_); } else { - log_.info() - << "Waiting for next ledger to be validated by network..."; - std::optional mostRecentValidated = - networkValidatedLedgers_->getMostRecent(); + log_.info() << "Waiting for next ledger to be validated by network..."; + std::optional mostRecentValidated = networkValidatedLedgers_->getMostRecent(); if (mostRecentValidated) { - log_.info() << "Ledger " << *mostRecentValidated - << " has been validated. " + log_.info() << "Ledger " << *mostRecentValidated << " has been validated. " << "Downloading..."; ledger = loadInitialLedger(*mostRecentValidated); } @@ -805,8 +679,7 @@ ReportingETL::monitor() rng = backend_->hardFetchLedgerRangeNoThrow(); else { - log_.error() - << "Failed to load initial ledger. Exiting monitor loop"; + log_.error() << "Failed to load initial ledger. Exiting monitor loop"; return; } } @@ -814,11 +687,9 @@ ReportingETL::monitor() { if (startSequence_) { - log_.warn() - << "start sequence specified but db is already populated"; + log_.warn() << "start sequence specified but db is already populated"; } - log_.info() - << "Database already populated. Picking up from the tip of history"; + log_.info() << "Database already populated. Picking up from the tip of history"; loadCache(rng->maxSequence); } assert(rng); @@ -828,17 +699,14 @@ ReportingETL::monitor() << "Starting monitor loop. 
sequence = " << nextSequence; while (true) { - if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); - rng && rng->maxSequence >= nextSequence) + if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence) { publishLedger(nextSequence, {}); ++nextSequence; } - else if (networkValidatedLedgers_->waitUntilValidatedByNetwork( - nextSequence, 1000)) + else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000)) { - log_.info() << "Ledger with sequence = " << nextSequence - << " has been validated by the network. " + log_.info() << "Ledger with sequence = " << nextSequence << " has been validated by the network. " << "Attempting to find in database and publish"; // Attempt to take over responsibility of ETL writer after 10 failed // attempts to publish the ledger. publishLedger() fails if the @@ -850,12 +718,10 @@ ReportingETL::monitor() bool success = publishLedger(nextSequence, timeoutSeconds); if (!success) { - log_.warn() << "Failed to publish ledger with sequence = " - << nextSequence << " . Beginning ETL"; + log_.warn() << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL"; // doContinousETLPipelined returns the most recent sequence // published empty optional if no sequence was published - std::optional lastPublished = - runETLPipeline(nextSequence, extractorThreads_); + std::optional lastPublished = runETLPipeline(nextSequence, extractorThreads_); log_.info() << "Aborting ETL. Falling back to publishing"; // if no ledger was published, don't increment nextSequence if (lastPublished) @@ -873,8 +739,7 @@ ReportingETL::loadCacheFromClioPeer( std::string const& port, boost::asio::yield_context& yield) { - log_.info() << "Loading cache from peer. ip = " << ip - << " . port = " << port; + log_.info() << "Loading cache from peer. ip = " << ip << " . port = " << port; namespace beast = boost::beast; // from namespace http = beast::http; // from namespace websocket = beast::websocket; // from @@ -887,8 +752,7 @@ ReportingETL::loadCacheFromClioPeer( tcp::resolver resolver{ioContext_}; log_.trace() << "Creating websocket"; - auto ws = - std::make_unique>(ioContext_); + auto ws = std::make_unique>(ioContext_); // Look up the domain name auto const results = resolver.async_resolve(ip, port, yield[ec]); @@ -928,9 +792,7 @@ ReportingETL::loadCacheFromClioPeer( do { // Send the message - ws->async_write( - net::buffer(boost::json::serialize(getRequest(marker))), - yield[ec]); + ws->async_write(net::buffer(boost::json::serialize(getRequest(marker))), yield[ec]); if (ec) { log_.error() << "error writing = " << ec.message(); @@ -955,8 +817,7 @@ ReportingETL::loadCacheFromClioPeer( } log_.trace() << "Successfully parsed response " << parsed; - if (auto const& response = parsed.as_object(); - response.contains("error")) + if (auto const& response = parsed.as_object(); response.contains("error")) { log_.error() << "Response contains error: " << response; auto const& err = response.at("error"); @@ -965,15 +826,13 @@ ReportingETL::loadCacheFromClioPeer( ++numAttempts; if (numAttempts >= 5) { - log_.error() - << " ledger not found at peer after 5 attempts. " - "peer = " - << ip << " ledger = " << ledgerIndex - << ". Check your config and the health of the peer"; + log_.error() << " ledger not found at peer after 5 attempts. " + "peer = " + << ip << " ledger = " << ledgerIndex + << ". Check your config and the health of the peer"; return false; } - log_.warn() << "Ledger not found. ledger = " << ledgerIndex - << ". 
Sleeping and trying again"; + log_.warn() << "Ledger not found. ledger = " << ledgerIndex << ". Sleeping and trying again"; std::this_thread::sleep_for(std::chrono::seconds(1)); continue; } @@ -982,8 +841,7 @@ ReportingETL::loadCacheFromClioPeer( started = true; auto const& response = parsed.as_object()["result"].as_object(); - if (!response.contains("cache_full") || - !response.at("cache_full").as_bool()) + if (!response.contains("cache_full") || !response.at("cache_full").as_bool()) { log_.error() << "cache not full for clio node. ip = " << ip; return false; @@ -1003,15 +861,12 @@ ReportingETL::loadCacheFromClioPeer( Backend::LedgerObject stateObject = {}; - if (!stateObject.key.parseHex( - obj.at("index").as_string().c_str())) + if (!stateObject.key.parseHex(obj.at("index").as_string().c_str())) { log_.error() << "failed to parse object id"; return false; } - boost::algorithm::unhex( - obj.at("data").as_string().c_str(), - std::back_inserter(stateObject.blob)); + boost::algorithm::unhex(obj.at("data").as_string().c_str(), std::back_inserter(stateObject.blob)); objects.push_back(std::move(stateObject)); } backend_->cache().update(objects, ledgerIndex, true); @@ -1020,16 +875,14 @@ ReportingETL::loadCacheFromClioPeer( log_.debug() << "At marker " << *marker; } while (marker || !started); - log_.info() << "Finished downloading ledger from clio node. ip = " - << ip; + log_.info() << "Finished downloading ledger from clio node. ip = " << ip; backend_->cache().setFull(); return true; } catch (std::exception const& e) { - log_.error() << "Encountered exception : " << e.what() - << " - ip = " << ip; + log_.error() << "Encountered exception : " << e.what() << " - ip = " << ip; return false; } } @@ -1059,18 +912,16 @@ ReportingETL::loadCache(uint32_t seq) if (clioPeers.size() > 0) { - boost::asio::spawn( - ioContext_, [this, seq](boost::asio::yield_context yield) { - for (auto const& peer : clioPeers) - { - // returns true on success - if (loadCacheFromClioPeer( - seq, peer.ip, std::to_string(peer.port), yield)) - return; - } - // if we couldn't successfully load from any peers, load from db - loadCacheFromDb(seq); - }); + boost::asio::spawn(ioContext_, [this, seq](boost::asio::yield_context yield) { + for (auto const& peer : clioPeers) + { + // returns true on success + if (loadCacheFromClioPeer(seq, peer.ip, std::to_string(peer.port), yield)) + return; + } + // if we couldn't successfully load from any peers, load from db + loadCacheFromDb(seq); + }); return; } else @@ -1078,14 +929,11 @@ ReportingETL::loadCache(uint32_t seq) loadCacheFromDb(seq); } // If loading synchronously, poll cache until full - while (cacheLoadStyle_ == CacheLoadStyle::SYNC && - !backend_->cache().isFull()) + while (cacheLoadStyle_ == CacheLoadStyle::SYNC && !backend_->cache().isFull()) { - log_.debug() << "Cache not full. Cache size = " - << backend_->cache().size() << ". Sleeping ..."; + log_.debug() << "Cache not full. Cache size = " << backend_->cache().size() << ". Sleeping ..."; std::this_thread::sleep_for(std::chrono::seconds(10)); - log_.info() << "Cache is full. Cache size = " - << backend_->cache().size(); + log_.info() << "Cache is full. 
Cache size = " << backend_->cache().size(); } } @@ -1101,9 +949,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq) } loading = true; std::vector diff; - auto append = [](auto&& a, auto&& b) { - a.insert(std::end(a), std::begin(b), std::end(b)); - }; + auto append = [](auto&& a, auto&& b) { a.insert(std::end(a), std::begin(b), std::end(b)); }; for (size_t i = 0; i < numCacheDiffs_; ++i) { @@ -1113,15 +959,9 @@ ReportingETL::loadCacheFromDb(uint32_t seq) } std::sort(diff.begin(), diff.end(), [](auto a, auto b) { - return a.key < b.key || - (a.key == b.key && a.blob.size() < b.blob.size()); + return a.key < b.key || (a.key == b.key && a.blob.size() < b.blob.size()); }); - diff.erase( - std::unique( - diff.begin(), - diff.end(), - [](auto a, auto b) { return a.key == b.key; }), - diff.end()); + diff.erase(std::unique(diff.begin(), diff.end(), [](auto a, auto b) { return a.key == b.key; }), diff.end()); std::vector> cursors; cursors.push_back({}); for (auto& obj : diff) @@ -1142,8 +982,7 @@ ReportingETL::loadCacheFromDb(uint32_t seq) cacheDownloader_ = std::thread{[this, seq, cursors]() { auto startTime = std::chrono::system_clock::now(); auto markers = std::make_shared(0); - auto numRemaining = - std::make_shared(cursors.size() - 1); + auto numRemaining = std::make_shared(cursors.size() - 1); for (size_t i = 0; i < cursors.size() - 1; ++i) { std::optional start = cursors[i]; @@ -1152,33 +991,23 @@ ReportingETL::loadCacheFromDb(uint32_t seq) ++(*markers); boost::asio::spawn( ioContext_, - [this, seq, start, end, numRemaining, startTime, markers]( - boost::asio::yield_context yield) { + [this, seq, start, end, numRemaining, startTime, markers](boost::asio::yield_context yield) { std::optional cursor = start; - std::string cursorStr = cursor.has_value() - ? ripple::strHex(cursor.value()) - : ripple::strHex(Backend::firstKey); - log_.debug() << "Starting a cursor: " << cursorStr - << " markers = " << *markers; + std::string cursorStr = + cursor.has_value() ? ripple::strHex(cursor.value()) : ripple::strHex(Backend::firstKey); + log_.debug() << "Starting a cursor: " << cursorStr << " markers = " << *markers; while (!stopping_) { - auto res = Backend::retryOnTimeout([this, - seq, - &cursor, - &yield]() { - return backend_->fetchLedgerPage( - cursor, seq, cachePageFetchSize_, false, yield); + auto res = Backend::retryOnTimeout([this, seq, &cursor, &yield]() { + return backend_->fetchLedgerPage(cursor, seq, cachePageFetchSize_, false, yield); }); backend_->cache().update(res.objects, seq, true); if (!res.cursor || (end && *(res.cursor) > *end)) break; - log_.trace() - << "Loading cache. cache size = " - << backend_->cache().size() << " - cursor = " - << ripple::strHex(res.cursor.value()) - << " start = " << cursorStr - << " markers = " << *markers; + log_.trace() << "Loading cache. cache size = " << backend_->cache().size() + << " - cursor = " << ripple::strHex(res.cursor.value()) << " start = " << cursorStr + << " markers = " << *markers; cursor = std::move(res.cursor); } @@ -1187,19 +1016,15 @@ ReportingETL::loadCacheFromDb(uint32_t seq) if (--(*numRemaining) == 0) { auto endTime = std::chrono::system_clock::now(); - auto duration = - std::chrono::duration_cast( - endTime - startTime); - log_.info() << "Finished loading cache. cache size = " - << backend_->cache().size() << ". Took " + auto duration = std::chrono::duration_cast(endTime - startTime); + log_.info() << "Finished loading cache. cache size = " << backend_->cache().size() << ". 
Took " << duration.count() << " seconds"; backend_->cache().setFull(); } else { - log_.info() << "Finished a cursor. num remaining = " - << *numRemaining << " start = " << cursorStr - << " markers = " << *markers; + log_.info() << "Finished a cursor. num remaining = " << *numRemaining + << " start = " << cursorStr << " markers = " << *markers; } }); } @@ -1223,8 +1048,7 @@ ReportingETL::monitorReadOnly() latestSequence++; while (true) { - if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); - rng && rng->maxSequence >= latestSequence) + if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence) { publishLedger(latestSequence, {}); latestSequence = latestSequence + 1; @@ -1233,8 +1057,7 @@ ReportingETL::monitorReadOnly() // second passes, whichever occurs first. Even if we don't hear // from rippled, if ledgers are being written to the db, we // publish them - networkValidatedLedgers_->waitUntilValidatedByNetwork( - latestSequence, 1000); + networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, 1000); } } @@ -1274,16 +1097,14 @@ ReportingETL::ReportingETL( if (*interval > max) { std::stringstream msg; - msg << "online_delete cannot be greater than " - << std::to_string(max); + msg << "online_delete cannot be greater than " << std::to_string(max); throw std::runtime_error(msg.str()); } if (*interval > 0) onlineDeleteInterval_ = *interval; } - extractorThreads_ = - config.valueOr("extractor_threads", extractorThreads_); + extractorThreads_ = config.valueOr("extractor_threads", extractorThreads_); txnThreshold_ = config.valueOr("txn_threshold", txnThreshold_); if (config.contains("cache")) { @@ -1299,10 +1120,8 @@ ReportingETL::ReportingETL( } numCacheDiffs_ = cache.valueOr("num_diffs", numCacheDiffs_); - numCacheMarkers_ = - cache.valueOr("num_markers", numCacheMarkers_); - cachePageFetchSize_ = - cache.valueOr("page_fetch_size", cachePageFetchSize_); + numCacheMarkers_ = cache.valueOr("num_markers", numCacheMarkers_); + cachePageFetchSize_ = cache.valueOr("page_fetch_size", cachePageFetchSize_); if (auto peers = cache.maybeArray("peers"); peers) { @@ -1314,13 +1133,9 @@ ReportingETL::ReportingETL( // todo: use emplace_back when clang is ready clioPeers.push_back({ip, port}); } - unsigned seed = - std::chrono::system_clock::now().time_since_epoch().count(); + unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); - std::shuffle( - clioPeers.begin(), - clioPeers.end(), - std::default_random_engine(seed)); + std::shuffle(clioPeers.begin(), clioPeers.end(), std::default_random_engine(seed)); } } } diff --git a/src/etl/ReportingETL.h b/src/etl/ReportingETL.h index c23ba378..b4c0be79 100644 --- a/src/etl/ReportingETL.h +++ b/src/etl/ReportingETL.h @@ -265,9 +265,7 @@ private: /// (mostly transaction hashes, corresponding nodestore hashes and affected /// accounts) FormattedTransactionsData - insertTransactions( - ripple::LedgerInfo const& ledger, - org::xrpl::rpc::v1::GetLedgerResponse& data); + insertTransactions(ripple::LedgerInfo const& ledger, org::xrpl::rpc::v1::GetLedgerResponse& data); // TODO update this documentation /// Build the next ledger using the previous ledger and the extracted data. 
@@ -341,8 +339,7 @@ public: std::shared_ptr balancer, std::shared_ptr ledgers) { - auto etl = std::make_shared( - config, ioc, backend, subscriptions, balancer, ledgers); + auto etl = std::make_shared(config, ioc, backend, subscriptions, balancer, ledgers); etl->run(); @@ -373,8 +370,7 @@ public: result["read_only"] = readOnly_; auto last = getLastPublish(); if (last.time_since_epoch().count() != 0) - result["last_publish_age_seconds"] = - std::to_string(lastPublishAgeSeconds()); + result["last_publish_age_seconds"] = std::to_string(lastPublishAgeSeconds()); return result; } @@ -388,8 +384,7 @@ public: std::uint32_t lastPublishAgeSeconds() const { - return std::chrono::duration_cast( - std::chrono::system_clock::now() - getLastPublish()) + return std::chrono::duration_cast(std::chrono::system_clock::now() - getLastPublish()) .count(); } @@ -397,8 +392,7 @@ public: lastCloseAgeSeconds() const { std::shared_lock lck(closeTimeMtx_); - auto now = std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) + auto now = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()) .count(); auto closeTime = lastCloseTime_.time_since_epoch().count(); if (now < (rippleEpochStart + closeTime)) diff --git a/src/log/Logger.cpp b/src/log/Logger.cpp index 4b25147a..324022e8 100644 --- a/src/log/Logger.cpp +++ b/src/log/Logger.cpp @@ -57,8 +57,7 @@ tag_invoke(boost::json::value_to_tag, boost::json::value const& value) return Severity::DBG; else if (boost::iequals(logLevel, "info")) return Severity::NFO; - else if ( - boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn")) + else if (boost::iequals(logLevel, "warning") || boost::iequals(logLevel, "warn")) return Severity::WRN; else if (boost::iequals(logLevel, "error")) return Severity::ERR; @@ -82,8 +81,7 @@ LogService::init(Config const& config) auto const defaultFormat = "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% " "%Message%"; - std::string format = - config.valueOr("log_format", defaultFormat); + std::string format = config.valueOr("log_format", defaultFormat); if (config.valueOr("log_to_console", false)) { @@ -96,14 +94,9 @@ LogService::init(Config const& config) boost::filesystem::path dirPath{logDir.value()}; if (!boost::filesystem::exists(dirPath)) boost::filesystem::create_directories(dirPath); - auto const rotationSize = - config.valueOr("log_rotation_size", 2048u) * 1024u * - 1024u; - auto const rotationPeriod = - config.valueOr("log_rotation_hour_interval", 12u); - auto const dirSize = - config.valueOr("log_directory_max_size", 50u * 1024u) * - 1024u * 1024u; + auto const rotationSize = config.valueOr("log_rotation_size", 2048u) * 1024u * 1024u; + auto const rotationPeriod = config.valueOr("log_rotation_hour_interval", 12u); + auto const dirSize = config.valueOr("log_directory_max_size", 50u * 1024u) * 1024u * 1024u; auto fileSink = boost::log::add_file_log( keywords::file_name = dirPath / "clio.log", keywords::target_file_name = dirPath / "clio_%Y-%m-%d_%H-%M-%S.log", @@ -112,11 +105,9 @@ LogService::init(Config const& config) keywords::open_mode = std::ios_base::app, keywords::rotation_size = rotationSize, keywords::time_based_rotation = - sinks::file::rotation_at_time_interval( - boost::posix_time::hours(rotationPeriod))); + sinks::file::rotation_at_time_interval(boost::posix_time::hours(rotationPeriod))); fileSink->locked_backend()->set_file_collector( - sinks::file::make_collector( - keywords::target = dirPath, keywords::max_size = dirSize)); + 
sinks::file::make_collector(keywords::target = dirPath, keywords::max_size = dirSize)); fileSink->locked_backend()->scan_for_files(); } @@ -134,26 +125,19 @@ LogService::init(Config const& config) }; auto core = boost::log::core::get(); - auto min_severity = boost::log::expressions::channel_severity_filter( - log_channel, log_severity); + auto min_severity = boost::log::expressions::channel_severity_filter(log_channel, log_severity); for (auto const& channel : channels) min_severity[channel] = defaultSeverity; - min_severity["Alert"] = - Severity::WRN; // Channel for alerts, always warning severity + min_severity["Alert"] = Severity::WRN; // Channel for alerts, always warning severity - for (auto const overrides = config.arrayOr("log_channels", {}); - auto const& cfg : overrides) + for (auto const overrides = config.arrayOr("log_channels", {}); auto const& cfg : overrides) { - auto name = cfg.valueOrThrow( - "channel", "Channel name is required"); + auto name = cfg.valueOrThrow("channel", "Channel name is required"); if (not std::count(std::begin(channels), std::end(channels), name)) - throw std::runtime_error( - "Can't override settings for log channel " + name + - ": invalid channel"); + throw std::runtime_error("Can't override settings for log channel " + name + ": invalid channel"); - min_severity[name] = - cfg.valueOr("log_level", defaultSeverity); + min_severity[name] = cfg.valueOr("log_level", defaultSeverity); } core->set_filter(min_severity); @@ -202,8 +186,7 @@ Logger::Pump::pretty_path(source_location_t const& loc, size_t max_depth) const if (idx == std::string::npos || idx == 0) break; } - return file_path.substr(idx == std::string::npos ? 0 : idx + 1) + ':' + - std::to_string(loc.line()); + return file_path.substr(idx == std::string::npos ? 
0 : idx + 1) + ':' + std::to_string(loc.line()); } } // namespace clio diff --git a/src/log/Logger.h b/src/log/Logger.h index 7793a4b3..9bc4b6e6 100644 --- a/src/log/Logger.h +++ b/src/log/Logger.h @@ -68,8 +68,7 @@ class SourceLocation std::size_t line_; public: - SourceLocation(std::string_view file, std::size_t line) - : file_{file}, line_{line} + SourceLocation(std::string_view file, std::size_t line) : file_{file}, line_{line} { } std::string_view @@ -84,8 +83,7 @@ public: } }; using source_location_t = SourceLocation; -#define CURRENT_SRC_LOCATION \ - source_location_t(__builtin_FILE(), __builtin_LINE()) +#define CURRENT_SRC_LOCATION source_location_t(__builtin_FILE(), __builtin_LINE()) #endif /** @@ -121,9 +119,7 @@ operator<<(std::ostream& stream, Severity sev); * @throws std::runtime_error Thrown if severity is not in the right format */ Severity -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& value); +tag_invoke(boost::json::value_to_tag, boost::json::value const& value); /** * @brief A simple thread-safe logger for the channel specified @@ -135,8 +131,7 @@ tag_invoke( */ class Logger final { - using logger_t = - boost::log::sources::severity_channel_logger_mt; + using logger_t = boost::log::sources::severity_channel_logger_mt; mutable logger_t logger_; friend class LogService; // to expose the Pump interface @@ -146,8 +141,7 @@ class Logger final */ class Pump final { - using pump_opt_t = - std::optional>; + using pump_opt_t = std::optional>; boost::log::record rec_; pump_opt_t pump_ = std::nullopt; @@ -160,8 +154,7 @@ class Logger final if (rec_) { pump_.emplace(boost::log::aux::make_record_pump(logger, rec_)); - pump_->stream() << boost::log::add_value( - "SourceLocation", pretty_path(loc)); + pump_->stream() << boost::log::add_value("SourceLocation", pretty_path(loc)); } } @@ -205,8 +198,7 @@ public: * * @param channel The channel this logger will report into. 
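[Editor's note, illustrative only -- not part of the patch.] The SourceLocation / CURRENT_SRC_LOCATION pieces reflowed in the Logger.h hunks are small enough to show end to end. The sketch below is a stand-alone version of the same idea; the accessor names are an assumption, since the extract truncates them (__builtin_FILE and __builtin_LINE are GCC/Clang builtins, and std::source_location is the portable C++20 alternative):

    #include <cstddef>
    #include <iostream>
    #include <string_view>

    class SourceLocation
    {
        std::string_view file_;
        std::size_t line_;

    public:
        SourceLocation(std::string_view file, std::size_t line) : file_{file}, line_{line}
        {
        }

        // Accessor names are assumed; the real class in Logger.h may differ.
        std::string_view
        file_name() const
        {
            return file_;
        }

        std::size_t
        line() const
        {
            return line_;
        }
    };

    using source_location_t = SourceLocation;
    #define CURRENT_SRC_LOCATION source_location_t(__builtin_FILE(), __builtin_LINE())

    int main()
    {
        auto const loc = CURRENT_SRC_LOCATION;

        // Prints something like "sketch.cpp:36" -- the same file:line string the
        // Logger::Pump attaches to each record as "SourceLocation".
        std::cout << loc.file_name() << ":" << loc.line() << "\n";
    }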
*/ - Logger(std::string channel) - : logger_{boost::log::keywords::channel = channel} + Logger(std::string channel) : logger_{boost::log::keywords::channel = channel} { } Logger(Logger const&) = default; diff --git a/src/main/main.cpp b/src/main/main.cpp index 051418c5..65d69fca 100644 --- a/src/main/main.cpp +++ b/src/main/main.cpp @@ -78,12 +78,7 @@ parseCli(int argc, char* argv[]) positional.add("conf", 1); po::variables_map parsed; - po::store( - po::command_line_parser(argc, argv) - .options(description) - .positional(positional) - .run(), - parsed); + po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed); po::notify(parsed); if (parsed.count("version")) @@ -94,9 +89,7 @@ parseCli(int argc, char* argv[]) if (parsed.count("help")) { - std::cout << "Clio server " << Build::getClioFullVersionString() - << "\n\n" - << description; + std::cout << "Clio server " << Build::getClioFullVersionString() << "\n\n" << description; std::exit(EXIT_SUCCESS); } @@ -137,15 +130,11 @@ parseCerts(Config const& config) ssl::context ctx{ssl::context::tlsv12}; - ctx.set_options( - boost::asio::ssl::context::default_workarounds | - boost::asio::ssl::context::no_sslv2); + ctx.set_options(boost::asio::ssl::context::default_workarounds | boost::asio::ssl::context::no_sslv2); ctx.use_certificate_chain(boost::asio::buffer(cert.data(), cert.size())); - ctx.use_private_key( - boost::asio::buffer(key.data(), key.size()), - boost::asio::ssl::context::file_format::pem); + ctx.use_private_key(boost::asio::buffer(key.data(), key.size()), boost::asio::ssl::context::file_format::pem); return ctx; } @@ -175,8 +164,7 @@ try auto const config = ConfigReader::open(configPath); if (!config) { - std::cerr << "Couldnt parse config '" << configPath << "'." - << std::endl; + std::cerr << "Couldnt parse config '" << configPath << "'." << std::endl; return EXIT_FAILURE; } @@ -184,9 +172,7 @@ try LogService::info() << "Clio version: " << Build::getClioFullVersionString(); auto ctx = parseCerts(config); - auto ctxRef = ctx - ? std::optional>{ctx.value()} - : std::nullopt; + auto ctxRef = ctx ? std::optional>{ctx.value()} : std::nullopt; auto const threads = config.valueOr("io_threads", 2); if (threads <= 0) @@ -208,8 +194,7 @@ try auto backend = Backend::make_Backend(ioc, config); // Manages clients subscribed to streams - auto subscriptions = - SubscriptionManager::make_SubscriptionManager(config, backend); + auto subscriptions = SubscriptionManager::make_SubscriptionManager(config, backend); // Tracks which ledgers have been validated by the // network @@ -220,17 +205,14 @@ try // The server uses the balancer to forward RPCs to a rippled node. // The balancer itself publishes to streams (transactions_proposed and // accounts_proposed) - auto balancer = ETLLoadBalancer::make_ETLLoadBalancer( - config, ioc, backend, subscriptions, ledgers); + auto balancer = ETLLoadBalancer::make_ETLLoadBalancer(config, ioc, backend, subscriptions, ledgers); // ETL is responsible for writing and publishing to streams. 
In read-only // mode, ETL only publishes - auto etl = ReportingETL::make_ReportingETL( - config, ioc, backend, subscriptions, balancer, ledgers); + auto etl = ReportingETL::make_ReportingETL(config, ioc, backend, subscriptions, balancer, ledgers); // The server handles incoming RPCs - auto httpServer = Server::make_HttpServer( - config, ioc, ctxRef, backend, subscriptions, balancer, etl, dosGuard); + auto httpServer = Server::make_HttpServer(config, ioc, ctxRef, backend, subscriptions, balancer, etl, dosGuard); // Blocks until stopped. // When stopped, shared_ptrs fall out of scope diff --git a/src/rpc/Counters.cpp b/src/rpc/Counters.cpp index 10cc8d0d..92ef5afb 100644 --- a/src/rpc/Counters.cpp +++ b/src/rpc/Counters.cpp @@ -52,9 +52,7 @@ Counters::rpcErrored(std::string const& method) } void -Counters::rpcComplete( - std::string const& method, - std::chrono::microseconds const& rpcDuration) +Counters::rpcComplete(std::string const& method, std::chrono::microseconds const& rpcDuration) { if (!validHandler(method)) return; diff --git a/src/rpc/Counters.h b/src/rpc/Counters.h index 4091caba..e55c04d5 100644 --- a/src/rpc/Counters.h +++ b/src/rpc/Counters.h @@ -59,9 +59,7 @@ public: rpcErrored(std::string const& method); void - rpcComplete( - std::string const& method, - std::chrono::microseconds const& rpcDuration); + rpcComplete(std::string const& method, std::chrono::microseconds const& rpcDuration); void rpcForwarded(std::string const& method); diff --git a/src/rpc/Errors.cpp b/src/rpc/Errors.cpp index da659a84..409881d1 100644 --- a/src/rpc/Errors.cpp +++ b/src/rpc/Errors.cpp @@ -50,8 +50,7 @@ getWarningInfo(WarningCode code) {warnRPC_RATE_LIMIT, "You are about to be rate limited"}}; auto matchByCode = [code](auto const& info) { return info.code == code; }; - if (auto it = find_if(begin(infos), end(infos), matchByCode); - it != end(infos)) + if (auto it = find_if(begin(infos), end(infos), matchByCode); it != end(infos)) return *it; throw(out_of_range("Invalid WarningCode")); @@ -71,31 +70,21 @@ ClioErrorInfo const& getErrorInfo(ClioError code) { constexpr static ClioErrorInfo infos[]{ - {ClioError::rpcMALFORMED_CURRENCY, - "malformedCurrency", - "Malformed currency."}, - {ClioError::rpcMALFORMED_REQUEST, - "malformedRequest", - "Malformed request."}, + {ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency", "Malformed currency."}, + {ClioError::rpcMALFORMED_REQUEST, "malformedRequest", "Malformed request."}, {ClioError::rpcMALFORMED_OWNER, "malformedOwner", "Malformed owner."}, - {ClioError::rpcMALFORMED_ADDRESS, - "malformedAddress", - "Malformed address."}, + {ClioError::rpcMALFORMED_ADDRESS, "malformedAddress", "Malformed address."}, }; auto matchByCode = [code](auto const& info) { return info.code == code; }; - if (auto it = find_if(begin(infos), end(infos), matchByCode); - it != end(infos)) + if (auto it = find_if(begin(infos), end(infos), matchByCode); it != end(infos)) return *it; throw(out_of_range("Invalid error code")); } boost::json::object -makeError( - RippledError err, - optional customError, - optional customMessage) +makeError(RippledError err, optional customError, optional customMessage) { boost::json::object json; auto const& info = ripple::RPC::get_error_info(err); @@ -109,10 +98,7 @@ makeError( } boost::json::object -makeError( - ClioError err, - optional customError, - optional customMessage) +makeError(ClioError err, optional customError, optional customMessage) { boost::json::object json; auto const& info = getErrorInfo(err); @@ -128,31 +114,20 @@ makeError( 
boost::json::object makeError(Status const& status) { - auto wrapOptional = [](string_view const& str) { - return str.empty() ? nullopt : make_optional(str); - }; + auto wrapOptional = [](string_view const& str) { return str.empty() ? nullopt : make_optional(str); }; auto res = visit( overloadSet{ [&status, &wrapOptional](RippledError err) { if (err == ripple::rpcUNKNOWN) { - return boost::json::object{ - {"error", status.message}, - {"type", "response"}, - {"status", "error"}}; + return boost::json::object{{"error", status.message}, {"type", "response"}, {"status", "error"}}; } - return makeError( - err, - wrapOptional(status.error), - wrapOptional(status.message)); + return makeError(err, wrapOptional(status.error), wrapOptional(status.message)); }, [&status, &wrapOptional](ClioError err) { - return makeError( - err, - wrapOptional(status.error), - wrapOptional(status.message)); + return makeError(err, wrapOptional(status.error), wrapOptional(status.message)); }, }, status.code); diff --git a/src/rpc/Errors.h b/src/rpc/Errors.h index 5e7caf17..b9a9df97 100644 --- a/src/rpc/Errors.h +++ b/src/rpc/Errors.h @@ -75,24 +75,20 @@ struct Status Status() = default; /* implicit */ Status(CombinedError code) : code(code){}; - Status(CombinedError code, boost::json::object&& extraInfo) - : code(code), extraInfo(std::move(extraInfo)){}; + Status(CombinedError code, boost::json::object&& extraInfo) : code(code), extraInfo(std::move(extraInfo)){}; // HACK. Some rippled handlers explicitly specify errors. // This means that we have to be able to duplicate this // functionality. - explicit Status(std::string const& message) - : code(ripple::rpcUNKNOWN), message(message) + explicit Status(std::string const& message) : code(ripple::rpcUNKNOWN), message(message) { } - Status(CombinedError code, std::string message) - : code(code), message(message) + Status(CombinedError code, std::string message) : code(code), message(message) { } - Status(CombinedError code, std::string error, std::string message) - : code(code), error(error), message(message) + Status(CombinedError code, std::string error, std::string message) : code(code), error(error), message(message) { } @@ -138,12 +134,7 @@ struct Status /** * @brief Warning codes that can be returned by clio. */ -enum WarningCode { - warnUNKNOWN = -1, - warnRPC_CLIO = 2001, - warnRPC_OUTDATED = 2002, - warnRPC_RATE_LIMIT = 2003 -}; +enum WarningCode { warnUNKNOWN = -1, warnRPC_CLIO = 2001, warnRPC_OUTDATED = 2002, warnRPC_RATE_LIMIT = 2003 }; /** * @brief Holds information about a clio warning. 
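[Editor's note, illustrative only -- not part of the patch.] The getErrorInfo() and getWarningInfo() hunks above share one lookup shape: a static table of info structs scanned with find_if and a small matchByCode lambda, throwing on an unknown code. A self-contained sketch with hypothetical demo types (the real ClioError/ClioErrorInfo live in src/rpc/Errors.h):

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <stdexcept>
    #include <string_view>

    // Hypothetical stand-ins for ClioError / ClioErrorInfo.
    enum class DemoError { malformedCurrency, malformedRequest };

    struct DemoErrorInfo
    {
        DemoError code;
        std::string_view error;
        std::string_view message;
    };

    // Same lookup shape as getErrorInfo(): scan a static table with find_if and
    // a matchByCode lambda; an unknown code becomes an exception.
    DemoErrorInfo const&
    getDemoErrorInfo(DemoError code)
    {
        constexpr static DemoErrorInfo infos[]{
            {DemoError::malformedCurrency, "malformedCurrency", "Malformed currency."},
            {DemoError::malformedRequest, "malformedRequest", "Malformed request."},
        };

        auto matchByCode = [code](auto const& info) { return info.code == code; };
        if (auto it = std::find_if(std::begin(infos), std::end(infos), matchByCode); it != std::end(infos))
            return *it;

        throw std::out_of_range("Invalid error code");
    }

    int main()
    {
        auto const& info = getDemoErrorInfo(DemoError::malformedRequest);
        std::cout << info.error << ": " << info.message << "\n";
    }

Keeping the code-to-message mapping in one table and failing loudly on an unknown code avoids the silent-default behaviour a switch with a default branch would invite.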
@@ -151,8 +142,7 @@ enum WarningCode { struct WarningInfo { constexpr WarningInfo() = default; - constexpr WarningInfo(WarningCode code, char const* message) - : code(code), message(message) + constexpr WarningInfo(WarningCode code, char const* message) : code(code), message(message) { } diff --git a/src/rpc/RPC.cpp b/src/rpc/RPC.cpp index 32e13a98..0e339a7e 100644 --- a/src/rpc/RPC.cpp +++ b/src/rpc/RPC.cpp @@ -95,19 +95,7 @@ make_WsContext( string command = commandValue.as_string().c_str(); return make_optional( - yc, - command, - 1, - request, - backend, - subscriptions, - balancer, - etl, - session, - tagFactory, - range, - counters, - clientIp); + yc, command, 1, request, backend, subscriptions, balancer, etl, session, tagFactory, range, counters, clientIp); } optional @@ -267,8 +255,7 @@ isClioOnly(string const& method) bool shouldSuppressValidatedFlag(RPC::Context const& context) { - return boost::iequals(context.method, "subscribe") || - boost::iequals(context.method, "unsubscribe"); + return boost::iequals(context.method, "subscribe") || boost::iequals(context.method, "unsubscribe"); } Status @@ -278,8 +265,7 @@ getLimit(RPC::Context const& context, uint32_t& limit) return Status{RippledError::rpcUNKNOWN_COMMAND}; if (!handlerTable.getLimitRange(context.method)) - return Status{ - RippledError::rpcINVALID_PARAMS, "rpcDoesNotRequireLimit"}; + return Status{RippledError::rpcINVALID_PARAMS, "rpcDoesNotRequireLimit"}; auto [lo, def, hi] = *handlerTable.getLimitRange(context.method); @@ -317,8 +303,7 @@ shouldForwardToRippled(Context const& ctx) if (specifiesCurrentOrClosedLedger(request)) return true; - if (ctx.method == "account_info" && request.contains("queue") && - request.at("queue").as_bool()) + if (ctx.method == "account_info" && request.contains("queue") && request.at("queue").as_bool()) return true; return false; @@ -332,8 +317,7 @@ buildResponse(Context const& ctx) boost::json::object toForward = ctx.params; toForward["command"] = ctx.method; - auto res = - ctx.balancer->forwardToRippled(toForward, ctx.clientIp, ctx.yield); + auto res = ctx.balancer->forwardToRippled(toForward, ctx.clientIp, ctx.yield); ctx.counters.rpcForwarded(ctx.method); @@ -359,14 +343,11 @@ buildResponse(Context const& ctx) try { - gPerfLog.debug() << ctx.tag() << " start executing rpc `" << ctx.method - << '`'; + gPerfLog.debug() << ctx.tag() << " start executing rpc `" << ctx.method << '`'; auto v = (*method)(ctx); - gPerfLog.debug() << ctx.tag() << " finish executing rpc `" << ctx.method - << '`'; + gPerfLog.debug() << ctx.tag() << " finish executing rpc `" << ctx.method << '`'; - if (auto object = get_if(&v); - object && not shouldSuppressValidatedFlag(ctx)) + if (auto object = get_if(&v); object && not shouldSuppressValidatedFlag(ctx)) { (*object)[JS(validated)] = true; } diff --git a/src/rpc/RPC.h b/src/rpc/RPC.h index fd2b3129..17567b1f 100644 --- a/src/rpc/RPC.h +++ b/src/rpc/RPC.h @@ -150,11 +150,10 @@ logDuration(Context const& ctx, T const& dur) { static clio::Logger log{"RPC"}; std::stringstream ss; - ss << ctx.tag() << "Request processing duration = " - << std::chrono::duration_cast(dur).count() + ss << ctx.tag() + << "Request processing duration = " << std::chrono::duration_cast(dur).count() << " milliseconds. 
request = " << ctx.params; - auto seconds = - std::chrono::duration_cast(dur).count(); + auto seconds = std::chrono::duration_cast(dur).count(); if (seconds > 10) log.error() << ss.str(); else if (seconds > 1) diff --git a/src/rpc/RPCHelpers.cpp b/src/rpc/RPCHelpers.cpp index 8448b8c2..0685a41b 100644 --- a/src/rpc/RPCHelpers.cpp +++ b/src/rpc/RPCHelpers.cpp @@ -47,10 +47,7 @@ getBool(boost::json::object const& request, std::string const& field) } bool -getBool( - boost::json::object const& request, - std::string const& field, - bool dfault) +getBool(boost::json::object const& request, std::string const& field, bool dfault) { if (auto res = getBool(request, field)) return *res; @@ -81,10 +78,7 @@ getUInt(boost::json::object const& request, std::string const& field) } std::uint32_t -getUInt( - boost::json::object const& request, - std::string const& field, - std::uint32_t const dfault) +getUInt(boost::json::object const& request, std::string const& field, std::uint32_t const dfault) { if (auto res = getUInt(request, field)) return *res; @@ -156,10 +150,7 @@ getRequiredString(boost::json::object const& request, std::string const& field) } std::string -getString( - boost::json::object const& request, - std::string const& field, - std::string dfault) +getString(boost::json::object const& request, std::string const& field, std::string dfault) { if (auto res = getString(request, field)) return *res; @@ -192,25 +183,21 @@ getAccount( if (!request.contains(field)) { if (required) - return Status{ - RippledError::rpcINVALID_PARAMS, field.to_string() + "Missing"}; + return Status{RippledError::rpcINVALID_PARAMS, field.to_string() + "Missing"}; return {}; } if (!request.at(field).is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, field.to_string() + "NotString"}; + return Status{RippledError::rpcINVALID_PARAMS, field.to_string() + "NotString"}; - if (auto a = accountFromStringStrict(request.at(field).as_string().c_str()); - a) + if (auto a = accountFromStringStrict(request.at(field).as_string().c_str()); a) { account = a.value(); return {}; } - return Status{ - RippledError::rpcACT_MALFORMED, field.to_string() + "Malformed"}; + return Status{RippledError::rpcACT_MALFORMED, field.to_string() + "Malformed"}; } Status @@ -226,18 +213,15 @@ getOptionalAccount( } if (!request.at(field).is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, field.to_string() + "NotString"}; + return Status{RippledError::rpcINVALID_PARAMS, field.to_string() + "NotString"}; - if (auto a = accountFromStringStrict(request.at(field).as_string().c_str()); - a) + if (auto a = accountFromStringStrict(request.at(field).as_string().c_str()); a) { account = a.value(); return {}; } - return Status{ - RippledError::rpcINVALID_PARAMS, field.to_string() + "Malformed"}; + return Status{RippledError::rpcINVALID_PARAMS, field.to_string() + "Malformed"}; } Status @@ -247,10 +231,7 @@ getAccount(boost::json::object const& request, ripple::AccountID& accountId) } Status -getAccount( - boost::json::object const& request, - ripple::AccountID& destAccount, - boost::string_view const& field) +getAccount(boost::json::object const& request, ripple::AccountID& destAccount, boost::string_view const& field) { return getAccount(request, destAccount, field, false); } @@ -320,8 +301,7 @@ canHaveDeliveredAmount( std::shared_ptr const& meta) { ripple::TxType const tt{txn->getTxnType()}; - if (tt != ripple::ttPAYMENT && tt != ripple::ttCHECK_CASH && - tt != ripple::ttACCOUNT_DELETE) + if (tt != ripple::ttPAYMENT && tt != 
ripple::ttCHECK_CASH && tt != ripple::ttACCOUNT_DELETE) return false; /* @@ -343,13 +323,11 @@ accountFromStringStrict(std::string const& account) std::optional publicKey = {}; if (blob && ripple::publicKeyType(ripple::makeSlice(*blob))) { - publicKey = - ripple::PublicKey(ripple::Slice{blob->data(), blob->size()}); + publicKey = ripple::PublicKey(ripple::Slice{blob->data(), blob->size()}); } else { - publicKey = ripple::parseBase58( - ripple::TokenType::AccountPublic, account); + publicKey = ripple::parseBase58(ripple::TokenType::AccountPublic, account); } std::optional result; @@ -363,26 +341,19 @@ accountFromStringStrict(std::string const& account) else return {}; } -std::pair< - std::shared_ptr, - std::shared_ptr> +std::pair, std::shared_ptr> deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs) { try { - std::pair< - std::shared_ptr, - std::shared_ptr> - result; + std::pair, std::shared_ptr> result; { - ripple::SerialIter s{ - blobs.transaction.data(), blobs.transaction.size()}; + ripple::SerialIter s{blobs.transaction.data(), blobs.transaction.size()}; result.first = std::make_shared(s); } { ripple::SerialIter s{blobs.metadata.data(), blobs.metadata.size()}; - result.second = - std::make_shared(s, ripple::sfMetadata); + result.second = std::make_shared(s, ripple::sfMetadata); } return result; } @@ -390,34 +361,21 @@ deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs) { std::stringstream txn; std::stringstream meta; - std::copy( - blobs.transaction.begin(), - blobs.transaction.end(), - std::ostream_iterator(txn)); - std::copy( - blobs.metadata.begin(), - blobs.metadata.end(), - std::ostream_iterator(meta)); - gLog.error() << "Failed to deserialize transaction. txn = " << txn.str() - << " - meta = " << meta.str() << " txn length = " - << std::to_string(blobs.transaction.size()) - << " meta length = " - << std::to_string(blobs.metadata.size()); + std::copy(blobs.transaction.begin(), blobs.transaction.end(), std::ostream_iterator(txn)); + std::copy(blobs.metadata.begin(), blobs.metadata.end(), std::ostream_iterator(meta)); + gLog.error() << "Failed to deserialize transaction. 
txn = " << txn.str() << " - meta = " << meta.str() + << " txn length = " << std::to_string(blobs.transaction.size()) + << " meta length = " << std::to_string(blobs.metadata.size()); throw e; } } -std::pair< - std::shared_ptr, - std::shared_ptr> -deserializeTxPlusMeta( - Backend::TransactionAndMetadata const& blobs, - std::uint32_t seq) +std::pair, std::shared_ptr> +deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs, std::uint32_t seq) { auto [tx, meta] = deserializeTxPlusMeta(blobs); - std::shared_ptr m = - std::make_shared(tx->getTransactionID(), seq, *meta); + std::shared_ptr m = std::make_shared(tx->getTransactionID(), seq, *meta); return {tx, m}; } @@ -425,8 +383,7 @@ deserializeTxPlusMeta( boost::json::object toJson(ripple::STBase const& obj) { - boost::json::value value = boost::json::parse( - obj.getJson(ripple::JsonOptions::none).toStyledString()); + boost::json::value value = boost::json::parse(obj.getJson(ripple::JsonOptions::none).toStyledString()); return value.as_object(); } @@ -451,8 +408,7 @@ insertDeliveredAmount( if (canHaveDeliveredAmount(txn, meta)) { if (auto amt = getDeliveredAmount(txn, meta, meta->getLgrSeq(), date)) - metaJson["delivered_amount"] = - toBoostJson(amt->getJson(ripple::JsonOptions::include_date)); + metaJson["delivered_amount"] = toBoostJson(amt->getJson(ripple::JsonOptions::include_date)); else metaJson["delivered_amount"] = "unavailable"; return true; @@ -463,8 +419,7 @@ insertDeliveredAmount( boost::json::object toJson(ripple::TxMeta const& meta) { - boost::json::value value = boost::json::parse( - meta.getJson(ripple::JsonOptions::none).toStyledString()); + boost::json::value value = boost::json::parse(meta.getJson(ripple::JsonOptions::none).toStyledString()); return value.as_object(); } @@ -480,8 +435,7 @@ toBoostJson(Json::Value const& value) boost::json::object toJson(ripple::SLE const& sle) { - boost::json::value value = boost::json::parse( - sle.getJson(ripple::JsonOptions::none).toStyledString()); + boost::json::value value = boost::json::parse(sle.getJson(ripple::JsonOptions::none).toStyledString()); if (sle.getType() == ripple::ltACCOUNT_ROOT) { if (sle.isFieldPresent(ripple::sfEmailHash)) @@ -489,8 +443,7 @@ toJson(ripple::SLE const& sle) auto const& hash = sle.getFieldH128(ripple::sfEmailHash); std::string md5 = strHex(hash); boost::algorithm::to_lower(md5); - value.as_object()["urlgravatar"] = - str(boost::format("http://www.gravatar.com/avatar/%s") % md5); + value.as_object()["urlgravatar"] = str(boost::format("http://www.gravatar.com/avatar/%s") % md5); } } return value.as_object(); @@ -509,8 +462,7 @@ toJson(ripple::LedgerInfo const& lgrInfo) header["close_flags"] = lgrInfo.closeFlags; // Always show fields that contribute to the ledger hash - header["parent_close_time"] = - lgrInfo.parentCloseTime.time_since_epoch().count(); + header["parent_close_time"] = lgrInfo.parentCloseTime.time_since_epoch().count(); header["close_time"] = lgrInfo.closeTime.time_since_epoch().count(); header["close_time_resolution"] = lgrInfo.closeTimeResolution.count(); return header; @@ -534,20 +486,16 @@ parseStringAsUInt(std::string const& value) std::variant ledgerInfoFromRequest(Context const& ctx) { - auto hashValue = ctx.params.contains("ledger_hash") - ? ctx.params.at("ledger_hash") - : nullptr; + auto hashValue = ctx.params.contains("ledger_hash") ? 
ctx.params.at("ledger_hash") : nullptr; if (!hashValue.is_null()) { if (!hashValue.is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, "ledgerHashNotString"}; + return Status{RippledError::rpcINVALID_PARAMS, "ledgerHashNotString"}; ripple::uint256 ledgerHash; if (!ledgerHash.parseHex(hashValue.as_string().c_str())) - return Status{ - RippledError::rpcINVALID_PARAMS, "ledgerHashMalformed"}; + return Status{RippledError::rpcINVALID_PARAMS, "ledgerHashMalformed"}; auto lgrInfo = ctx.backend->fetchLedgerByHash(ledgerHash, ctx.yield); @@ -557,9 +505,7 @@ ledgerInfoFromRequest(Context const& ctx) return *lgrInfo; } - auto indexValue = ctx.params.contains("ledger_index") - ? ctx.params.at("ledger_index") - : nullptr; + auto indexValue = ctx.params.contains("ledger_index") ? ctx.params.at("ledger_index") : nullptr; std::optional ledgerSequence = {}; if (!indexValue.is_null()) @@ -583,8 +529,7 @@ ledgerInfoFromRequest(Context const& ctx) if (!ledgerSequence) return Status{RippledError::rpcINVALID_PARAMS, "ledgerIndexMalformed"}; - auto lgrInfo = - ctx.backend->fetchLedgerBySequence(*ledgerSequence, ctx.yield); + auto lgrInfo = ctx.backend->fetchLedgerBySequence(*ledgerSequence, ctx.yield); if (!lgrInfo || lgrInfo->seq > ctx.range.maxSequence) return Status{RippledError::rpcLGR_NOT_FOUND, "ledgerNotFound"}; @@ -602,8 +547,7 @@ getLedgerInfoFromHashOrSeq( uint32_t maxSeq) { std::optional lgrInfo; - auto const err = - RPC::Status{RPC::RippledError::rpcLGR_NOT_FOUND, "ledgerNotFound"}; + auto const err = RPC::Status{RPC::RippledError::rpcLGR_NOT_FOUND, "ledgerNotFound"}; if (ledgerHash) { // invoke uint256's constructor to parse the hex string , instead of @@ -652,8 +596,7 @@ getStartHint(ripple::SLE const& sle, ripple::AccountID const& accountID) { if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == accountID) return sle.getFieldU64(ripple::sfLowNode); - else if ( - sle.getFieldAmount(ripple::sfHighLimit).getIssuer() == accountID) + else if (sle.getFieldAmount(ripple::sfHighLimit).getIssuer() == accountID) return sle.getFieldU64(ripple::sfHighNode); } @@ -673,8 +616,7 @@ traverseOwnedNodes( boost::asio::yield_context& yield, std::function atOwnedNode) { - if (!backend.fetchLedgerObject( - ripple::keylet::account(accountID).key, sequence, yield)) + if (!backend.fetchLedgerObject(ripple::keylet::account(accountID).key, sequence, yield)) return Status{RippledError::rpcACT_NOT_FOUND}; auto const maybeCursor = parseAccountCursor(jsonCursor); @@ -752,8 +694,7 @@ traverseOwnedNodes( if (hexMarker.isNonZero()) { auto const hintIndex = ripple::keylet::page(rootIndex, startHint); - auto hintDir = - backend.fetchLedgerObject(hintIndex.key, sequence, yield); + auto hintDir = backend.fetchLedgerObject(hintIndex.key, sequence, yield); if (!hintDir) return Status(ripple::rpcINVALID_PARAMS, "Invalid marker"); @@ -762,8 +703,7 @@ traverseOwnedNodes( ripple::SLE sle{it, hintIndex.key}; if (auto const& indexes = sle.getFieldV256(ripple::sfIndexes); - std::find(std::begin(indexes), std::end(indexes), hexMarker) == - std::end(indexes)) + std::find(std::begin(indexes), std::end(indexes), hexMarker) == std::end(indexes)) { // result in empty dataset return AccountCursor({beast::zero, 0}); @@ -773,12 +713,10 @@ traverseOwnedNodes( bool found = false; for (;;) { - auto const ownerDir = - backend.fetchLedgerObject(currentIndex.key, sequence, yield); + auto const ownerDir = backend.fetchLedgerObject(currentIndex.key, sequence, yield); if (!ownerDir) - return Status( - ripple::rpcINVALID_PARAMS, "Owner 
directory not found"); + return Status(ripple::rpcINVALID_PARAMS, "Owner directory not found"); ripple::SerialIter it{ownerDir->data(), ownerDir->size()}; ripple::SLE sle{it, currentIndex.key}; @@ -819,8 +757,7 @@ traverseOwnedNodes( { for (;;) { - auto const ownerDir = - backend.fetchLedgerObject(currentIndex.key, sequence, yield); + auto const ownerDir = backend.fetchLedgerObject(currentIndex.key, sequence, yield); if (!ownerDir) break; @@ -853,16 +790,11 @@ traverseOwnedNodes( auto end = std::chrono::system_clock::now(); gLog.debug() << "Time loading owned directories: " - << std::chrono::duration_cast( - end - start) - .count() - << " milliseconds"; + << std::chrono::duration_cast(end - start).count() << " milliseconds"; - auto [objects, timeDiff] = util::timed( - [&]() { return backend.fetchLedgerObjects(keys, sequence, yield); }); + auto [objects, timeDiff] = util::timed([&]() { return backend.fetchLedgerObjects(keys, sequence, yield); }); - gLog.debug() << "Time loading owned entries: " << timeDiff - << " milliseconds"; + gLog.debug() << "Time loading owned entries: " << timeDiff << " milliseconds"; for (auto i = 0; i < objects.size(); ++i) { @@ -877,17 +809,11 @@ traverseOwnedNodes( } std::shared_ptr -read( - ripple::Keylet const& keylet, - ripple::LedgerInfo const& lgrInfo, - Context const& context) +read(ripple::Keylet const& keylet, ripple::LedgerInfo const& lgrInfo, Context const& context) { - if (auto const blob = context.backend->fetchLedgerObject( - keylet.key, lgrInfo.seq, context.yield); - blob) + if (auto const blob = context.backend->fetchLedgerObject(keylet.key, lgrInfo.seq, context.yield); blob) { - return std::make_shared( - ripple::SerialIter{blob->data(), blob->size()}, keylet.key); + return std::make_shared(ripple::SerialIter{blob->data(), blob->size()}, keylet.key); } return nullptr; @@ -902,11 +828,9 @@ parseRippleLibSeed(boost::json::value const& value) if (!value.is_string()) return {}; - auto const result = ripple::decodeBase58Token( - value.as_string().c_str(), ripple::TokenType::None); + auto const result = ripple::decodeBase58Token(value.as_string().c_str(), ripple::TokenType::None); - if (result.size() == 18 && - static_cast(result[0]) == std::uint8_t(0xE1) && + if (result.size() == 18 && static_cast(result[0]) == std::uint8_t(0xE1) && static_cast(result[1]) == std::uint8_t(0x4B)) return ripple::Seed(ripple::makeSlice(result.substr(2))); @@ -920,8 +844,7 @@ keypairFromRequst(boost::json::object const& request) // All of the secret types we allow, but only one at a time. // The array should be constexpr, but that makes Visual Studio unhappy. - static std::string const secretTypes[]{ - "passphrase", "secret", "seed", "seed_hex"}; + static std::string const secretTypes[]{"passphrase", "secret", "seed", "seed_hex"}; // Identify which secret type is in use. 
std::string secretType = ""; @@ -958,13 +881,10 @@ keypairFromRequst(boost::json::object const& request) keyType = ripple::keyTypeFromString(key_type); if (!keyType) - return Status{ - RippledError::rpcINVALID_PARAMS, "invalidFieldKeyType"}; + return Status{RippledError::rpcINVALID_PARAMS, "invalidFieldKeyType"}; if (secretType == "secret") - return Status{ - RippledError::rpcINVALID_PARAMS, - "The secret field is not allowed if key_type is used."}; + return Status{RippledError::rpcINVALID_PARAMS, "The secret field is not allowed if key_type is used."}; } // ripple-lib encodes seed used to generate an Ed25519 wallet in a @@ -978,11 +898,8 @@ keypairFromRequst(boost::json::object const& request) { // If the user passed in an Ed25519 seed but *explicitly* // requested another key type, return an error. - if (keyType.value_or(ripple::KeyType::ed25519) != - ripple::KeyType::ed25519) - return Status{ - RippledError::rpcINVALID_PARAMS, - "Specified seed is for an Ed25519 wallet."}; + if (keyType.value_or(ripple::KeyType::ed25519) != ripple::KeyType::ed25519) + return Status{RippledError::rpcINVALID_PARAMS, "Specified seed is for an Ed25519 wallet."}; keyType = ripple::KeyType::ed25519; } @@ -996,9 +913,7 @@ keypairFromRequst(boost::json::object const& request) if (has_key_type) { if (!request.at(secretType).is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, - "secret value must be string"}; + return Status{RippledError::rpcINVALID_PARAMS, "secret value must be string"}; std::string key = request.at(secretType).as_string().c_str(); @@ -1016,9 +931,7 @@ keypairFromRequst(boost::json::object const& request) else { if (!request.at("secret").is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, - "field secret should be a string"}; + return Status{RippledError::rpcINVALID_PARAMS, "field secret should be a string"}; std::string secret = request.at("secret").as_string().c_str(); seed = ripple::parseGenericSeed(secret); @@ -1026,15 +939,10 @@ keypairFromRequst(boost::json::object const& request) } if (!seed) - return Status{ - RippledError::rpcBAD_SEED, - "Bad Seed: invalid field message secretType"}; + return Status{RippledError::rpcBAD_SEED, "Bad Seed: invalid field message secretType"}; - if (keyType != ripple::KeyType::secp256k1 && - keyType != ripple::KeyType::ed25519) - return Status{ - RippledError::rpcINVALID_PARAMS, - "keypairForSignature: invalid key type"}; + if (keyType != ripple::KeyType::secp256k1 && keyType != ripple::KeyType::ed25519) + return Status{RippledError::rpcINVALID_PARAMS, "keypairForSignature: invalid key type"}; return generateKeyPair(*keyType, *seed); } @@ -1120,8 +1028,7 @@ isFrozen( ripple::SerialIter issuerIt{blob->data(), blob->size()}; ripple::SLE issuerLine{issuerIt, key}; - auto frozen = - (issuer > account) ? ripple::lsfHighFreeze : ripple::lsfLowFreeze; + auto frozen = (issuer > account) ? 
ripple::lsfHighFreeze : ripple::lsfLowFreeze; if (issuerLine.isFlag(frozen)) return true; @@ -1148,8 +1055,7 @@ xrpLiquid( std::uint32_t const ownerCount = sle.getFieldU32(ripple::sfOwnerCount); - auto const reserve = - backend.fetchFees(sequence, yield)->accountReserve(ownerCount); + auto const reserve = backend.fetchFees(sequence, yield)->accountReserve(ownerCount); auto const balance = sle.getFieldAmount(ripple::sfBalance); @@ -1174,14 +1080,7 @@ accountFunds( } else { - return accountHolds( - backend, - sequence, - id, - amount.getCurrency(), - amount.getIssuer(), - true, - yield); + return accountHolds(backend, sequence, id, amount.getCurrency(), amount.getIssuer(), true, yield); } } @@ -1213,8 +1112,7 @@ accountHolds( ripple::SerialIter it{blob->data(), blob->size()}; ripple::SLE sle{it, key}; - if (zeroIfFrozen && - isFrozen(backend, sequence, account, currency, issuer, yield)) + if (zeroIfFrozen && isFrozen(backend, sequence, account, currency, issuer, yield)) { amount.clear(ripple::Issue(currency, issuer)); } @@ -1267,8 +1165,7 @@ postProcessOrderBook( std::map umBalance; - bool globalFreeze = - isGlobalFrozen(backend, ledgerSequence, book.out.account, yield) || + bool globalFreeze = isGlobalFrozen(backend, ledgerSequence, book.out.account, yield) || isGlobalFrozen(backend, ledgerSequence, book.in.account, yield); auto rate = transferRate(backend, ledgerSequence, book.out.account, yield); @@ -1279,8 +1176,7 @@ postProcessOrderBook( { ripple::SerialIter it{obj.blob.data(), obj.blob.size()}; ripple::SLE offer{it, obj.key}; - ripple::uint256 bookDir = - offer.getFieldH256(ripple::sfBookDirectory); + ripple::uint256 bookDir = offer.getFieldH256(ripple::sfBookDirectory); auto const uOfferOwnerID = offer.getAccountID(ripple::sfAccount); auto const& saTakerGets = offer.getFieldAmount(ripple::sfTakerGets); @@ -1313,13 +1209,7 @@ postProcessOrderBook( else { saOwnerFunds = accountHolds( - backend, - ledgerSequence, - uOfferOwnerID, - book.out.currency, - book.out.account, - true, - yield); + backend, ledgerSequence, uOfferOwnerID, book.out.currency, book.out.account, true, yield); if (saOwnerFunds < beast::zero) saOwnerFunds.clear(); @@ -1331,8 +1221,7 @@ postProcessOrderBook( ripple::STAmount saTakerGetsFunded; ripple::STAmount saOwnerFundsLimit = saOwnerFunds; ripple::Rate offerRate = ripple::parityRate; - ripple::STAmount dirRate = - ripple::amountFromQuality(getQuality(bookDir)); + ripple::STAmount dirRate = ripple::amountFromQuality(getQuality(bookDir)); if (rate != ripple::parityRate // Have a tranfer fee. @@ -1354,21 +1243,15 @@ postProcessOrderBook( else { saTakerGetsFunded = saOwnerFundsLimit; - offerJson["taker_gets_funded"] = toBoostJson( - saTakerGetsFunded.getJson(ripple::JsonOptions::none)); - offerJson["taker_pays_funded"] = toBoostJson( - std::min( - saTakerPays, - ripple::multiply( - saTakerGetsFunded, dirRate, saTakerPays.issue())) - .getJson(ripple::JsonOptions::none)); + offerJson["taker_gets_funded"] = toBoostJson(saTakerGetsFunded.getJson(ripple::JsonOptions::none)); + offerJson["taker_pays_funded"] = + toBoostJson(std::min(saTakerPays, ripple::multiply(saTakerGetsFunded, dirRate, saTakerPays.issue())) + .getJson(ripple::JsonOptions::none)); } ripple::STAmount saOwnerPays = (ripple::parityRate == offerRate) ? 
saTakerGetsFunded - : std::min( - saOwnerFunds, - ripple::multiply(saTakerGetsFunded, offerRate)); + : std::min(saOwnerFunds, ripple::multiply(saTakerGetsFunded, offerRate)); umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays; @@ -1389,11 +1272,7 @@ postProcessOrderBook( // get book via currency type std::variant -parseBook( - ripple::Currency pays, - ripple::AccountID payIssuer, - ripple::Currency gets, - ripple::AccountID getIssuer) +parseBook(ripple::Currency pays, ripple::AccountID payIssuer, ripple::Currency gets, ripple::AccountID getIssuer) { if (isXRP(pays) && !isXRP(payIssuer)) return Status{ @@ -1415,8 +1294,7 @@ parseBook( if (!ripple::isXRP(gets) && ripple::isXRP(getIssuer)) return Status{ - RippledError::rpcDST_ISR_MALFORMED, - "Invalid field 'taker_gets.issuer', expected non-XRP issuer."}; + RippledError::rpcDST_ISR_MALFORMED, "Invalid field 'taker_gets.issuer', expected non-XRP issuer."}; if (pays == gets && payIssuer == getIssuer) return Status{RippledError::rpcBAD_MARKET, "badMarket"}; @@ -1428,22 +1306,16 @@ std::variant parseBook(boost::json::object const& request) { if (!request.contains("taker_pays")) - return Status{ - RippledError::rpcINVALID_PARAMS, "Missing field 'taker_pays'"}; + return Status{RippledError::rpcINVALID_PARAMS, "Missing field 'taker_pays'"}; if (!request.contains("taker_gets")) - return Status{ - RippledError::rpcINVALID_PARAMS, "Missing field 'taker_gets'"}; + return Status{RippledError::rpcINVALID_PARAMS, "Missing field 'taker_gets'"}; if (!request.at("taker_pays").is_object()) - return Status{ - RippledError::rpcINVALID_PARAMS, - "Field 'taker_pays' is not an object"}; + return Status{RippledError::rpcINVALID_PARAMS, "Field 'taker_pays' is not an object"}; if (!request.at("taker_gets").is_object()) - return Status{ - RippledError::rpcINVALID_PARAMS, - "Field 'taker_gets' is not an object"}; + return Status{RippledError::rpcINVALID_PARAMS, "Field 'taker_gets' is not an object"}; auto taker_pays = request.at("taker_pays").as_object(); if (!taker_pays.contains("currency")) @@ -1462,24 +1334,20 @@ parseBook(boost::json::object const& request) }; ripple::Currency pay_currency; - if (!ripple::to_currency( - pay_currency, taker_pays.at("currency").as_string().c_str())) + if (!ripple::to_currency(pay_currency, taker_pays.at("currency").as_string().c_str())) return Status{RippledError::rpcSRC_CUR_MALFORMED}; ripple::Currency get_currency; - if (!ripple::to_currency( - get_currency, taker_gets["currency"].as_string().c_str())) + if (!ripple::to_currency(get_currency, taker_gets["currency"].as_string().c_str())) return Status{RippledError::rpcDST_AMT_MALFORMED}; ripple::AccountID pay_issuer; if (taker_pays.contains("issuer")) { if (!taker_pays.at("issuer").is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, "takerPaysIssuerNotString"}; + return Status{RippledError::rpcINVALID_PARAMS, "takerPaysIssuerNotString"}; - if (!ripple::to_issuer( - pay_issuer, taker_pays.at("issuer").as_string().c_str())) + if (!ripple::to_issuer(pay_issuer, taker_pays.at("issuer").as_string().c_str())) return Status{RippledError::rpcSRC_ISR_MALFORMED}; if (pay_issuer == ripple::noAccount()) @@ -1503,23 +1371,17 @@ parseBook(boost::json::object const& request) "issuer."}; if ((!isXRP(pay_currency)) && (!taker_pays.contains("issuer"))) - return Status{ - RippledError::rpcSRC_ISR_MALFORMED, "Missing non-XRP issuer."}; + return Status{RippledError::rpcSRC_ISR_MALFORMED, "Missing non-XRP issuer."}; ripple::AccountID get_issuer; if (taker_gets.contains("issuer")) { if 
(!taker_gets["issuer"].is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, - "taker_gets.issuer should be string"}; + return Status{RippledError::rpcINVALID_PARAMS, "taker_gets.issuer should be string"}; - if (!ripple::to_issuer( - get_issuer, taker_gets.at("issuer").as_string().c_str())) - return Status{ - RippledError::rpcDST_ISR_MALFORMED, - "Invalid field 'taker_gets.issuer', bad issuer."}; + if (!ripple::to_issuer(get_issuer, taker_gets.at("issuer").as_string().c_str())) + return Status{RippledError::rpcDST_ISR_MALFORMED, "Invalid field 'taker_gets.issuer', bad issuer."}; if (get_issuer == ripple::noAccount()) return Status{ @@ -1540,8 +1402,7 @@ parseBook(boost::json::object const& request) if (!ripple::isXRP(get_currency) && ripple::isXRP(get_issuer)) return Status{ - RippledError::rpcDST_ISR_MALFORMED, - "Invalid field 'taker_gets.issuer', expected non-XRP issuer."}; + RippledError::rpcDST_ISR_MALFORMED, "Invalid field 'taker_gets.issuer', expected non-XRP issuer."}; if (pay_currency == get_currency && pay_issuer == get_issuer) return Status{RippledError::rpcBAD_MARKET, "badMarket"}; @@ -1623,27 +1484,22 @@ traverseTransactions( if (obj.contains(JS(seq))) { if (!obj.at(JS(seq)).is_int64()) - return Status{ - RippledError::rpcINVALID_PARAMS, "transactionIndexNotInt"}; + return Status{RippledError::rpcINVALID_PARAMS, "transactionIndexNotInt"}; - transactionIndex = - boost::json::value_to(obj.at(JS(seq))); + transactionIndex = boost::json::value_to(obj.at(JS(seq))); } std::optional ledgerIndex = {}; if (obj.contains(JS(ledger))) { if (!obj.at(JS(ledger)).is_int64()) - return Status{ - RippledError::rpcINVALID_PARAMS, "ledgerIndexNotInt"}; + return Status{RippledError::rpcINVALID_PARAMS, "ledgerIndexNotInt"}; - ledgerIndex = - boost::json::value_to(obj.at(JS(ledger))); + ledgerIndex = boost::json::value_to(obj.at(JS(ledger))); } if (!transactionIndex || !ledgerIndex) - return Status{ - RippledError::rpcINVALID_PARAMS, "missingLedgerOrSeq"}; + return Status{RippledError::rpcINVALID_PARAMS, "missingLedgerOrSeq"}; cursor = {*ledgerIndex, *transactionIndex}; } @@ -1657,19 +1513,15 @@ traverseTransactions( { if (!request.at(JS(ledger_index_min)).is_int64()) { - return Status{ - RippledError::rpcINVALID_PARAMS, "ledgerSeqMinNotNumber"}; + return Status{RippledError::rpcINVALID_PARAMS, "ledgerSeqMinNotNumber"}; } min = request.at(JS(ledger_index_min)).as_int64(); if (*min != -1) { - if (context.range.maxSequence < *min || - context.range.minSequence > *min) - return Status{ - RippledError::rpcLGR_IDX_MALFORMED, - "ledgerSeqMinOutOfRange"}; + if (context.range.maxSequence < *min || context.range.minSequence > *min) + return Status{RippledError::rpcLGR_IDX_MALFORMED, "ledgerSeqMinOutOfRange"}; else minIndex = static_cast(*min); } @@ -1682,16 +1534,14 @@ traverseTransactions( { if (!request.at(JS(ledger_index_max)).is_int64()) { - return Status{ - RippledError::rpcINVALID_PARAMS, "ledgerSeqMaxNotNumber"}; + return Status{RippledError::rpcINVALID_PARAMS, "ledgerSeqMaxNotNumber"}; } max = request.at(JS(ledger_index_max)).as_int64(); if (*max != -1) { - if (context.range.maxSequence < *max || - context.range.minSequence > *max) + if (context.range.maxSequence < *max || context.range.minSequence > *max) return Status{RippledError::rpcLGR_IDXS_INVALID}; else maxIndex = static_cast(*max); @@ -1711,11 +1561,8 @@ traverseTransactions( if (request.contains(JS(ledger_index)) || request.contains(JS(ledger_hash))) { - if (request.contains(JS(ledger_index_max)) || - 
request.contains(JS(ledger_index_min))) - return Status{ - RippledError::rpcINVALID_PARAMS, - "containsLedgerSpecifierAndRange"}; + if (request.contains(JS(ledger_index_max)) || request.contains(JS(ledger_index_min))) + return Status{RippledError::rpcINVALID_PARAMS, "containsLedgerSpecifierAndRange"}; auto v = ledgerInfoFromRequest(context); if (auto status = std::get_if(&v); status) @@ -1740,8 +1587,7 @@ traverseTransactions( response[JS(limit)] = limit; boost::json::array txns; - auto [blobs, retCursor] = transactionFetcher( - context.backend, limit, forward, cursor, context.yield); + auto [blobs, retCursor] = transactionFetcher(context.backend, limit, forward, cursor, context.yield); auto timeDiff = util::timed([&, &retCursor = retCursor, &blobs = blobs]() { if (retCursor) { @@ -1761,8 +1607,7 @@ traverseTransactions( } else if (txnPlusMeta.ledgerSequence > maxIndex && !forward) { - gLog.debug() - << "Skipping over transactions from incomplete ledger"; + gLog.debug() << "Skipping over transactions from incomplete ledger"; continue; } @@ -1773,8 +1618,7 @@ traverseTransactions( auto [txn, meta] = toExpandedJson(txnPlusMeta); obj[JS(meta)] = meta; obj[JS(tx)] = txn; - obj[JS(tx)].as_object()[JS(ledger_index)] = - txnPlusMeta.ledgerSequence; + obj[JS(tx)].as_object()[JS(ledger_index)] = txnPlusMeta.ledgerSequence; obj[JS(tx)].as_object()[JS(date)] = txnPlusMeta.date; } else diff --git a/src/rpc/RPCHelpers.h b/src/rpc/RPCHelpers.h index 62151269..b62952ee 100644 --- a/src/rpc/RPCHelpers.h +++ b/src/rpc/RPCHelpers.h @@ -53,18 +53,12 @@ std::optional parseAccountCursor(std::optional jsonCursor); // TODO this function should probably be in a different file and namespace -std::pair< - std::shared_ptr, - std::shared_ptr> +std::pair, std::shared_ptr> deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs); // TODO this function should probably be in a different file and namespace -std::pair< - std::shared_ptr, - std::shared_ptr> -deserializeTxPlusMeta( - Backend::TransactionAndMetadata const& blobs, - std::uint32_t seq); +std::pair, std::shared_ptr> +deserializeTxPlusMeta(Backend::TransactionAndMetadata const& blobs, std::uint32_t seq); std::pair toExpandedJson(Backend::TransactionAndMetadata const& blobs); @@ -145,10 +139,7 @@ ngTraverseOwnedNodes( std::function atOwnedNode); std::shared_ptr -read( - ripple::Keylet const& keylet, - ripple::LedgerInfo const& lgrInfo, - Context const& context); +read(ripple::Keylet const& keylet, ripple::LedgerInfo const& lgrInfo, Context const& context); std::variant> keypairFromRequst(boost::json::object const& request); @@ -217,11 +208,7 @@ postProcessOrderBook( boost::asio::yield_context& yield); std::variant -parseBook( - ripple::Currency pays, - ripple::AccountID payIssuer, - ripple::Currency gets, - ripple::AccountID getIssuer); +parseBook(ripple::Currency pays, ripple::AccountID payIssuer, ripple::Currency gets, ripple::AccountID getIssuer); std::variant parseBook(boost::json::object const& request); @@ -233,10 +220,7 @@ std::optional getUInt(boost::json::object const& request, std::string const& field); std::uint32_t -getUInt( - boost::json::object const& request, - std::string const& field, - std::uint32_t dfault); +getUInt(boost::json::object const& request, std::string const& field, std::uint32_t dfault); std::uint32_t getRequiredUInt(boost::json::object const& request, std::string const& field); @@ -245,10 +229,7 @@ std::optional getBool(boost::json::object const& request, std::string const& field); bool -getBool( - boost::json::object 
const& request, - std::string const& field, - bool dfault); +getBool(boost::json::object const& request, std::string const& field, bool dfault); bool getRequiredBool(boost::json::object const& request, std::string const& field); @@ -260,10 +241,7 @@ std::string getRequiredString(boost::json::object const& request, std::string const& field); std::string -getString( - boost::json::object const& request, - std::string const& field, - std::string dfault); +getString(boost::json::object const& request, std::string const& field, std::string dfault); Status getHexMarker(boost::json::object const& request, ripple::uint256& marker); @@ -272,10 +250,7 @@ Status getAccount(boost::json::object const& request, ripple::AccountID& accountId); Status -getAccount( - boost::json::object const& request, - ripple::AccountID& destAccount, - boost::string_view const& field); +getAccount(boost::json::object const& request, ripple::AccountID& destAccount, boost::string_view const& field); Status getOptionalAccount( @@ -308,8 +283,6 @@ traverseTransactions( boost::asio::yield_context& yield)> transactionFetcher); [[nodiscard]] boost::json::object const -computeBookChanges( - ripple::LedgerInfo const& lgrInfo, - std::vector const& transactions); +computeBookChanges(ripple::LedgerInfo const& lgrInfo, std::vector const& transactions); } // namespace RPC diff --git a/src/rpc/WorkQueue.h b/src/rpc/WorkQueue.h index f1caf283..fc943336 100644 --- a/src/rpc/WorkQueue.h +++ b/src/rpc/WorkQueue.h @@ -50,31 +50,24 @@ public: { if (curSize_ >= maxSize_ && !isWhiteListed) { - log_.warn() << "Queue is full. rejecting job. current size = " - << curSize_ << " max size = " << maxSize_; + log_.warn() << "Queue is full. rejecting job. current size = " << curSize_ << " max size = " << maxSize_; return false; } ++curSize_; auto start = std::chrono::system_clock::now(); // Each time we enqueue a job, we want to post a symmetrical job that // will dequeue and run the job at the front of the job queue. - boost::asio::spawn( - ioc_, - [this, f = std::move(f), start](boost::asio::yield_context yield) { - auto run = std::chrono::system_clock::now(); - auto wait = - std::chrono::duration_cast( - run - start) - .count(); - // increment queued_ here, in the same place we implement - // durationUs_ - ++queued_; - durationUs_ += wait; - log_.info() << "WorkQueue wait time = " << wait - << " queue size = " << curSize_; - f(yield); - --curSize_; - }); + boost::asio::spawn(ioc_, [this, f = std::move(f), start](boost::asio::yield_context yield) { + auto run = std::chrono::system_clock::now(); + auto wait = std::chrono::duration_cast(run - start).count(); + // increment queued_ here, in the same place we implement + // durationUs_ + ++queued_; + durationUs_ += wait; + log_.info() << "WorkQueue wait time = " << wait << " queue size = " << curSize_; + f(yield); + --curSize_; + }); return true; } diff --git a/src/rpc/common/AnyHandler.h b/src/rpc/common/AnyHandler.h index 7a6752d6..c1639391 100644 --- a/src/rpc/common/AnyHandler.h +++ b/src/rpc/common/AnyHandler.h @@ -44,12 +44,9 @@ public: * @param handler The handler to wrap. Required to fulfil the @ref Handler * concept. 
*/ - template < - Handler HandlerType, - typename ProcessingStrategy = detail::DefaultProcessor> + template > /* implicit */ AnyHandler(HandlerType&& handler) - : pimpl_{std::make_unique>( - std::forward(handler))} + : pimpl_{std::make_unique>(std::forward(handler))} { } @@ -125,8 +122,7 @@ private: } [[nodiscard]] ReturnType - process(boost::json::value const& value, Context const& ctx) - const override + process(boost::json::value const& value, Context const& ctx) const override { return processor(handler, value, &ctx); } diff --git a/src/rpc/common/Specs.h b/src/rpc/common/Specs.h index 8bfc9424..309d91d2 100644 --- a/src/rpc/common/Specs.h +++ b/src/rpc/common/Specs.h @@ -43,9 +43,7 @@ struct FieldSpec final */ template FieldSpec(std::string const& key, Requirements&&... requirements) - : validator_{detail::makeFieldValidator( - key, - std::forward(requirements)...)} + : validator_{detail::makeFieldValidator(key, std::forward(requirements)...)} { } diff --git a/src/rpc/common/Types.h b/src/rpc/common/Types.h index b025b04d..2db8306a 100644 --- a/src/rpc/common/Types.h +++ b/src/rpc/common/Types.h @@ -68,10 +68,7 @@ struct Context }; inline void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - VoidOutput const&) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, VoidOutput const&) { jv = boost::json::object{}; } diff --git a/src/rpc/common/Validators.cpp b/src/rpc/common/Validators.cpp index 90af9e0d..a554f514 100644 --- a/src/rpc/common/Validators.cpp +++ b/src/rpc/common/Validators.cpp @@ -52,16 +52,14 @@ Section::verify(boost::json::value const& value, std::string_view key) const Required::verify(boost::json::value const& value, std::string_view key) const { if (not value.is_object() or not value.as_object().contains(key.data())) - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - "Required field '" + std::string{key} + "' missing"}}; + return Error{ + RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "Required field '" + std::string{key} + "' missing"}}; return {}; } [[nodiscard]] MaybeError -ValidateArrayAt::verify(boost::json::value const& value, std::string_view key) - const +ValidateArrayAt::verify(boost::json::value const& value, std::string_view key) const { if (not value.is_object() or not value.as_object().contains(key.data())) return {}; // ignore. field does not exist, let 'required' fail @@ -83,8 +81,7 @@ ValidateArrayAt::verify(boost::json::value const& value, std::string_view key) } [[nodiscard]] MaybeError -CustomValidator::verify(boost::json::value const& value, std::string_view key) - const +CustomValidator::verify(boost::json::value const& value, std::string_view key) const { if (not value.is_object() or not value.as_object().contains(key.data())) return {}; // ignore. 
field does not exist, let 'required' fail @@ -101,118 +98,96 @@ checkIsU32Numeric(std::string_view sv) return ec == std::errc(); } -CustomValidator Uint256HexStringValidator = CustomValidator{ - [](boost::json::value const& value, std::string_view key) -> MaybeError { +CustomValidator Uint256HexStringValidator = + CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { if (!value.is_string()) { - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "NotString"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "NotString"}}; } ripple::uint256 ledgerHash; if (!ledgerHash.parseHex(value.as_string().c_str())) - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "Malformed"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "Malformed"}}; return MaybeError{}; }}; -CustomValidator LedgerIndexValidator = CustomValidator{ - [](boost::json::value const& value, std::string_view key) -> MaybeError { - auto err = Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, "ledgerIndexMalformed"}}; +CustomValidator LedgerIndexValidator = + CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { + auto err = Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "ledgerIndexMalformed"}}; if (!value.is_string() && !(value.is_uint64() || value.is_int64())) { return err; } - if (value.is_string() && value.as_string() != "validated" && - !checkIsU32Numeric(value.as_string().c_str())) + if (value.is_string() && value.as_string() != "validated" && !checkIsU32Numeric(value.as_string().c_str())) { return err; } return MaybeError{}; }}; -CustomValidator AccountValidator = CustomValidator{ - [](boost::json::value const& value, std::string_view key) -> MaybeError { +CustomValidator AccountValidator = + CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { if (!value.is_string()) { - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "NotString"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "NotString"}}; } // TODO: we are using accountFromStringStrict from RPCHelpers, after we // remove all old handler, this function can be moved to here if (!RPC::accountFromStringStrict(value.as_string().c_str())) { - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "Malformed"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "Malformed"}}; } return MaybeError{}; }}; -CustomValidator AccountBase58Validator = CustomValidator{ - [](boost::json::value const& value, std::string_view key) -> MaybeError { +CustomValidator AccountBase58Validator = + CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { if (!value.is_string()) { - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "NotString"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "NotString"}}; } - auto const account = - ripple::parseBase58(value.as_string().c_str()); + auto const account = ripple::parseBase58(value.as_string().c_str()); if (!account || account->isZero()) return Error{RPC::Status{RPC::ClioError::rpcMALFORMED_ADDRESS}}; return MaybeError{}; }}; -CustomValidator AccountMarkerValidator = CustomValidator{ - [](boost::json::value const& value, std::string_view key) 
-> MaybeError { +CustomValidator AccountMarkerValidator = + CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { if (!value.is_string()) { - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "NotString"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "NotString"}}; } // TODO: we are using parseAccountCursor from RPCHelpers, after we // remove all old handler, this function can be moved to here if (!RPC::parseAccountCursor(value.as_string().c_str())) { // align with the current error message - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, "Malformed cursor"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "Malformed cursor"}}; } return MaybeError{}; }}; -CustomValidator CurrencyValidator = CustomValidator{ - [](boost::json::value const& value, std::string_view key) -> MaybeError { +CustomValidator CurrencyValidator = + CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { if (!value.is_string()) { - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "NotString"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "NotString"}}; } ripple::Currency currency; if (!ripple::to_currency(currency, value.as_string().c_str())) - return Error{RPC::Status{ - RPC::ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency"}}; + return Error{RPC::Status{RPC::ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency"}}; return MaybeError{}; }}; -CustomValidator IssuerValidator = CustomValidator{ - [](boost::json::value const& value, std::string_view key) -> MaybeError { +CustomValidator IssuerValidator = + CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { if (!value.is_string()) - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "NotString"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "NotString"}}; ripple::AccountID issuer; if (!ripple::to_issuer(issuer, value.as_string().c_str())) - return Error{RPC::Status{ - // TODO: need to align with the error - RPC::RippledError::rpcINVALID_PARAMS, - fmt::format("Invalid field '{}', bad issuer.", key)}}; + return Error{RPC::Status{// TODO: need to align with the error + RPC::RippledError::rpcINVALID_PARAMS, + fmt::format("Invalid field '{}', bad issuer.", key)}}; if (issuer == ripple::noAccount()) return Error{RPC::Status{ diff --git a/src/rpc/common/Validators.h b/src/rpc/common/Validators.h index cba05f59..b7b1da1e 100644 --- a/src/rpc/common/Validators.h +++ b/src/rpc/common/Validators.h @@ -46,8 +46,7 @@ template if (not value.is_string()) hasError = true; } - else if constexpr ( - std::is_same_v or std::is_same_v) + else if constexpr (std::is_same_v or std::is_same_v) { if (not value.is_double()) hasError = true; @@ -62,9 +61,7 @@ template if (not value.is_object()) hasError = true; } - else if constexpr ( - std::is_convertible_v or - std::is_convertible_v) + else if constexpr (std::is_convertible_v or std::is_convertible_v) { if (not value.is_int64() && not value.is_uint64()) hasError = true; @@ -266,8 +263,7 @@ public: using boost::json::value_to; auto const res = value_to(value.as_object().at(key.data())); - if (std::find(std::begin(options_), std::end(options_), res) == - std::end(options_)) + if (std::find(std::begin(options_), std::end(options_), res) == std::end(options_)) return 
Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS}}; return {}; @@ -297,8 +293,7 @@ public: * @param idx The index inside the array to validate * @param specs The specifications to validate against */ - ValidateArrayAt(std::size_t idx, std::initializer_list specs) - : idx_{idx}, specs_{specs} + ValidateArrayAt(std::size_t idx, std::initializer_list specs) : idx_{idx}, specs_{specs} { } @@ -330,12 +325,11 @@ public: IfType(Requirements&&... requirements) { validator_ = [... r = std::forward(requirements)]( - boost::json::value const& j, - std::string_view key) -> MaybeError { - // clang-format off + boost::json::value const& j, std::string_view key) -> MaybeError { std::optional firstFailure = std::nullopt; // the check logic is the same as fieldspec + // clang-format off ([&j, &key, &firstFailure, req = &r]() { if (firstFailure) return; @@ -373,8 +367,7 @@ public: } private: - std::function - validator_; + std::function validator_; }; /** @@ -392,8 +385,7 @@ public: * @brief Constructs a validator that calls the given validator "req" and * return customized error "err" */ - WithCustomError(Requirement req, RPC::Status err) - : requirement{std::move(req)}, error{err} + WithCustomError(Requirement req, RPC::Status err) : requirement{std::move(req)}, error{err} { } @@ -412,8 +404,7 @@ public: */ class CustomValidator final { - std::function - validator_; + std::function validator_; public: /** diff --git a/src/rpc/common/impl/Factories.h b/src/rpc/common/impl/Factories.h index c1fcd7e9..6a6081bb 100644 --- a/src/rpc/common/impl/Factories.h +++ b/src/rpc/common/impl/Factories.h @@ -32,14 +32,13 @@ template [[nodiscard]] auto makeFieldValidator(std::string const& key, Requirements&&... requirements) { - return [key, ... r = std::forward(requirements)]( - boost::json::value const& j) -> MaybeError { - // clang-format off + return [key, ... r = std::forward(requirements)](boost::json::value const& j) -> MaybeError { std::optional firstFailure = std::nullopt; - // This expands in order of Requirements and stops evaluating after - // first failure which is stored in `firstFailure` and can be checked + // This expands in order of Requirements and stops evaluating after + // first failure which is stored in `firstFailure` and can be checked // later on to see whether the verification failed as a whole or not. 
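Note: the comment above describes the core trick shared by IfType and the makeFieldValidator factory continuing just below: a comma fold over the Requirements pack in which every requirement is wrapped in an immediately invoked lambda that does nothing once firstFailure is set, so evaluation runs strictly left to right and stops contributing after the first error. A minimal standalone sketch of that short-circuiting fold (C++20; std::optional<std::string> stands in for MaybeError/Status, and the int parameter and requirement lambdas are hypothetical):

    #include <cassert>
    #include <optional>
    #include <string>
    #include <utility>

    // Builds a checker that runs each requirement in order and remembers the
    // first failure; later requirements are skipped once one has failed.
    template <typename... Requirements>
    [[nodiscard]] auto
    makeChecker(Requirements&&... requirements)
    {
        return [... r = std::forward<Requirements>(requirements)](int value) -> std::optional<std::string> {
            std::optional<std::string> firstFailure = std::nullopt;

            // comma fold over the pack; each operand is an immediately-invoked lambda
            ([&value, &firstFailure, req = &r]() {
                if (firstFailure)
                    return;  // already failed earlier - skip
                if (auto res = (*req)(value); res)
                    firstFailure = std::move(res);
            }(), ...);

            return firstFailure;  // empty optional == success
        };
    }

    int
    main()
    {
        auto check = makeChecker(
            [](int v) -> std::optional<std::string> { return v < 0 ? std::optional<std::string>{"negative"} : std::nullopt; },
            [](int v) -> std::optional<std::string> { return v > 100 ? std::optional<std::string>{"tooLarge"} : std::nullopt; });

        assert(not check(42));
        assert(check(-1) && *check(-1) == "negative");
    }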
+ // clang-format off ([&j, &key, &firstFailure, req = &r]() { if (firstFailure) return; // already failed earlier - skip diff --git a/src/rpc/common/impl/Processors.h b/src/rpc/common/impl/Processors.h index e9002f2c..84ae510f 100644 --- a/src/rpc/common/impl/Processors.h +++ b/src/rpc/common/impl/Processors.h @@ -31,10 +31,7 @@ template struct DefaultProcessor final { [[nodiscard]] ReturnType - operator()( - HandlerType const& handler, - boost::json::value const& value, - Context const* ctx = nullptr) const + operator()(HandlerType const& handler, boost::json::value const& value, Context const* ctx = nullptr) const { using boost::json::value_from; using boost::json::value_to; diff --git a/src/rpc/handlers/AccountChannels.cpp b/src/rpc/handlers/AccountChannels.cpp index 0042234d..87221cbc 100644 --- a/src/rpc/handlers/AccountChannels.cpp +++ b/src/rpc/handlers/AccountChannels.cpp @@ -38,8 +38,7 @@ addChannel(boost::json::array& jsonLines, ripple::SLE const& line) boost::json::object jDst; jDst[JS(channel_id)] = ripple::to_string(line.key()); jDst[JS(account)] = ripple::to_string(line.getAccountID(ripple::sfAccount)); - jDst[JS(destination_account)] = - ripple::to_string(line.getAccountID(ripple::sfDestination)); + jDst[JS(destination_account)] = ripple::to_string(line.getAccountID(ripple::sfDestination)); jDst[JS(amount)] = line[ripple::sfAmount].getText(); jDst[JS(balance)] = line[ripple::sfBalance].getText(); if (publicKeyType(line[ripple::sfPublicKey])) @@ -77,16 +76,14 @@ doAccountChannels(Context const& context) if (auto const status = getAccount(request, accountID); status) return status; - auto rawAcct = context.backend->fetchLedgerObject( - ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + auto rawAcct = + context.backend->fetchLedgerObject(ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); if (!rawAcct) return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; ripple::AccountID destAccount; - if (auto const status = - getAccount(request, destAccount, JS(destination_account)); - status) + if (auto const status = getAccount(request, destAccount, JS(destination_account)); status) return status; std::uint32_t limit; @@ -108,10 +105,8 @@ doAccountChannels(Context const& context) boost::json::array& jsonChannels = response.at(JS(channels)).as_array(); auto const addToResponse = [&](ripple::SLE&& sle) { - if (sle.getType() == ripple::ltPAYCHAN && - sle.getAccountID(ripple::sfAccount) == accountID && - (!destAccount || - destAccount == sle.getAccountID(ripple::sfDestination))) + if (sle.getType() == ripple::ltPAYCHAN && sle.getAccountID(ripple::sfAccount) == accountID && + (!destAccount || destAccount == sle.getAccountID(ripple::sfDestination))) { addChannel(jsonChannels, sle); } @@ -119,14 +114,8 @@ doAccountChannels(Context const& context) return true; }; - auto next = traverseOwnedNodes( - *context.backend, - accountID, - lgrInfo.seq, - limit, - marker, - context.yield, - addToResponse); + auto next = + traverseOwnedNodes(*context.backend, accountID, lgrInfo.seq, limit, marker, context.yield, addToResponse); response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); response[JS(ledger_index)] = lgrInfo.seq; diff --git a/src/rpc/handlers/AccountCurrencies.cpp b/src/rpc/handlers/AccountCurrencies.cpp index 8be9f801..656816bc 100644 --- a/src/rpc/handlers/AccountCurrencies.cpp +++ b/src/rpc/handlers/AccountCurrencies.cpp @@ -47,8 +47,8 @@ doAccountCurrencies(Context const& context) if (auto const status = getAccount(request, accountID); 
status) return status; - auto rawAcct = context.backend->fetchLedgerObject( - ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + auto rawAcct = + context.backend->fetchLedgerObject(ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); if (!rawAcct) return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; @@ -88,10 +88,8 @@ doAccountCurrencies(Context const& context) response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); response[JS(ledger_index)] = lgrInfo.seq; - response[JS(receive_currencies)] = - boost::json::value(boost::json::array_kind); - boost::json::array& jsonReceive = - response.at(JS(receive_currencies)).as_array(); + response[JS(receive_currencies)] = boost::json::value(boost::json::array_kind); + boost::json::array& jsonReceive = response.at(JS(receive_currencies)).as_array(); for (auto const& currency : receive) jsonReceive.push_back(currency.c_str()); diff --git a/src/rpc/handlers/AccountInfo.cpp b/src/rpc/handlers/AccountInfo.cpp index 2a7fbe4c..fc7a4b14 100644 --- a/src/rpc/handlers/AccountInfo.cpp +++ b/src/rpc/handlers/AccountInfo.cpp @@ -76,8 +76,7 @@ doAccountInfo(Context const& context) if (!dbResponse) return Status{RippledError::rpcACT_NOT_FOUND}; - ripple::STLedgerEntry sle{ - ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key.key}; + ripple::STLedgerEntry sle{ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key.key}; if (!key.check(sle)) return Status{RippledError::rpcDB_DESERIALIZATION}; @@ -87,8 +86,7 @@ doAccountInfo(Context const& context) response[JS(ledger_index)] = lgrInfo.seq; // Return SignerList(s) if that is requested. - if (request.contains(JS(signer_lists)) && - request.at(JS(signer_lists)).as_bool()) + if (request.contains(JS(signer_lists)) && request.at(JS(signer_lists)).as_bool()) { // We put the SignerList in an array because of an anticipated // future when we support multiple signer lists on one account. @@ -97,21 +95,17 @@ doAccountInfo(Context const& context) // This code will need to be revisited if in the future we // support multiple SignerLists on one account. - auto const signers = context.backend->fetchLedgerObject( - signersKey.key, lgrInfo.seq, context.yield); + auto const signers = context.backend->fetchLedgerObject(signersKey.key, lgrInfo.seq, context.yield); if (signers) { - ripple::STLedgerEntry sleSigners{ - ripple::SerialIter{signers->data(), signers->size()}, - signersKey.key}; + ripple::STLedgerEntry sleSigners{ripple::SerialIter{signers->data(), signers->size()}, signersKey.key}; if (!signersKey.check(sleSigners)) return Status{RippledError::rpcDB_DESERIALIZATION}; signerList.push_back(toJson(sleSigners)); } - response[JS(account_data)].as_object()[JS(signer_lists)] = - std::move(signerList); + response[JS(account_data)].as_object()[JS(signer_lists)] = std::move(signerList); } return response; diff --git a/src/rpc/handlers/AccountLines.cpp b/src/rpc/handlers/AccountLines.cpp index a44a4889..ead52327 100644 --- a/src/rpc/handlers/AccountLines.cpp +++ b/src/rpc/handlers/AccountLines.cpp @@ -64,18 +64,12 @@ addLine( if (!viewLowest) balance.negate(); - bool lineAuth = - flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); - bool lineAuthPeer = - flags & (!viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); - bool lineNoRipple = - flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); - bool lineNoRipplePeer = flags & - (!viewLowest ? 
ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); - bool lineFreeze = - flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); - bool lineFreezePeer = - flags & (!viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + bool lineAuth = flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); + bool lineAuthPeer = flags & (!viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); + bool lineNoRipple = flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + bool lineNoRipplePeer = flags & (!viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + bool lineFreeze = flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + bool lineFreezePeer = flags & (!viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); ripple::STAmount const& saBalance(balance); ripple::STAmount const& saLimit(lineLimit); @@ -119,15 +113,14 @@ doAccountLines(Context const& context) if (auto const status = getAccount(request, accountID); status) return status; - auto rawAcct = context.backend->fetchLedgerObject( - ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + auto rawAcct = + context.backend->fetchLedgerObject(ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); if (!rawAcct) return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; std::optional peerAccount; - if (auto const status = getOptionalAccount(request, peerAccount, JS(peer)); - status) + if (auto const status = getOptionalAccount(request, peerAccount, JS(peer)); status) return status; std::uint32_t limit; @@ -147,8 +140,7 @@ doAccountLines(Context const& context) if (request.contains(JS(ignore_default))) { if (not request.at(JS(ignore_default)).is_bool()) - return Status{ - RippledError::rpcINVALID_PARAMS, "ignoreDefaultNotBool"}; + return Status{RippledError::rpcINVALID_PARAMS, "ignoreDefaultNotBool"}; ignoreDefault = request.at(JS(ignore_default)).as_bool(); } @@ -166,15 +158,10 @@ doAccountLines(Context const& context) auto ignore = false; if (ignoreDefault) { - if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == - accountID) - ignore = - !(sle.getFieldU32(ripple::sfFlags) & - ripple::lsfLowReserve); + if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == accountID) + ignore = !(sle.getFieldU32(ripple::sfFlags) & ripple::lsfLowReserve); else - ignore = - !(sle.getFieldU32(ripple::sfFlags) & - ripple::lsfHighReserve); + ignore = !(sle.getFieldU32(ripple::sfFlags) & ripple::lsfHighReserve); } if (!ignore) @@ -182,14 +169,8 @@ doAccountLines(Context const& context) } }; - auto next = traverseOwnedNodes( - *context.backend, - accountID, - lgrInfo.seq, - limit, - marker, - context.yield, - addToResponse); + auto next = + traverseOwnedNodes(*context.backend, accountID, lgrInfo.seq, limit, marker, context.yield, addToResponse); if (auto status = std::get_if(&next)) return *status; diff --git a/src/rpc/handlers/AccountObjects.cpp b/src/rpc/handlers/AccountObjects.cpp index cebf5135..00c1057d 100644 --- a/src/rpc/handlers/AccountObjects.cpp +++ b/src/rpc/handlers/AccountObjects.cpp @@ -66,8 +66,8 @@ doAccountNFTs(Context const& context) if (!accountID) return Status{RippledError::rpcINVALID_PARAMS, "malformedAccount"}; - auto rawAcct = context.backend->fetchLedgerObject( - ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + auto rawAcct = + context.backend->fetchLedgerObject(ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); if (!rawAcct) return Status{RippledError::rpcACT_NOT_FOUND, 
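Note: the reflowed addLine above keeps the usual trust-line convention: a RippleState entry stores one set of limits and flags for its low account and one for its high account, so every lookup selects the side matching the viewing account (viewLowest) and the opposite side for the peer, and the stored balance is negated when seen from the high side. A small self-contained sketch of that side selection, with hypothetical flag constants standing in for rippled's lsf* values:

    #include <cstdint>

    // Hypothetical bit positions standing in for lsfLowAuth/lsfHighAuth etc.
    constexpr std::uint32_t kLowAuth = 0x01;
    constexpr std::uint32_t kHighAuth = 0x02;
    constexpr std::uint32_t kLowFreeze = 0x04;
    constexpr std::uint32_t kHighFreeze = 0x08;

    struct LineView
    {
        bool auth;       // flag as it applies to the viewing account
        bool authPeer;   // flag as it applies to the peer
        bool freeze;
        bool freezePeer;
        double balance;  // positive means the peer owes the viewing account
    };

    // viewLowest == true when the viewing account is the "low" account of the line.
    LineView
    viewLine(std::uint32_t flags, double storedBalance, bool viewLowest)
    {
        LineView v;
        v.auth = flags & (viewLowest ? kLowAuth : kHighAuth);
        v.authPeer = flags & (!viewLowest ? kLowAuth : kHighAuth);
        v.freeze = flags & (viewLowest ? kLowFreeze : kHighFreeze);
        v.freezePeer = flags & (!viewLowest ? kLowFreeze : kHighFreeze);
        v.balance = viewLowest ? storedBalance : -storedBalance;  // negate for the high side
        return v;
    }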
"accountNotFound"}; @@ -90,15 +90,12 @@ doAccountNFTs(Context const& context) // if a marker was passed, start at the page specified in marker. Else, // start at the max page - auto const pageKey = - marker.isZero() ? ripple::keylet::nftpage_max(accountID).key : marker; + auto const pageKey = marker.isZero() ? ripple::keylet::nftpage_max(accountID).key : marker; - auto const blob = - context.backend->fetchLedgerObject(pageKey, lgrInfo.seq, context.yield); + auto const blob = context.backend->fetchLedgerObject(pageKey, lgrInfo.seq, context.yield); if (!blob) return response; - std::optional page{ - ripple::SLE{ripple::SerialIter{blob->data(), blob->size()}, pageKey}}; + std::optional page{ripple::SLE{ripple::SerialIter{blob->data(), blob->size()}, pageKey}}; // Continue iteration from the current page while (page) @@ -110,20 +107,16 @@ doAccountNFTs(Context const& context) ripple::uint256 const nftokenID = o[ripple::sfNFTokenID]; { - nfts.push_back( - toBoostJson(o.getJson(ripple::JsonOptions::none))); + nfts.push_back(toBoostJson(o.getJson(ripple::JsonOptions::none))); auto& obj = nfts.back().as_object(); // Pull out the components of the nft ID. obj[SFS(sfFlags)] = ripple::nft::getFlags(nftokenID); - obj[SFS(sfIssuer)] = - to_string(ripple::nft::getIssuer(nftokenID)); - obj[SFS(sfNFTokenTaxon)] = - ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID)); + obj[SFS(sfIssuer)] = to_string(ripple::nft::getIssuer(nftokenID)); + obj[SFS(sfNFTokenTaxon)] = ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID)); obj[JS(nft_serial)] = ripple::nft::getSerial(nftokenID); - if (std::uint16_t xferFee = { - ripple::nft::getTransferFee(nftokenID)}) + if (std::uint16_t xferFee = {ripple::nft::getTransferFee(nftokenID)}) obj[SFS(sfTransferFee)] = xferFee; } } @@ -138,12 +131,9 @@ doAccountNFTs(Context const& context) response[JS(limit)] = numPages; return response; } - auto const nextBlob = context.backend->fetchLedgerObject( - nextKey.key, lgrInfo.seq, context.yield); + auto const nextBlob = context.backend->fetchLedgerObject(nextKey.key, lgrInfo.seq, context.yield); - page.emplace(ripple::SLE{ - ripple::SerialIter{nextBlob->data(), nextBlob->size()}, - nextKey.key}); + page.emplace(ripple::SLE{ripple::SerialIter{nextBlob->data(), nextBlob->size()}, nextKey.key}); } else page.reset(); @@ -196,8 +186,7 @@ doAccountObjects(Context const& context) response[JS(account)] = ripple::to_string(accountID); response[JS(account_objects)] = boost::json::value(boost::json::array_kind); - boost::json::array& jsonObjects = - response.at(JS(account_objects)).as_array(); + boost::json::array& jsonObjects = response.at(JS(account_objects)).as_array(); auto const addToResponse = [&](ripple::SLE&& sle) { if (!objectType || objectType == sle.getType()) @@ -206,14 +195,8 @@ doAccountObjects(Context const& context) } }; - auto next = traverseOwnedNodes( - *context.backend, - accountID, - lgrInfo.seq, - limit, - marker, - context.yield, - addToResponse); + auto next = + traverseOwnedNodes(*context.backend, accountID, lgrInfo.seq, limit, marker, context.yield, addToResponse); response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); response[JS(ledger_index)] = lgrInfo.seq; diff --git a/src/rpc/handlers/AccountOffers.cpp b/src/rpc/handlers/AccountOffers.cpp index 2cfd985e..57c3f83f 100644 --- a/src/rpc/handlers/AccountOffers.cpp +++ b/src/rpc/handlers/AccountOffers.cpp @@ -50,8 +50,7 @@ addOffer(boost::json::array& offersJson, ripple::SLE const& offer) boost::json::object& takerPaysJson = obj.at(JS(taker_pays)).as_object(); 
takerPaysJson[JS(value)] = takerPays.getText(); - takerPaysJson[JS(currency)] = - ripple::to_string(takerPays.getCurrency()); + takerPaysJson[JS(currency)] = ripple::to_string(takerPays.getCurrency()); takerPaysJson[JS(issuer)] = ripple::to_string(takerPays.getIssuer()); } else @@ -65,8 +64,7 @@ addOffer(boost::json::array& offersJson, ripple::SLE const& offer) boost::json::object& takerGetsJson = obj.at(JS(taker_gets)).as_object(); takerGetsJson[JS(value)] = takerGets.getText(); - takerGetsJson[JS(currency)] = - ripple::to_string(takerGets.getCurrency()); + takerGetsJson[JS(currency)] = ripple::to_string(takerGets.getCurrency()); takerGetsJson[JS(issuer)] = ripple::to_string(takerGets.getIssuer()); } else @@ -99,8 +97,8 @@ doAccountOffers(Context const& context) if (auto const status = getAccount(request, accountID); status) return status; - auto rawAcct = context.backend->fetchLedgerObject( - ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); + auto rawAcct = + context.backend->fetchLedgerObject(ripple::keylet::account(accountID).key, lgrInfo.seq, context.yield); if (!rawAcct) return Status{RippledError::rpcACT_NOT_FOUND, "accountNotFound"}; @@ -134,14 +132,8 @@ doAccountOffers(Context const& context) return true; }; - auto next = traverseOwnedNodes( - *context.backend, - accountID, - lgrInfo.seq, - limit, - marker, - context.yield, - addToResponse); + auto next = + traverseOwnedNodes(*context.backend, accountID, lgrInfo.seq, limit, marker, context.yield, addToResponse); if (auto status = std::get_if(&next)) return *status; diff --git a/src/rpc/handlers/AccountTx.cpp b/src/rpc/handlers/AccountTx.cpp index 59aac6b4..b8e0168a 100644 --- a/src/rpc/handlers/AccountTx.cpp +++ b/src/rpc/handlers/AccountTx.cpp @@ -46,13 +46,10 @@ doAccountTx(Context const& context) bool const forward, std::optional const& cursorIn, boost::asio::yield_context& yield) { - auto [txnsAndCursor, timeDiff] = util::timed([&]() { - return backend->fetchAccountTransactions( - accountID, limit, forward, cursorIn, yield); - }); + auto [txnsAndCursor, timeDiff] = util::timed( + [&]() { return backend->fetchAccountTransactions(accountID, limit, forward, cursorIn, yield); }); gLog.info() << outerFuncName << " db fetch took " << timeDiff - << " milliseconds - num blobs = " - << txnsAndCursor.txns.size(); + << " milliseconds - num blobs = " << txnsAndCursor.txns.size(); return txnsAndCursor; }); diff --git a/src/rpc/handlers/BookChanges.cpp b/src/rpc/handlers/BookChanges.cpp index b3a2818b..877628a4 100644 --- a/src/rpc/handlers/BookChanges.cpp +++ b/src/rpc/handlers/BookChanges.cpp @@ -72,8 +72,7 @@ private: public: [[nodiscard]] std::vector - operator()( - std::vector const& transactions) + operator()(std::vector const& transactions) { for (auto const& tx : transactions) handleBookChange(tx); @@ -103,47 +102,36 @@ private: // if either FF or PF are missing we can't compute // but generally these are cancelled rather than crossed // so skipping them is consistent - if (!node.isFieldPresent(sfFinalFields) || - !node.isFieldPresent(sfPreviousFields)) + if (!node.isFieldPresent(sfFinalFields) || !node.isFieldPresent(sfPreviousFields)) return; - auto const& finalFields = - node.peekAtField(sfFinalFields).downcast(); - auto const& previousFields = - node.peekAtField(sfPreviousFields).downcast(); + auto const& finalFields = node.peekAtField(sfFinalFields).downcast(); + auto const& previousFields = node.peekAtField(sfPreviousFields).downcast(); // defensive case that should never be hit - if 
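Note: the AccountTx change above shows how util::timed is consumed: it runs the callable and yields both its result and the elapsed time, which the handler then logs in milliseconds together with the number of fetched blobs. A plausible sketch of such a helper, assuming a milliseconds count and a non-void callable, which is all these call sites need:

    #include <chrono>
    #include <utility>

    // Invoke f, returning {f's result, elapsed time in milliseconds}.
    // A sketch matching how the AccountTx/NFTHistory call sites use util::timed;
    // Clio's real helper may differ in signature and duration type.
    template <typename Fn>
    auto
    timed(Fn&& f)
    {
        auto const start = std::chrono::steady_clock::now();
        auto result = std::forward<Fn>(f)();
        auto const elapsed =
            std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start).count();
        return std::make_pair(std::move(result), elapsed);
    }

Usage mirrors the handler: auto [rows, ms] = timed([&] { return fetchRows(); }); followed by a log line containing ms.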
(!finalFields.isFieldPresent(sfTakerGets) || - !finalFields.isFieldPresent(sfTakerPays) || - !previousFields.isFieldPresent(sfTakerGets) || - !previousFields.isFieldPresent(sfTakerPays)) + if (!finalFields.isFieldPresent(sfTakerGets) || !finalFields.isFieldPresent(sfTakerPays) || + !previousFields.isFieldPresent(sfTakerGets) || !previousFields.isFieldPresent(sfTakerPays)) return; // filter out any offers deleted by explicit offer cancels - if (metaType == sfDeletedNode && offerCancel_ && - finalFields.getFieldU32(sfSequence) == *offerCancel_) + if (metaType == sfDeletedNode && offerCancel_ && finalFields.getFieldU32(sfSequence) == *offerCancel_) return; // compute the difference in gets and pays actually // affected onto the offer - auto const deltaGets = finalFields.getFieldAmount(sfTakerGets) - - previousFields.getFieldAmount(sfTakerGets); - auto const deltaPays = finalFields.getFieldAmount(sfTakerPays) - - previousFields.getFieldAmount(sfTakerPays); + auto const deltaGets = finalFields.getFieldAmount(sfTakerGets) - previousFields.getFieldAmount(sfTakerGets); + auto const deltaPays = finalFields.getFieldAmount(sfTakerPays) - previousFields.getFieldAmount(sfTakerPays); transformAndStore(deltaGets, deltaPays); } void - transformAndStore( - ripple::STAmount const& deltaGets, - ripple::STAmount const& deltaPays) + transformAndStore(ripple::STAmount const& deltaGets, ripple::STAmount const& deltaPays) { auto const g = to_string(deltaGets.issue()); auto const p = to_string(deltaPays.issue()); - auto const noswap = - isXRP(deltaGets) ? true : (isXRP(deltaPays) ? false : (g < p)); + auto const noswap = isXRP(deltaGets) ? true : (isXRP(deltaPays) ? false : (g < p)); auto first = noswap ? deltaGets : deltaPays; auto second = noswap ? deltaPays : deltaGets; @@ -224,8 +212,7 @@ void tag_invoke(json::value_from_tag, json::value& jv, BookChange const& change) { auto amountStr = [](STAmount const& amount) -> std::string { - return isXRP(amount) ? to_string(amount.xrp()) - : to_string(amount.iou()); + return isXRP(amount) ? 
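Note: transformAndStore above reduces each affected offer to the change in taker_gets/taker_pays (final fields minus previous fields) and then orders the pair canonically: XRP first when present, otherwise by the textual issue, so both trade directions of one book aggregate under the same key. A sketch of that ordering rule with plain strings and doubles in place of STAmount/Issue:

    #include <string>
    #include <utility>

    struct Delta
    {
        std::string issue;  // e.g. "XRP" or "USD/rSomeIssuer..." (hypothetical encoding)
        double value;       // final minus previous amount on that side
    };

    // Mirrors the noswap logic in transformAndStore: keep XRP as the first leg if
    // either side is XRP, otherwise sort the two legs by their issue text.
    std::pair<Delta, Delta>
    canonicalize(Delta const& deltaGets, Delta const& deltaPays)
    {
        bool const getsIsXrp = deltaGets.issue == "XRP";
        bool const paysIsXrp = deltaPays.issue == "XRP";
        bool const noswap = getsIsXrp ? true : (paysIsXrp ? false : (deltaGets.issue < deltaPays.issue));
        return noswap ? std::pair{deltaGets, deltaPays} : std::pair{deltaPays, deltaGets};
    }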
to_string(amount.xrp()) : to_string(amount.iou()); }; auto currencyStr = [](STAmount const& amount) -> std::string { @@ -245,9 +232,7 @@ tag_invoke(json::value_from_tag, json::value& jv, BookChange const& change) } json::object const -computeBookChanges( - ripple::LedgerInfo const& lgrInfo, - std::vector const& transactions) +computeBookChanges(ripple::LedgerInfo const& lgrInfo, std::vector const& transactions) { return { {JS(type), "bookChanges"}, @@ -267,8 +252,7 @@ doBookChanges(Context const& context) return *status; auto const lgrInfo = std::get(info); - auto const transactions = context.backend->fetchAllTransactionsInLedger( - lgrInfo.seq, context.yield); + auto const transactions = context.backend->fetchAllTransactionsInLedger(lgrInfo.seq, context.yield); return computeBookChanges(lgrInfo, transactions); } diff --git a/src/rpc/handlers/BookOffers.cpp b/src/rpc/handlers/BookOffers.cpp index 331c6f3c..7ab0caa6 100644 --- a/src/rpc/handlers/BookOffers.cpp +++ b/src/rpc/handlers/BookOffers.cpp @@ -84,28 +84,21 @@ doBookOffers(Context const& context) return status; auto start = std::chrono::system_clock::now(); - auto [offers, _] = context.backend->fetchBookOffers( - bookBase, lgrInfo.seq, limit, context.yield); + auto [offers, _] = context.backend->fetchBookOffers(bookBase, lgrInfo.seq, limit, context.yield); auto end = std::chrono::system_clock::now(); - gLog.warn() << "Time loading books: " - << std::chrono::duration_cast( - end - start) - .count() + gLog.warn() << "Time loading books: " << std::chrono::duration_cast(end - start).count() << " milliseconds - request = " << request; response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); response[JS(ledger_index)] = lgrInfo.seq; - response[JS(offers)] = postProcessOrderBook( - offers, book, takerID, *context.backend, lgrInfo.seq, context.yield); + response[JS(offers)] = postProcessOrderBook(offers, book, takerID, *context.backend, lgrInfo.seq, context.yield); auto end2 = std::chrono::system_clock::now(); gLog.warn() << "Time transforming to json: " - << std::chrono::duration_cast( - end2 - end) - .count() + << std::chrono::duration_cast(end2 - end).count() << " milliseconds - request = " << request; return response; } diff --git a/src/rpc/handlers/ChannelAuthorize.cpp b/src/rpc/handlers/ChannelAuthorize.cpp index 477fa451..20671a8c 100644 --- a/src/rpc/handlers/ChannelAuthorize.cpp +++ b/src/rpc/handlers/ChannelAuthorize.cpp @@ -30,10 +30,7 @@ namespace RPC { void -serializePayChanAuthorization( - ripple::Serializer& msg, - ripple::uint256 const& key, - ripple::XRPAmount const& amt) +serializePayChanAuthorization(ripple::Serializer& msg, ripple::uint256 const& key, ripple::XRPAmount const& amt) { msg.add32(ripple::HashPrefix::paymentChannelClaim); msg.addBitString(key); @@ -53,32 +50,27 @@ doChannelAuthorize(Context const& context) return Status{RippledError::rpcINVALID_PARAMS, "amountNotString"}; if (!request.contains(JS(key_type)) && !request.contains(JS(secret))) - return Status{ - RippledError::rpcINVALID_PARAMS, "missingKeyTypeOrSecret"}; + return Status{RippledError::rpcINVALID_PARAMS, "missingKeyTypeOrSecret"}; auto v = keypairFromRequst(request); if (auto status = std::get_if(&v)) return *status; - auto const [pk, sk] = - std::get>(v); + auto const [pk, sk] = std::get>(v); ripple::uint256 channelId; if (auto const status = getChannelId(request, channelId); status) return status; - auto optDrops = - ripple::to_uint64(request.at(JS(amount)).as_string().c_str()); + auto optDrops = 
ripple::to_uint64(request.at(JS(amount)).as_string().c_str()); if (!optDrops) - return Status{ - RippledError::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"}; + return Status{RippledError::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"}; std::uint64_t drops = *optDrops; ripple::Serializer msg; - ripple::serializePayChanAuthorization( - msg, channelId, ripple::XRPAmount(drops)); + ripple::serializePayChanAuthorization(msg, channelId, ripple::XRPAmount(drops)); try { diff --git a/src/rpc/handlers/ChannelVerify.cpp b/src/rpc/handlers/ChannelVerify.cpp index 2db1ea5c..d20a0685 100644 --- a/src/rpc/handlers/ChannelVerify.cpp +++ b/src/rpc/handlers/ChannelVerify.cpp @@ -55,23 +55,18 @@ doChannelVerify(Context const& context) std::optional pk; { - std::string const strPk = - request.at(JS(public_key)).as_string().c_str(); - pk = ripple::parseBase58( - ripple::TokenType::AccountPublic, strPk); + std::string const strPk = request.at(JS(public_key)).as_string().c_str(); + pk = ripple::parseBase58(ripple::TokenType::AccountPublic, strPk); if (!pk) { auto pkHex = ripple::strUnHex(strPk); if (!pkHex) - return Status{ - RippledError::rpcPUBLIC_MALFORMED, "malformedPublicKey"}; + return Status{RippledError::rpcPUBLIC_MALFORMED, "malformedPublicKey"}; - auto const pkType = - ripple::publicKeyType(ripple::makeSlice(*pkHex)); + auto const pkType = ripple::publicKeyType(ripple::makeSlice(*pkHex)); if (!pkType) - return Status{ - RippledError::rpcPUBLIC_MALFORMED, "invalidKeyType"}; + return Status{RippledError::rpcPUBLIC_MALFORMED, "invalidKeyType"}; pk.emplace(ripple::makeSlice(*pkHex)); } @@ -81,12 +76,10 @@ doChannelVerify(Context const& context) if (auto const status = getChannelId(request, channelId); status) return status; - auto optDrops = - ripple::to_uint64(request.at(JS(amount)).as_string().c_str()); + auto optDrops = ripple::to_uint64(request.at(JS(amount)).as_string().c_str()); if (!optDrops) - return Status{ - RippledError::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"}; + return Status{RippledError::rpcCHANNEL_AMT_MALFORMED, "couldNotParseAmount"}; std::uint64_t drops = *optDrops; @@ -96,11 +89,9 @@ doChannelVerify(Context const& context) return Status{RippledError::rpcINVALID_PARAMS, "invalidSignature"}; ripple::Serializer msg; - ripple::serializePayChanAuthorization( - msg, channelId, ripple::XRPAmount(drops)); + ripple::serializePayChanAuthorization(msg, channelId, ripple::XRPAmount(drops)); - response[JS(signature_verified)] = - ripple::verify(*pk, msg.slice(), ripple::makeSlice(*sig), true); + response[JS(signature_verified)] = ripple::verify(*pk, msg.slice(), ripple::makeSlice(*sig), true); return response; } diff --git a/src/rpc/handlers/GatewayBalances.cpp b/src/rpc/handlers/GatewayBalances.cpp index 7cf874f5..038d791f 100644 --- a/src/rpc/handlers/GatewayBalances.cpp +++ b/src/rpc/handlers/GatewayBalances.cpp @@ -46,19 +46,17 @@ doGatewayBalances(Context const& context) if (request.contains(JS(hotwallet))) { - auto getAccountID = - [](auto const& j) -> std::optional { + auto getAccountID = [](auto const& j) -> std::optional { if (j.is_string()) { - auto const pk = ripple::parseBase58( - ripple::TokenType::AccountPublic, j.as_string().c_str()); + auto const pk = + ripple::parseBase58(ripple::TokenType::AccountPublic, j.as_string().c_str()); if (pk) { return ripple::calcAccountID(*pk); } - return ripple::parseBase58( - j.as_string().c_str()); + return ripple::parseBase58(j.as_string().c_str()); } return {}; }; @@ -111,8 +109,7 @@ doGatewayBalances(Context const& context) auto 
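Note: channel_authorize and channel_verify both rebuild the same claim message: the paymentChannelClaim hash prefix, the 256-bit channel id, and the drop amount, which is then either signed or checked with ripple::verify. A simplified sketch of that byte layout using a plain byte vector; the prefix constant and the 64-bit amount width are assumptions here rather than something shown in this diff:

    #include <array>
    #include <cstdint>
    #include <vector>

    // Append an unsigned integer big-endian, standing in for Serializer::add32/add64.
    template <typename UInt>
    void
    appendBigEndian(std::vector<std::uint8_t>& buf, UInt value)
    {
        for (int shift = (sizeof(UInt) - 1) * 8; shift >= 0; shift -= 8)
            buf.push_back(static_cast<std::uint8_t>((value >> shift) & 0xff));
    }

    // Sketch of the claim blob both handlers build: prefix, 256-bit channel id, drops.
    std::vector<std::uint8_t>
    makeClaimMessage(std::array<std::uint8_t, 32> const& channelId, std::uint64_t drops)
    {
        constexpr std::uint32_t kPaymentChannelClaimPrefix = 0x434C4D00;  // stand-in for ripple::HashPrefix::paymentChannelClaim
        std::vector<std::uint8_t> msg;
        appendBigEndian(msg, kPaymentChannelClaimPrefix);
        msg.insert(msg.end(), channelId.begin(), channelId.end());
        appendBigEndian(msg, drops);
        return msg;  // sign this blob (authorize) or verify a signature over it (verify)
    }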
lineLimit = viewLowest ? lowLimit : highLimit; auto lineLimitPeer = !viewLowest ? lowLimit : highLimit; auto flags = sle.getFieldU32(ripple::sfFlags); - auto freeze = flags & - (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + auto freeze = flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); if (!viewLowest) balance.negate(); @@ -188,34 +185,28 @@ doGatewayBalances(Context const& context) response[JS(obligations)] = std::move(obj); } - auto toJson = - [](std::map> const& - balances) { - boost::json::object obj; - if (!balances.empty()) + auto toJson = [](std::map> const& balances) { + boost::json::object obj; + if (!balances.empty()) + { + for (auto const& [accId, accBalances] : balances) { - for (auto const& [accId, accBalances] : balances) + boost::json::array arr; + for (auto const& balance : accBalances) { - boost::json::array arr; - for (auto const& balance : accBalances) - { - boost::json::object entry; - entry[JS(currency)] = - ripple::to_string(balance.issue().currency); - entry[JS(value)] = balance.getText(); - arr.push_back(std::move(entry)); - } - obj[ripple::to_string(accId)] = std::move(arr); + boost::json::object entry; + entry[JS(currency)] = ripple::to_string(balance.issue().currency); + entry[JS(value)] = balance.getText(); + arr.push_back(std::move(entry)); } + obj[ripple::to_string(accId)] = std::move(arr); } - return obj; - }; - - auto containsHotWallet = [&](auto const& hw) { - return hotBalances.contains(hw); + } + return obj; }; - if (not std::all_of( - hotWallets.begin(), hotWallets.end(), containsHotWallet)) + + auto containsHotWallet = [&](auto const& hw) { return hotBalances.contains(hw); }; + if (not std::all_of(hotWallets.begin(), hotWallets.end(), containsHotWallet)) return Status{RippledError::rpcINVALID_PARAMS, "invalidHotWallet"}; if (auto balances = toJson(hotBalances); balances.size()) diff --git a/src/rpc/handlers/Ledger.cpp b/src/rpc/handlers/Ledger.cpp index a3334e5b..04dcb934 100644 --- a/src/rpc/handlers/Ledger.cpp +++ b/src/rpc/handlers/Ledger.cpp @@ -41,8 +41,7 @@ doLedger(Context const& context) if (params.contains(JS(transactions))) { if (!params.at(JS(transactions)).is_bool()) - return Status{ - RippledError::rpcINVALID_PARAMS, "transactionsFlagNotBool"}; + return Status{RippledError::rpcINVALID_PARAMS, "transactionsFlagNotBool"}; transactions = params.at(JS(transactions)).as_bool(); } @@ -94,8 +93,7 @@ doLedger(Context const& context) header[JS(hash)] = ripple::strHex(lgrInfo.hash); header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); header[JS(ledger_index)] = std::to_string(lgrInfo.seq); - header[JS(parent_close_time)] = - lgrInfo.parentCloseTime.time_since_epoch().count(); + header[JS(parent_close_time)] = lgrInfo.parentCloseTime.time_since_epoch().count(); header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash); header[JS(seqNum)] = std::to_string(lgrInfo.seq); header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops); @@ -110,8 +108,7 @@ doLedger(Context const& context) boost::json::array& jsonTxs = header.at(JS(transactions)).as_array(); if (expand) { - auto txns = context.backend->fetchAllTransactionsInLedger( - lgrInfo.seq, context.yield); + auto txns = context.backend->fetchAllTransactionsInLedger(lgrInfo.seq, context.yield); std::transform( std::move_iterator(txns.begin()), @@ -136,8 +133,7 @@ doLedger(Context const& context) } else { - auto hashes = context.backend->fetchAllTransactionHashesInLedger( - lgrInfo.seq, context.yield); + auto hashes = 
context.backend->fetchAllTransactionHashesInLedger(lgrInfo.seq, context.yield); std::transform( std::move_iterator(hashes.begin()), std::move_iterator(hashes.end()), @@ -153,8 +149,7 @@ doLedger(Context const& context) { header["diff"] = boost::json::value(boost::json::array_kind); boost::json::array& jsonDiff = header.at("diff").as_array(); - auto diff = - context.backend->fetchLedgerDiff(lgrInfo.seq, context.yield); + auto diff = context.backend->fetchLedgerDiff(lgrInfo.seq, context.yield); for (auto const& obj : diff) { boost::json::object entry; @@ -163,9 +158,7 @@ doLedger(Context const& context) entry["object"] = ripple::strHex(obj.blob); else if (obj.blob.size()) { - ripple::STLedgerEntry sle{ - ripple::SerialIter{obj.blob.data(), obj.blob.size()}, - obj.key}; + ripple::STLedgerEntry sle{ripple::SerialIter{obj.blob.data(), obj.blob.size()}, obj.key}; entry["object"] = toJson(sle); } else diff --git a/src/rpc/handlers/LedgerData.cpp b/src/rpc/handlers/LedgerData.cpp index fdc86f9d..9b887d6e 100644 --- a/src/rpc/handlers/LedgerData.cpp +++ b/src/rpc/handlers/LedgerData.cpp @@ -82,14 +82,11 @@ doLedgerData(Context const& context) if (outOfOrder) { if (!request.at(JS(marker)).is_int64()) - return Status{ - RippledError::rpcINVALID_PARAMS, - "markerNotStringOrInt"}; + return Status{RippledError::rpcINVALID_PARAMS, "markerNotStringOrInt"}; diffMarker = value_to(request.at(JS(marker))); } else - return Status{ - RippledError::rpcINVALID_PARAMS, "markerNotString"}; + return Status{RippledError::rpcINVALID_PARAMS, "markerNotString"}; } else { @@ -97,8 +94,7 @@ doLedgerData(Context const& context) marker = ripple::uint256{}; if (!marker->parseHex(request.at(JS(marker)).as_string().c_str())) - return Status{ - RippledError::rpcINVALID_PARAMS, "markerMalformed"}; + return Status{RippledError::rpcINVALID_PARAMS, "markerMalformed"}; } } @@ -121,16 +117,13 @@ doLedgerData(Context const& context) header[JS(accepted)] = true; header[JS(account_hash)] = ripple::strHex(lgrInfo.accountHash); header[JS(close_flags)] = lgrInfo.closeFlags; - header[JS(close_time)] = - lgrInfo.closeTime.time_since_epoch().count(); + header[JS(close_time)] = lgrInfo.closeTime.time_since_epoch().count(); header[JS(close_time_human)] = ripple::to_string(lgrInfo.closeTime); - header[JS(close_time_resolution)] = - lgrInfo.closeTimeResolution.count(); + header[JS(close_time_resolution)] = lgrInfo.closeTimeResolution.count(); header[JS(hash)] = ripple::strHex(lgrInfo.hash); header[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); header[JS(ledger_index)] = std::to_string(lgrInfo.seq); - header[JS(parent_close_time)] = - lgrInfo.parentCloseTime.time_since_epoch().count(); + header[JS(parent_close_time)] = lgrInfo.parentCloseTime.time_since_epoch().count(); header[JS(parent_hash)] = ripple::strHex(lgrInfo.parentHash); header[JS(seqNum)] = std::to_string(lgrInfo.seq); header[JS(totalCoins)] = ripple::to_string(lgrInfo.drops); @@ -143,11 +136,8 @@ doLedgerData(Context const& context) } else { - if (!outOfOrder && - !context.backend->fetchLedgerObject( - *marker, lgrInfo.seq, context.yield)) - return Status{ - RippledError::rpcINVALID_PARAMS, "markerDoesNotExist"}; + if (!outOfOrder && !context.backend->fetchLedgerObject(*marker, lgrInfo.seq, context.yield)) + return Status{RippledError::rpcINVALID_PARAMS, "markerDoesNotExist"}; } response[JS(ledger_hash)] = ripple::strHex(lgrInfo.hash); @@ -158,8 +148,7 @@ doLedgerData(Context const& context) if (diffMarker) { assert(outOfOrder); - auto diff = - 
context.backend->fetchLedgerDiff(*diffMarker, context.yield); + auto diff = context.backend->fetchLedgerDiff(*diffMarker, context.yield); std::vector keys; for (auto&& [key, object] : diff) { @@ -168,8 +157,7 @@ doLedgerData(Context const& context) keys.push_back(std::move(key)); } } - auto objs = context.backend->fetchLedgerObjects( - keys, lgrInfo.seq, context.yield); + auto objs = context.backend->fetchLedgerObjects(keys, lgrInfo.seq, context.yield); for (size_t i = 0; i < objs.size(); ++i) { auto&& obj = objs[i]; @@ -181,29 +169,23 @@ doLedgerData(Context const& context) } else { - auto page = context.backend->fetchLedgerPage( - marker, lgrInfo.seq, limit, outOfOrder, context.yield); + auto page = context.backend->fetchLedgerPage(marker, lgrInfo.seq, limit, outOfOrder, context.yield); results = std::move(page.objects); if (page.cursor) response["marker"] = ripple::strHex(*(page.cursor)); else if (outOfOrder) - response["marker"] = - context.backend->fetchLedgerRange()->maxSequence; + response["marker"] = context.backend->fetchLedgerRange()->maxSequence; } auto end = std::chrono::system_clock::now(); - auto time = - std::chrono::duration_cast(end - start) - .count(); + auto time = std::chrono::duration_cast(end - start).count(); - gLog.debug() << "Number of results = " << results.size() << " fetched in " - << time << " microseconds"; + gLog.debug() << "Number of results = " << results.size() << " fetched in " << time << " microseconds"; boost::json::array objects; objects.reserve(results.size()); for (auto const& [key, object] : results) { - ripple::STLedgerEntry sle{ - ripple::SerialIter{object.data(), object.size()}, key}; + ripple::STLedgerEntry sle{ripple::SerialIter{object.data(), object.size()}, key}; if (binary) { boost::json::object entry; @@ -219,10 +201,8 @@ doLedgerData(Context const& context) response["cache_full"] = context.backend->cache().isFull(); auto end2 = std::chrono::system_clock::now(); - time = std::chrono::duration_cast(end2 - end) - .count(); - gLog.debug() << "Number of results = " << results.size() - << " serialized in " << time << " microseconds"; + time = std::chrono::duration_cast(end2 - end).count(); + gLog.debug() << "Number of results = " << results.size() << " serialized in " << time << " microseconds"; return response; } diff --git a/src/rpc/handlers/LedgerEntry.cpp b/src/rpc/handlers/LedgerEntry.cpp index aa710dd0..fd0b183b 100644 --- a/src/rpc/handlers/LedgerEntry.cpp +++ b/src/rpc/handlers/LedgerEntry.cpp @@ -64,11 +64,9 @@ doLedgerEntry(Context const& context) else if (request.contains(JS(account_root))) { if (!request.at(JS(account_root)).is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, "account_rootNotString"}; + return Status{RippledError::rpcINVALID_PARAMS, "account_rootNotString"}; - auto const account = ripple::parseBase58( - request.at(JS(account_root)).as_string().c_str()); + auto const account = ripple::parseBase58(request.at(JS(account_root)).as_string().c_str()); expectedType = ripple::ltACCOUNT_ROOT; if (!account || account->isZero()) return Status{ClioError::rpcMALFORMED_ADDRESS}; @@ -92,52 +90,37 @@ doLedgerEntry(Context const& context) if (!request.at(JS(deposit_preauth)).is_object()) { if (!request.at(JS(deposit_preauth)).is_string() || - !key.parseHex( - request.at(JS(deposit_preauth)).as_string().c_str())) + !key.parseHex(request.at(JS(deposit_preauth)).as_string().c_str())) { - return Status{ - RippledError::rpcINVALID_PARAMS, - "deposit_preauthMalformed"}; + return Status{RippledError::rpcINVALID_PARAMS, 
"deposit_preauthMalformed"}; } } else if ( !request.at(JS(deposit_preauth)).as_object().contains(JS(owner)) || - !request.at(JS(deposit_preauth)) - .as_object() - .at(JS(owner)) - .is_string()) + !request.at(JS(deposit_preauth)).as_object().at(JS(owner)).is_string()) { return Status{RippledError::rpcINVALID_PARAMS, "malformedOwner"}; } else if ( - !request.at(JS(deposit_preauth)) - .as_object() - .contains(JS(authorized)) || - !request.at(JS(deposit_preauth)) - .as_object() - .at(JS(authorized)) - .is_string()) + !request.at(JS(deposit_preauth)).as_object().contains(JS(authorized)) || + !request.at(JS(deposit_preauth)).as_object().at(JS(authorized)).is_string()) { - return Status{ - RippledError::rpcINVALID_PARAMS, "authorizedNotString"}; + return Status{RippledError::rpcINVALID_PARAMS, "authorizedNotString"}; } else { - boost::json::object const& deposit_preauth = - request.at(JS(deposit_preauth)).as_object(); + boost::json::object const& deposit_preauth = request.at(JS(deposit_preauth)).as_object(); - auto const owner = ripple::parseBase58( - deposit_preauth.at(JS(owner)).as_string().c_str()); + auto const owner = + ripple::parseBase58(deposit_preauth.at(JS(owner)).as_string().c_str()); - auto const authorized = ripple::parseBase58( - deposit_preauth.at(JS(authorized)).as_string().c_str()); + auto const authorized = + ripple::parseBase58(deposit_preauth.at(JS(authorized)).as_string().c_str()); if (!owner) - return Status{ - RippledError::rpcINVALID_PARAMS, "malformedOwner"}; + return Status{RippledError::rpcINVALID_PARAMS, "malformedOwner"}; else if (!authorized) - return Status{ - RippledError::rpcINVALID_PARAMS, "malformedAuthorized"}; + return Status{RippledError::rpcINVALID_PARAMS, "malformedAuthorized"}; else key = ripple::keylet::depositPreauth(*owner, *authorized).key; } @@ -148,13 +131,11 @@ doLedgerEntry(Context const& context) if (!request.at(JS(directory)).is_object()) { if (!request.at(JS(directory)).is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, "directoryNotString"}; + return Status{RippledError::rpcINVALID_PARAMS, "directoryNotString"}; if (!key.parseHex(request.at(JS(directory)).as_string().c_str())) { - return Status{ - RippledError::rpcINVALID_PARAMS, "malformedDirectory"}; + return Status{RippledError::rpcINVALID_PARAMS, "malformedDirectory"}; } } else if ( @@ -167,8 +148,7 @@ doLedgerEntry(Context const& context) { auto directory = request.at(JS(directory)).as_object(); std::uint64_t subIndex = directory.contains(JS(sub_index)) - ? boost::json::value_to( - directory.at(JS(sub_index))) + ? boost::json::value_to(directory.at(JS(sub_index))) : 0; if (directory.contains(JS(dir_root))) @@ -178,15 +158,11 @@ doLedgerEntry(Context const& context) if (directory.contains(JS(owner))) { // May not specify both dir_root and owner. 
- return Status{ - RippledError::rpcINVALID_PARAMS, - "mayNotSpecifyBothDirRootAndOwner"}; + return Status{RippledError::rpcINVALID_PARAMS, "mayNotSpecifyBothDirRootAndOwner"}; } - else if (!uDirRoot.parseHex( - directory.at(JS(dir_root)).as_string().c_str())) + else if (!uDirRoot.parseHex(directory.at(JS(dir_root)).as_string().c_str())) { - return Status{ - RippledError::rpcINVALID_PARAMS, "malformedDirRoot"}; + return Status{RippledError::rpcINVALID_PARAMS, "malformedDirRoot"}; } else { @@ -195,8 +171,8 @@ doLedgerEntry(Context const& context) } else if (directory.contains(JS(owner))) { - auto const ownerID = ripple::parseBase58( - directory.at(JS(owner)).as_string().c_str()); + auto const ownerID = + ripple::parseBase58(directory.at(JS(owner)).as_string().c_str()); if (!ownerID) { @@ -204,15 +180,12 @@ doLedgerEntry(Context const& context) } else { - key = ripple::keylet::page( - ripple::keylet::ownerDir(*ownerID), subIndex) - .key; + key = ripple::keylet::page(ripple::keylet::ownerDir(*ownerID), subIndex).key; } } else { - return Status{ - RippledError::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"}; + return Status{RippledError::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"}; } } } @@ -222,8 +195,7 @@ doLedgerEntry(Context const& context) if (!request.at(JS(escrow)).is_object()) { if (!key.parseHex(request.at(JS(escrow)).as_string().c_str())) - return Status{ - RippledError::rpcINVALID_PARAMS, "malformedEscrow"}; + return Status{RippledError::rpcINVALID_PARAMS, "malformedEscrow"}; } else if ( !request.at(JS(escrow)).as_object().contains(JS(owner)) || @@ -239,19 +211,14 @@ doLedgerEntry(Context const& context) } else { - auto const id = - ripple::parseBase58(request.at(JS(escrow)) - .as_object() - .at(JS(owner)) - .as_string() - .c_str()); + auto const id = ripple::parseBase58( + request.at(JS(escrow)).as_object().at(JS(owner)).as_string().c_str()); if (!id) return Status{ClioError::rpcMALFORMED_ADDRESS}; else { - std::uint32_t seq = - request.at(JS(escrow)).as_object().at(JS(seq)).as_int64(); + std::uint32_t seq = request.at(JS(escrow)).as_object().at(JS(seq)).as_int64(); key = ripple::keylet::escrow(*id, seq).key; } } @@ -262,8 +229,7 @@ doLedgerEntry(Context const& context) if (!request.at(JS(offer)).is_object()) { if (!key.parseHex(request.at(JS(offer)).as_string().c_str())) - return Status{ - RippledError::rpcINVALID_PARAMS, "malformedOffer"}; + return Status{RippledError::rpcINVALID_PARAMS, "malformedOffer"}; } else if ( !request.at(JS(offer)).as_object().contains(JS(account)) || @@ -280,15 +246,13 @@ doLedgerEntry(Context const& context) else { auto offer = request.at(JS(offer)).as_object(); - auto const id = ripple::parseBase58( - offer.at(JS(account)).as_string().c_str()); + auto const id = ripple::parseBase58(offer.at(JS(account)).as_string().c_str()); if (!id) return Status{ClioError::rpcMALFORMED_ADDRESS}; else { - std::uint32_t seq = - boost::json::value_to(offer.at(JS(seq))); + std::uint32_t seq = boost::json::value_to(offer.at(JS(seq))); key = ripple::keylet::offer(*id, seq).key; } } @@ -297,34 +261,27 @@ doLedgerEntry(Context const& context) { expectedType = ripple::ltPAYCHAN; if (!request.at(JS(payment_channel)).is_string()) - return Status{ - RippledError::rpcINVALID_PARAMS, "paymentChannelNotString"}; + return Status{RippledError::rpcINVALID_PARAMS, "paymentChannelNotString"}; if (!key.parseHex(request.at(JS(payment_channel)).as_string().c_str())) - return Status{ - RippledError::rpcINVALID_PARAMS, "malformedPaymentChannel"}; + return 
Status{RippledError::rpcINVALID_PARAMS, "malformedPaymentChannel"}; } else if (request.contains(JS(ripple_state))) { if (!request.at(JS(ripple_state)).is_object()) - return Status{ - RippledError::rpcINVALID_PARAMS, "rippleStateNotObject"}; + return Status{RippledError::rpcINVALID_PARAMS, "rippleStateNotObject"}; expectedType = ripple::ltRIPPLE_STATE; ripple::Currency currency; - boost::json::object const& state = - request.at(JS(ripple_state)).as_object(); + boost::json::object const& state = request.at(JS(ripple_state)).as_object(); - if (!state.contains(JS(currency)) || - !state.at(JS(currency)).is_string()) + if (!state.contains(JS(currency)) || !state.at(JS(currency)).is_string()) { return Status{RippledError::rpcINVALID_PARAMS, "currencyNotString"}; } - if (!state.contains(JS(accounts)) || - !state.at(JS(accounts)).is_array() || - 2 != state.at(JS(accounts)).as_array().size() || - !state.at(JS(accounts)).as_array().at(0).is_string() || + if (!state.contains(JS(accounts)) || !state.at(JS(accounts)).is_array() || + 2 != state.at(JS(accounts)).as_array().size() || !state.at(JS(accounts)).as_array().at(0).is_string() || !state.at(JS(accounts)).as_array().at(1).is_string() || (state.at(JS(accounts)).as_array().at(0).as_string() == state.at(JS(accounts)).as_array().at(1).as_string())) @@ -332,19 +289,16 @@ doLedgerEntry(Context const& context) return Status{RippledError::rpcINVALID_PARAMS, "malformedAccounts"}; } - auto const id1 = ripple::parseBase58( - state.at(JS(accounts)).as_array().at(0).as_string().c_str()); - auto const id2 = ripple::parseBase58( - state.at(JS(accounts)).as_array().at(1).as_string().c_str()); + auto const id1 = + ripple::parseBase58(state.at(JS(accounts)).as_array().at(0).as_string().c_str()); + auto const id2 = + ripple::parseBase58(state.at(JS(accounts)).as_array().at(1).as_string().c_str()); if (!id1 || !id2) - return Status{ - ClioError::rpcMALFORMED_ADDRESS, "malformedAddresses"}; + return Status{ClioError::rpcMALFORMED_ADDRESS, "malformedAddresses"}; - else if (!ripple::to_currency( - currency, state.at(JS(currency)).as_string().c_str())) - return Status{ - ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency"}; + else if (!ripple::to_currency(currency, state.at(JS(currency)).as_string().c_str())) + return Status{ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency"}; key = ripple::keylet::line(*id1, *id2, currency).key; } @@ -355,12 +309,10 @@ doLedgerEntry(Context const& context) if (!request.at(JS(ticket)).is_object()) { if (!request.at(JS(ticket)).is_string()) - return Status{ - ClioError::rpcMALFORMED_REQUEST, "ticketNotString"}; + return Status{ClioError::rpcMALFORMED_REQUEST, "ticketNotString"}; if (!key.parseHex(request.at(JS(ticket)).as_string().c_str())) - return Status{ - ClioError::rpcMALFORMED_REQUEST, "malformedTicket"}; + return Status{ClioError::rpcMALFORMED_REQUEST, "malformedTicket"}; } else if ( !request.at(JS(ticket)).as_object().contains(JS(account)) || @@ -372,26 +324,18 @@ doLedgerEntry(Context const& context) !request.at(JS(ticket)).as_object().contains(JS(ticket_seq)) || !request.at(JS(ticket)).as_object().at(JS(ticket_seq)).is_int64()) { - return Status{ - ClioError::rpcMALFORMED_REQUEST, "malformedTicketSeq"}; + return Status{ClioError::rpcMALFORMED_REQUEST, "malformedTicketSeq"}; } else { - auto const id = - ripple::parseBase58(request.at(JS(ticket)) - .as_object() - .at(JS(account)) - .as_string() - .c_str()); + auto const id = ripple::parseBase58( + request.at(JS(ticket)).as_object().at(JS(account)).as_string().c_str()); if (!id) 
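Note: ledger_entry accepts exactly one of several request shapes (index, account_root, deposit_preauth, directory, escrow, offer, payment_channel, ripple_state, ticket); each branch validates its fields, derives the keylet key, and records the ledger entry type that the fetched object is checked against at the end. A compact sketch of that dispatch using boost::json, with hypothetical string keys in place of the real keylet computation:

    #include <boost/json.hpp>
    #include <cstdint>
    #include <optional>
    #include <string>
    #include <utility>

    enum class EntryType { AccountRoot, Offer, PayChannel };

    // Hypothetical stand-ins for ripple::keylet::account/offer; the real code
    // produces a 256-bit key, but a string is enough to show the dispatch shape.
    std::string accountKey(std::string const& account) { return "account:" + account; }
    std::string offerKey(std::string const& account, std::int64_t seq) { return "offer:" + account + ":" + std::to_string(seq); }

    // Pick exactly one branch based on which request field is present, compute the
    // lookup key and remember the ledger entry type we expect the object to have.
    std::optional<std::pair<std::string, EntryType>>
    keyFromRequest(boost::json::object const& request)
    {
        if (request.contains("account_root") && request.at("account_root").is_string())
            return std::pair{accountKey(request.at("account_root").as_string().c_str()), EntryType::AccountRoot};

        if (request.contains("offer") && request.at("offer").is_object())
        {
            auto const& offer = request.at("offer").as_object();
            if (!offer.contains("account") || !offer.at("account").is_string() || !offer.contains("seq") ||
                !offer.at("seq").is_int64())
                return std::nullopt;  // malformed - the real handler returns rpcINVALID_PARAMS here
            return std::pair{offerKey(offer.at("account").as_string().c_str(), offer.at("seq").as_int64()), EntryType::Offer};
        }

        if (request.contains("payment_channel") && request.at("payment_channel").is_string())
            return std::pair{std::string{request.at("payment_channel").as_string().c_str()}, EntryType::PayChannel};

        return std::nullopt;  // "unknownOption" in the real handler
    }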
return Status{ClioError::rpcMALFORMED_OWNER}; else { - std::uint32_t seq = request.at(JS(ticket)) - .as_object() - .at(JS(ticket_seq)) - .as_int64(); + std::uint32_t seq = request.at(JS(ticket)).as_object().at(JS(ticket_seq)).as_int64(); key = ripple::getTicketIndex(*id, seq); } @@ -402,15 +346,13 @@ doLedgerEntry(Context const& context) return Status{RippledError::rpcINVALID_PARAMS, "unknownOption"}; } - auto dbResponse = - context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield); + auto dbResponse = context.backend->fetchLedgerObject(key, lgrInfo.seq, context.yield); if (!dbResponse or dbResponse->size() == 0) return Status{"entryNotFound"}; // check expected type matches actual type - ripple::STLedgerEntry sle{ - ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key}; + ripple::STLedgerEntry sle{ripple::SerialIter{dbResponse->data(), dbResponse->size()}, key}; if (expectedType != ripple::ltANY && sle.getType() != expectedType) return Status{"unexpectedLedgerType"}; diff --git a/src/rpc/handlers/NFTHistory.cpp b/src/rpc/handlers/NFTHistory.cpp index d94ae4ab..a49a93eb 100644 --- a/src/rpc/handlers/NFTHistory.cpp +++ b/src/rpc/handlers/NFTHistory.cpp @@ -46,16 +46,12 @@ doNFTHistory(Context const& context) std::uint32_t const limit, bool const forward, std::optional const& cursorIn, - boost::asio::yield_context& yield) - -> Backend::TransactionsAndCursor { - auto const [txnsAndCursor, timeDiff] = - util::timed([&, &tokenID = tokenID]() { - return backend->fetchNFTTransactions( - tokenID, limit, forward, cursorIn, yield); - }); + boost::asio::yield_context& yield) -> Backend::TransactionsAndCursor { + auto const [txnsAndCursor, timeDiff] = util::timed([&, &tokenID = tokenID]() { + return backend->fetchNFTTransactions(tokenID, limit, forward, cursorIn, yield); + }); gLog.info() << outerFuncName << " db fetch took " << timeDiff - << " milliseconds - num blobs = " - << txnsAndCursor.txns.size(); + << " milliseconds - num blobs = " << txnsAndCursor.txns.size(); return txnsAndCursor; }); diff --git a/src/rpc/handlers/NFTInfo.cpp b/src/rpc/handlers/NFTInfo.cpp index e8872c71..bdfae1e3 100644 --- a/src/rpc/handlers/NFTInfo.cpp +++ b/src/rpc/handlers/NFTInfo.cpp @@ -42,8 +42,7 @@ doNFTInfo(Context const& context) return *status; auto const lgrInfo = std::get(maybeLedgerInfo); - auto const dbResponse = - context.backend->fetchNFT(tokenID, lgrInfo.seq, context.yield); + auto const dbResponse = context.backend->fetchNFT(tokenID, lgrInfo.seq, context.yield); if (!dbResponse) return Status{RippledError::rpcOBJECT_NOT_FOUND, "NFT not found"}; @@ -55,10 +54,8 @@ doNFTInfo(Context const& context) response[JS(flags)] = ripple::nft::getFlags(dbResponse->tokenID); response["transfer_fee"] = ripple::nft::getTransferFee(dbResponse->tokenID); - response[JS(issuer)] = - ripple::toBase58(ripple::nft::getIssuer(dbResponse->tokenID)); - response["nft_taxon"] = - ripple::nft::toUInt32(ripple::nft::getTaxon(dbResponse->tokenID)); + response[JS(issuer)] = ripple::toBase58(ripple::nft::getIssuer(dbResponse->tokenID)); + response["nft_taxon"] = ripple::nft::toUInt32(ripple::nft::getTaxon(dbResponse->tokenID)); response[JS(nft_serial)] = ripple::nft::getSerial(dbResponse->tokenID); return response; diff --git a/src/rpc/handlers/NFTOffers.cpp b/src/rpc/handlers/NFTOffers.cpp index 31ac32b5..b76df333 100644 --- a/src/rpc/handlers/NFTOffers.cpp +++ b/src/rpc/handlers/NFTOffers.cpp @@ -36,8 +36,7 @@ namespace ripple { inline void tag_invoke(json::value_from_tag, json::value& jv, SLE const& offer) { - 
auto amount = ::RPC::toBoostJson( - offer.getFieldAmount(sfAmount).getJson(JsonOptions::none)); + auto amount = ::RPC::toBoostJson(offer.getFieldAmount(sfAmount).getJson(JsonOptions::none)); json::object obj = { {JS(nft_offer_index), to_string(offer.key())}, @@ -47,8 +46,7 @@ tag_invoke(json::value_from_tag, json::value& jv, SLE const& offer) }; if (offer.isFieldPresent(sfDestination)) - obj.insert_or_assign( - JS(destination), toBase58(offer.getAccountID(sfDestination))); + obj.insert_or_assign(JS(destination), toBase58(offer.getAccountID(sfDestination))); if (offer.isFieldPresent(sfExpiration)) obj.insert_or_assign(JS(expiration), offer.getFieldU32(sfExpiration)); @@ -61,10 +59,7 @@ tag_invoke(json::value_from_tag, json::value& jv, SLE const& offer) namespace RPC { Result -enumerateNFTOffers( - Context const& context, - ripple::uint256 const& tokenid, - ripple::Keylet const& directory) +enumerateNFTOffers(Context const& context, ripple::uint256 const& tokenid, ripple::Keylet const& directory) { auto const& request = context.params; @@ -75,8 +70,7 @@ enumerateNFTOffers( auto lgrInfo = std::get(v); // TODO: just check for existence without pulling - if (!context.backend->fetchLedgerObject( - directory.key, lgrInfo.seq, context.yield)) + if (!context.backend->fetchLedgerObject(directory.key, lgrInfo.seq, context.yield)) return Status{RippledError::rpcOBJECT_NOT_FOUND, "notFound"}; std::uint32_t limit; @@ -104,12 +98,9 @@ enumerateNFTOffers( if (!cursor.parseHex(marker.as_string().c_str())) return Status{RippledError::rpcINVALID_PARAMS, "malformedCursor"}; - auto const sle = - read(ripple::keylet::nftoffer(cursor), lgrInfo, context); + auto const sle = read(ripple::keylet::nftoffer(cursor), lgrInfo, context); - if (!sle || - sle->getFieldU16(ripple::sfLedgerEntryType) != - ripple::ltNFTOKEN_OFFER || + if (!sle || sle->getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_OFFER || tokenid != sle->getFieldH256(ripple::sfNFTokenID)) return Status{RippledError::rpcINVALID_PARAMS}; @@ -152,14 +143,10 @@ enumerateNFTOffers( offers.pop_back(); } - std::transform( - std::cbegin(offers), - std::cend(offers), - std::back_inserter(jsonOffers), - [](auto const& offer) { - // uses tag_invoke at the top of this file - return json::value_from(offer); - }); + std::transform(std::cbegin(offers), std::cend(offers), std::back_inserter(jsonOffers), [](auto const& offer) { + // uses tag_invoke at the top of this file + return json::value_from(offer); + }); response.insert_or_assign(JS(offers), std::move(jsonOffers)); return response; @@ -179,8 +166,7 @@ doNFTOffers(Context const& context, bool sells) return ripple::keylet::nft_buys(std::get(v)); }; - return enumerateNFTOffers( - context, std::get(v), getKeylet()); + return enumerateNFTOffers(context, std::get(v), getKeylet()); } Result diff --git a/src/rpc/handlers/NoRippleCheck.cpp b/src/rpc/handlers/NoRippleCheck.cpp index 6520f8c3..85e7ba40 100644 --- a/src/rpc/handlers/NoRippleCheck.cpp +++ b/src/rpc/handlers/NoRippleCheck.cpp @@ -23,10 +23,7 @@ namespace RPC { boost::json::object -getBaseTx( - ripple::AccountID const& accountID, - std::uint32_t accountSeq, - ripple::Fees const& fees) +getBaseTx(ripple::AccountID const& accountID, std::uint32_t accountSeq, ripple::Fees const& fees) { boost::json::object tx; tx[JS(Sequence)] = accountSeq; @@ -50,8 +47,7 @@ doNoRippleCheck(Context const& context) if (role == "gateway") roleGateway = true; else if (role != "user") - return Status{ - RippledError::rpcINVALID_PARAMS, "role field is invalid"}; + return 
Status{RippledError::rpcINVALID_PARAMS, "role field is invalid"}; } std::uint32_t limit = 300; @@ -65,15 +61,13 @@ doNoRippleCheck(Context const& context) return *status; auto lgrInfo = std::get(v); - std::optional fees = includeTxs - ? context.backend->fetchFees(lgrInfo.seq, context.yield) - : std::nullopt; + std::optional fees = + includeTxs ? context.backend->fetchFees(lgrInfo.seq, context.yield) : std::nullopt; boost::json::array transactions; auto keylet = ripple::keylet::account(accountID); - auto accountObj = context.backend->fetchLedgerObject( - keylet.key, lgrInfo.seq, context.yield); + auto accountObj = context.backend->fetchLedgerObject(keylet.key, lgrInfo.seq, context.yield); if (!accountObj) throw AccountNotFoundError(ripple::toBase58(accountID)); @@ -83,8 +77,7 @@ doNoRippleCheck(Context const& context) std::uint32_t accountSeq = sle.getFieldU32(ripple::sfSequence); boost::json::array problems; - bool bDefaultRipple = - sle.getFieldU32(ripple::sfFlags) & ripple::lsfDefaultRipple; + bool bDefaultRipple = sle.getFieldU32(ripple::sfFlags) & ripple::lsfDefaultRipple; if (bDefaultRipple & !roleGateway) { problems.push_back( @@ -95,8 +88,7 @@ doNoRippleCheck(Context const& context) } else if (roleGateway & !bDefaultRipple) { - problems.push_back( - "You should immediately set your default ripple flag"); + problems.push_back("You should immediately set your default ripple flag"); if (includeTxs) { auto tx = getBaseTx(accountID, accountSeq++, *fees); @@ -113,21 +105,14 @@ doNoRippleCheck(Context const& context) std::numeric_limits::max(), {}, context.yield, - [roleGateway, - includeTxs, - &fees, - &transactions, - &accountSeq, - &limit, - &accountID, - &problems](ripple::SLE&& ownedItem) { + [roleGateway, includeTxs, &fees, &transactions, &accountSeq, &limit, &accountID, &problems]( + ripple::SLE&& ownedItem) { if (ownedItem.getType() == ripple::ltRIPPLE_STATE) { - bool const bLow = accountID == - ownedItem.getFieldAmount(ripple::sfLowLimit).getIssuer(); + bool const bLow = accountID == ownedItem.getFieldAmount(ripple::sfLowLimit).getIssuer(); - bool const bNoRipple = ownedItem.getFieldU32(ripple::sfFlags) & - (bLow ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + bool const bNoRipple = + ownedItem.getFieldU32(ripple::sfFlags) & (bLow ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); std::string problem; bool needFix = false; @@ -146,27 +131,22 @@ doNoRippleCheck(Context const& context) if (needFix) { ripple::AccountID peer = - ownedItem - .getFieldAmount( - bLow ? ripple::sfHighLimit : ripple::sfLowLimit) - .getIssuer(); - ripple::STAmount peerLimit = ownedItem.getFieldAmount( - bLow ? ripple::sfHighLimit : ripple::sfLowLimit); + ownedItem.getFieldAmount(bLow ? ripple::sfHighLimit : ripple::sfLowLimit).getIssuer(); + ripple::STAmount peerLimit = + ownedItem.getFieldAmount(bLow ? ripple::sfHighLimit : ripple::sfLowLimit); problem += to_string(peerLimit.getCurrency()); problem += " line to "; problem += to_string(peerLimit.getIssuer()); problems.emplace_back(problem); if (includeTxs) { - ripple::STAmount limitAmount(ownedItem.getFieldAmount( - bLow ? ripple::sfLowLimit : ripple::sfHighLimit)); + ripple::STAmount limitAmount( + ownedItem.getFieldAmount(bLow ? ripple::sfLowLimit : ripple::sfHighLimit)); limitAmount.setIssuer(peer); auto tx = getBaseTx(accountID, accountSeq++, *fees); tx[JS(TransactionType)] = JS(TrustSet); - tx[JS(LimitAmount)] = RPC::toBoostJson( - limitAmount.getJson(ripple::JsonOptions::none)); - tx[JS(Flags)] = bNoRipple ? 
ripple::tfClearNoRipple - : ripple::tfSetNoRipple; + tx[JS(LimitAmount)] = RPC::toBoostJson(limitAmount.getJson(ripple::JsonOptions::none)); + tx[JS(Flags)] = bNoRipple ? ripple::tfClearNoRipple : ripple::tfSetNoRipple; transactions.push_back(tx); } diff --git a/src/rpc/handlers/ServerInfo.cpp b/src/rpc/handlers/ServerInfo.cpp index 0992e017..8985180d 100644 --- a/src/rpc/handlers/ServerInfo.cpp +++ b/src/rpc/handlers/ServerInfo.cpp @@ -33,23 +33,18 @@ doServerInfo(Context const& context) auto range = context.backend->fetchLedgerRange(); if (!range) { - return Status{ - RippledError::rpcNOT_READY, - "emptyDatabase", - "The server has no data in the database"}; + return Status{RippledError::rpcNOT_READY, "emptyDatabase", "The server has no data in the database"}; } - auto lgrInfo = context.backend->fetchLedgerBySequence( - range->maxSequence, context.yield); + auto lgrInfo = context.backend->fetchLedgerBySequence(range->maxSequence, context.yield); auto fees = context.backend->fetchFees(lgrInfo->seq, context.yield); if (!lgrInfo || !fees) return Status{RippledError::rpcINTERNAL}; - auto age = std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count() - + auto age = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() - lgrInfo->closeTime.time_since_epoch().count() - 946684800; if (age < 0) @@ -58,20 +53,18 @@ doServerInfo(Context const& context) response[JS(info)] = boost::json::object{}; boost::json::object& info = response[JS(info)].as_object(); - info[JS(complete_ledgers)] = std::to_string(range->minSequence) + "-" + - std::to_string(range->maxSequence); + info[JS(complete_ledgers)] = std::to_string(range->minSequence) + "-" + std::to_string(range->maxSequence); bool admin = context.clientIp == "127.0.0.1"; if (admin) { info[JS(counters)] = context.counters.report(); - info[JS(counters)].as_object()["subscriptions"] = - context.subscriptions->report(); + info[JS(counters)].as_object()["subscriptions"] = context.subscriptions->report(); } - auto serverInfoRippled = context.balancer->forwardToRippled( - {{"command", "server_info"}}, context.clientIp, context.yield); + auto serverInfoRippled = + context.balancer->forwardToRippled({{"command", "server_info"}}, context.clientIp, context.yield); info[JS(load_factor)] = 1; info["clio_version"] = Build::getClioVersionString(); @@ -105,11 +98,9 @@ doServerInfo(Context const& context) cache["size"] = context.backend->cache().size(); cache["is_full"] = context.backend->cache().isFull(); - cache["latest_ledger_seq"] = - context.backend->cache().latestLedgerSequence(); + cache["latest_ledger_seq"] = context.backend->cache().latestLedgerSequence(); cache["object_hit_rate"] = context.backend->cache().getObjectHitRate(); - cache["successor_hit_rate"] = - context.backend->cache().getSuccessorHitRate(); + cache["successor_hit_rate"] = context.backend->cache().getSuccessorHitRate(); if (admin) { diff --git a/src/rpc/handlers/Subscribe.cpp b/src/rpc/handlers/Subscribe.cpp index e0cdd1d0..9a1559f4 100644 --- a/src/rpc/handlers/Subscribe.cpp +++ b/src/rpc/handlers/Subscribe.cpp @@ -26,19 +26,13 @@ namespace RPC { // these are the streams that take no arguments -static std::unordered_set validCommonStreams{ - "ledger", - "transactions", - "transactions_proposed", - "validations", - "manifests", - "book_changes"}; +static std::unordered_set + validCommonStreams{"ledger", "transactions", "transactions_proposed", "validations", "manifests", "book_changes"}; Status 
validateStreams(boost::json::object const& request) { - for (auto const& streams = request.at(JS(streams)).as_array(); - auto const& stream : streams) + for (auto const& streams = request.at(JS(streams)).as_array(); auto const& stream : streams) { if (!stream.is_string()) return Status{RippledError::rpcINVALID_PARAMS, "streamNotString"}; @@ -83,10 +77,7 @@ subscribeToStreams( } void -unsubscribeToStreams( - boost::json::object const& request, - std::shared_ptr session, - SubscriptionManager& manager) +unsubscribeToStreams(boost::json::object const& request, std::shared_ptr session, SubscriptionManager& manager) { boost::json::array const& streams = request.at(JS(streams)).as_array(); @@ -127,10 +118,7 @@ validateAccounts(boost::json::array const& accounts) } void -subscribeToAccounts( - boost::json::object const& request, - std::shared_ptr session, - SubscriptionManager& manager) +subscribeToAccounts(boost::json::object const& request, std::shared_ptr session, SubscriptionManager& manager) { boost::json::array const& accounts = request.at(JS(accounts)).as_array(); @@ -151,10 +139,7 @@ subscribeToAccounts( } void -unsubscribeToAccounts( - boost::json::object const& request, - std::shared_ptr session, - SubscriptionManager& manager) +unsubscribeToAccounts(boost::json::object const& request, std::shared_ptr session, SubscriptionManager& manager) { boost::json::array const& accounts = request.at(JS(accounts)).as_array(); @@ -180,8 +165,7 @@ subscribeToAccountsProposed( std::shared_ptr session, SubscriptionManager& manager) { - boost::json::array const& accounts = - request.at(JS(accounts_proposed)).as_array(); + boost::json::array const& accounts = request.at(JS(accounts_proposed)).as_array(); for (auto const& account : accounts) { @@ -205,8 +189,7 @@ unsubscribeToAccountsProposed( std::shared_ptr session, SubscriptionManager& manager) { - boost::json::array const& accounts = - request.at(JS(accounts_proposed)).as_array(); + boost::json::array const& accounts = request.at(JS(accounts_proposed)).as_array(); for (auto const& account : accounts) { @@ -255,23 +238,15 @@ validateAndGetBooks( rng = backend->fetchLedgerRange(); ripple::AccountID takerID = beast::zero; if (book.as_object().contains(JS(taker))) - if (auto const status = getTaker(book.as_object(), takerID); - status) + if (auto const status = getTaker(book.as_object(), takerID); status) return status; - auto getOrderBook = [&snapshot, &backend, &rng, &takerID]( - auto book, - boost::asio::yield_context& yield) { + auto getOrderBook = [&snapshot, &backend, &rng, &takerID](auto book, boost::asio::yield_context& yield) { auto bookBase = getBookBase(book); - auto [offers, _] = backend->fetchBookOffers( - bookBase, rng->maxSequence, 200, yield); + auto [offers, _] = backend->fetchBookOffers(bookBase, rng->maxSequence, 200, yield); - auto orderBook = postProcessOrderBook( - offers, book, takerID, *backend, rng->maxSequence, yield); - std::copy( - orderBook.begin(), - orderBook.end(), - std::back_inserter(snapshot)); + auto orderBook = postProcessOrderBook(offers, book, takerID, *backend, rng->maxSequence, yield); + std::copy(orderBook.begin(), orderBook.end(), std::back_inserter(snapshot)); }; getOrderBook(b, yield); if (both) @@ -282,10 +257,7 @@ validateAndGetBooks( } void -subscribeToBooks( - std::vector const& books, - std::shared_ptr session, - SubscriptionManager& manager) +subscribeToBooks(std::vector const& books, std::shared_ptr session, SubscriptionManager& manager) { for (auto const& book : books) { @@ -340,8 +312,7 @@ 
doSubscribe(Context const& context) { auto const& jsonAccounts = request.at(JS(accounts_proposed)); if (!jsonAccounts.is_array()) - return Status{ - RippledError::rpcINVALID_PARAMS, "accountsProposedNotArray"}; + return Status{RippledError::rpcINVALID_PARAMS, "accountsProposedNotArray"}; auto const& accounts = jsonAccounts.as_array(); if (accounts.empty()) @@ -357,27 +328,22 @@ doSubscribe(Context const& context) if (request.contains(JS(books))) { - auto parsed = - validateAndGetBooks(context.yield, request, context.backend); + auto parsed = validateAndGetBooks(context.yield, request, context.backend); if (auto status = std::get_if(&parsed)) return *status; - auto [bks, snap] = - std::get, boost::json::array>>( - parsed); + auto [bks, snap] = std::get, boost::json::array>>(parsed); books = std::move(bks); response[JS(offers)] = std::move(snap); } if (request.contains(JS(streams))) - response = subscribeToStreams( - context.yield, request, context.session, *context.subscriptions); + response = subscribeToStreams(context.yield, request, context.session, *context.subscriptions); if (request.contains(JS(accounts))) subscribeToAccounts(request, context.session, *context.subscriptions); if (request.contains(JS(accounts_proposed))) - subscribeToAccountsProposed( - request, context.session, *context.subscriptions); + subscribeToAccountsProposed(request, context.session, *context.subscriptions); if (request.contains(JS(books))) subscribeToBooks(books, context.session, *context.subscriptions); @@ -420,8 +386,7 @@ doUnsubscribe(Context const& context) { auto const& jsonAccounts = request.at(JS(accounts_proposed)); if (!jsonAccounts.is_array()) - return Status{ - RippledError::rpcINVALID_PARAMS, "accountsProposedNotArray"}; + return Status{RippledError::rpcINVALID_PARAMS, "accountsProposedNotArray"}; auto const& accounts = jsonAccounts.as_array(); if (accounts.empty()) @@ -435,15 +400,12 @@ doUnsubscribe(Context const& context) std::vector books; if (request.contains(JS(books))) { - auto parsed = - validateAndGetBooks(context.yield, request, context.backend); + auto parsed = validateAndGetBooks(context.yield, request, context.backend); if (auto status = std::get_if(&parsed)) return *status; - auto [bks, snap] = - std::get, boost::json::array>>( - parsed); + auto [bks, snap] = std::get, boost::json::array>>(parsed); books = std::move(bks); } @@ -455,8 +417,7 @@ doUnsubscribe(Context const& context) unsubscribeToAccounts(request, context.session, *context.subscriptions); if (request.contains(JS(accounts_proposed))) - unsubscribeToAccountsProposed( - request, context.session, *context.subscriptions); + unsubscribeToAccountsProposed(request, context.session, *context.subscriptions); if (request.contains("books")) unsubscribeToBooks(books, context.session, *context.subscriptions); diff --git a/src/rpc/handlers/TransactionEntry.cpp b/src/rpc/handlers/TransactionEntry.cpp index c6b3ad81..dfb59f0d 100644 --- a/src/rpc/handlers/TransactionEntry.cpp +++ b/src/rpc/handlers/TransactionEntry.cpp @@ -46,10 +46,7 @@ doTransactionEntry(Context const& context) // ledger; we simulate that here by returning not found if the transaction // is in a different ledger than the one specified. 
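// Aside: the doSubscribe/doUnsubscribe handlers above both unpack the result of
// validateAndGetBooks by first probing for an error with std::get_if and only then
// extracting the books/snapshot pair with std::get. A minimal, self-contained sketch of
// that variant-unpacking pattern (Status, Books and Snapshot are simplified stand-ins
// here, not Clio's real types):
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>

struct Status
{
    std::string error;  // stand-in for RPC::Status
};

using Books = std::vector<std::string>;  // stand-in for std::vector<ripple::Book>
using Snapshot = std::vector<int>;       // stand-in for the order book snapshot array
using ParseResult = std::variant<Status, std::pair<Books, Snapshot>>;

std::optional<Status>
useParsedBooks(ParseResult const& parsed)
{
    // Error path: validation produced a Status, so propagate it.
    if (auto const* status = std::get_if<Status>(&parsed))
        return *status;

    // Success path: split the parsed books from the order book snapshot.
    auto const& [books, snapshot] = std::get<std::pair<Books, Snapshot>>(parsed);
    (void)books;
    (void)snapshot;
    return std::nullopt;
}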
if (!dbResponse || dbResponse->ledgerSequence != lgrInfo.seq) - return Status{ - RippledError::rpcTXN_NOT_FOUND, - "transactionNotFound", - "Transaction not found."}; + return Status{RippledError::rpcTXN_NOT_FOUND, "transactionNotFound", "Transaction not found."}; auto [txn, meta] = toExpandedJson(*dbResponse); response[JS(tx_json)] = std::move(txn); diff --git a/src/rpc/handlers/Tx.cpp b/src/rpc/handlers/Tx.cpp index 1b1de0c2..dabb103d 100644 --- a/src/rpc/handlers/Tx.cpp +++ b/src/rpc/handlers/Tx.cpp @@ -71,8 +71,7 @@ doTx(Context const& context) { if (rangeSupplied) { - bool searchedAll = range->maxSequence >= *maxLedger && - range->minSequence <= *minLedger; + bool searchedAll = range->maxSequence >= *maxLedger && range->minSequence <= *minLedger; boost::json::object extra; extra["searched_all"] = searchedAll; return Status{RippledError::rpcTXN_NOT_FOUND, std::move(extra)}; diff --git a/src/rpc/ngHandlers/AccountChannels.cpp b/src/rpc/ngHandlers/AccountChannels.cpp index b32318f9..d6550a35 100644 --- a/src/rpc/ngHandlers/AccountChannels.cpp +++ b/src/rpc/ngHandlers/AccountChannels.cpp @@ -23,16 +23,12 @@ namespace RPCng { void -AccountChannelsHandler::addChannel( - std::vector& jsonChannels, - ripple::SLE const& channelSle) const +AccountChannelsHandler::addChannel(std::vector& jsonChannels, ripple::SLE const& channelSle) const { ChannelResponse channel; channel.channelID = ripple::to_string(channelSle.key()); - channel.account = - ripple::to_string(channelSle.getAccountID(ripple::sfAccount)); - channel.accountDestination = - ripple::to_string(channelSle.getAccountID(ripple::sfDestination)); + channel.account = ripple::to_string(channelSle.getAccountID(ripple::sfAccount)); + channel.accountDestination = ripple::to_string(channelSle.getAccountID(ripple::sfDestination)); channel.amount = channelSle[ripple::sfAmount].getText(); channel.balance = channelSle[ripple::sfBalance].getText(); if (publicKeyType(channelSle[ripple::sfPublicKey])) @@ -55,17 +51,11 @@ AccountChannelsHandler::addChannel( } AccountChannelsHandler::Result -AccountChannelsHandler::process( - AccountChannelsHandler::Input input, - Context const& ctx) const +AccountChannelsHandler::process(AccountChannelsHandler::Input input, Context const& ctx) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; @@ -75,22 +65,18 @@ AccountChannelsHandler::process( // no need to check the return value, validator check for us auto const accountID = RPC::accountFromStringStrict(input.account); - auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject( - ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); + auto const accountLedgerObject = + sharedPtrBackend_->fetchLedgerObject(ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); if (!accountLedgerObject) - return Error{RPC::Status{ - RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + return Error{RPC::Status{RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; - auto const destAccountID = input.destinationAccount - ? RPC::accountFromStringStrict(input.destinationAccount.value()) - : std::optional{}; + auto const destAccountID = input.destinationAccount ? 
RPC::accountFromStringStrict(input.destinationAccount.value()) + : std::optional{}; Output response; auto const addToResponse = [&](ripple::SLE&& sle) { - if (sle.getType() == ripple::ltPAYCHAN && - sle.getAccountID(ripple::sfAccount) == accountID && - (!destAccountID || - *destAccountID == sle.getAccountID(ripple::sfDestination))) + if (sle.getType() == ripple::ltPAYCHAN && sle.getAccountID(ripple::sfAccount) == accountID && + (!destAccountID || *destAccountID == sle.getAccountID(ripple::sfDestination))) { addChannel(response.channels, sle); } @@ -98,13 +84,7 @@ AccountChannelsHandler::process( }; auto const next = RPC::ngTraverseOwnedNodes( - *sharedPtrBackend_, - *accountID, - lgrInfo.seq, - input.limit, - input.marker, - ctx.yield, - addToResponse); + *sharedPtrBackend_, *accountID, lgrInfo.seq, input.limit, input.marker, ctx.yield, addToResponse); response.account = input.account; response.limit = input.limit; @@ -119,9 +99,7 @@ AccountChannelsHandler::process( } AccountChannelsHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); AccountChannelsHandler::Input input; @@ -140,8 +118,7 @@ tag_invoke( } if (jsonObject.contains(JS(destination_account))) { - input.destinationAccount = - jv.at(JS(destination_account)).as_string().c_str(); + input.destinationAccount = jv.at(JS(destination_account)).as_string().c_str(); } if (jsonObject.contains(JS(ledger_index))) { @@ -151,8 +128,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); } } @@ -160,10 +136,7 @@ tag_invoke( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - AccountChannelsHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountChannelsHandler::Output const& output) { boost::json::object obj; obj = { @@ -179,10 +152,7 @@ tag_invoke( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - AccountChannelsHandler::ChannelResponse const& channel) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountChannelsHandler::ChannelResponse const& channel) { boost::json::object obj; obj = { diff --git a/src/rpc/ngHandlers/AccountChannels.h b/src/rpc/ngHandlers/AccountChannels.h index 1f990b31..89e082a4 100644 --- a/src/rpc/ngHandlers/AccountChannels.h +++ b/src/rpc/ngHandlers/AccountChannels.h @@ -74,8 +74,7 @@ public: using Result = RPCng::HandlerReturnType; - AccountChannelsHandler( - std::shared_ptr const& sharedPtrBackend) + AccountChannelsHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -83,16 +82,14 @@ public: RpcSpecConstRef spec() const { - // clang-format off - static auto const rpcSpec = RpcSpec{ + static auto const rpcSpec = RpcSpec{ {JS(account), validation::Required{}, validation::AccountValidator}, - {JS(destination_account), validation::Type{},validation::AccountValidator}, + {JS(destination_account), validation::Type{}, validation::AccountValidator}, {JS(ledger_hash), validation::Uint256HexStringValidator}, - {JS(limit), validation::Type{},validation::Between{10,400}}, + {JS(limit), validation::Type{}, validation::Between{10, 400}}, {JS(ledger_index), validation::LedgerIndexValidator}, - {JS(marker), validation::AccountMarkerValidator} 
+ {JS(marker), validation::AccountMarkerValidator}, }; - // clang-format on return rpcSpec; } @@ -102,22 +99,15 @@ public: private: void - addChannel(std::vector& jsonLines, ripple::SLE const& line) - const; + addChannel(std::vector& jsonLines, ripple::SLE const& line) const; friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - ChannelResponse const& channel); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, ChannelResponse const& channel); }; } // namespace RPCng diff --git a/src/rpc/ngHandlers/AccountCurrencies.cpp b/src/rpc/ngHandlers/AccountCurrencies.cpp index 3039b8a2..3de4b7cc 100644 --- a/src/rpc/ngHandlers/AccountCurrencies.cpp +++ b/src/rpc/ngHandlers/AccountCurrencies.cpp @@ -21,17 +21,11 @@ namespace RPCng { AccountCurrenciesHandler::Result -AccountCurrenciesHandler::process( - AccountCurrenciesHandler::Input input, - Context const& ctx) const +AccountCurrenciesHandler::process(AccountCurrenciesHandler::Input input, Context const& ctx) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto const status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; @@ -40,11 +34,10 @@ AccountCurrenciesHandler::process( auto const accountID = RPC::accountFromStringStrict(input.account); - auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject( - ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); + auto const accountLedgerObject = + sharedPtrBackend_->fetchLedgerObject(ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); if (!accountLedgerObject) - return Error{RPC::Status{ - RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + return Error{RPC::Status{RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; Output response; auto const addToResponse = [&](ripple::SLE&& sle) { @@ -59,11 +52,9 @@ AccountCurrenciesHandler::process( if (!viewLowest) balance.negate(); if (balance < lineLimit) - response.receiveCurrencies.insert( - ripple::to_string(balance.getCurrency())); + response.receiveCurrencies.insert(ripple::to_string(balance.getCurrency())); if ((-balance) < lineLimitPeer) - response.sendCurrencies.insert( - ripple::to_string(balance.getCurrency())); + response.sendCurrencies.insert(ripple::to_string(balance.getCurrency())); } return true; }; @@ -84,10 +75,7 @@ AccountCurrenciesHandler::process( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - AccountCurrenciesHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountCurrenciesHandler::Output const& output) { jv = { {JS(ledger_hash), output.ledgerHash}, @@ -98,9 +86,7 @@ tag_invoke( } AccountCurrenciesHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); AccountCurrenciesHandler::Input input; @@ -117,8 +103,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() 
!= "validated") { - input.ledgerIndex = - std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); } } return input; diff --git a/src/rpc/ngHandlers/AccountCurrencies.h b/src/rpc/ngHandlers/AccountCurrencies.h index 533f8846..987cdd90 100644 --- a/src/rpc/ngHandlers/AccountCurrencies.h +++ b/src/rpc/ngHandlers/AccountCurrencies.h @@ -53,8 +53,7 @@ public: using Result = RPCng::HandlerReturnType; - AccountCurrenciesHandler( - std::shared_ptr const& sharedPtrBackend) + AccountCurrenciesHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -65,7 +64,9 @@ public: static auto const rpcSpec = RpcSpec{ {JS(account), validation::Required{}, validation::AccountValidator}, {JS(ledger_hash), validation::Uint256HexStringValidator}, - {JS(ledger_index), validation::LedgerIndexValidator}}; + {JS(ledger_index), validation::LedgerIndexValidator}, + }; + return rpcSpec; } @@ -74,10 +75,7 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/AccountInfo.cpp b/src/rpc/ngHandlers/AccountInfo.cpp index f9066cee..ca54edb9 100644 --- a/src/rpc/ngHandlers/AccountInfo.cpp +++ b/src/rpc/ngHandlers/AccountInfo.cpp @@ -21,19 +21,14 @@ namespace RPCng { AccountInfoHandler::Result -AccountInfoHandler::process(AccountInfoHandler::Input input, Context const& ctx) - const +AccountInfoHandler::process(AccountInfoHandler::Input input, Context const& ctx) const { if (!input.account && !input.ident) return Error{RPC::Status{RPC::RippledError::rpcACT_MALFORMED}}; auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto const status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; @@ -43,16 +38,12 @@ AccountInfoHandler::process(AccountInfoHandler::Input input, Context const& ctx) auto const accountStr = input.account.value_or(input.ident.value_or("")); auto const accountID = RPC::accountFromStringStrict(accountStr); auto const accountKeylet = ripple::keylet::account(*accountID); - auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject( - accountKeylet.key, lgrInfo.seq, ctx.yield); + auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject(accountKeylet.key, lgrInfo.seq, ctx.yield); if (!accountLedgerObject) - return Error{RPC::Status{ - RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + return Error{RPC::Status{RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; ripple::STLedgerEntry const sle{ - ripple::SerialIter{ - accountLedgerObject->data(), accountLedgerObject->size()}, - accountKeylet.key}; + ripple::SerialIter{accountLedgerObject->data(), accountLedgerObject->size()}, accountKeylet.key}; if (!accountKeylet.check(sle)) return Error{RPC::Status{RPC::RippledError::rpcDB_DESERIALIZATION}}; // Return SignerList(s) if that is requested. 
@@ -63,31 +54,24 @@ AccountInfoHandler::process(AccountInfoHandler::Input input, Context const& ctx) auto const signersKey = ripple::keylet::signers(*accountID); // This code will need to be revisited if in the future we // support multiple SignerLists on one account. - auto const signers = sharedPtrBackend_->fetchLedgerObject( - signersKey.key, lgrInfo.seq, ctx.yield); + auto const signers = sharedPtrBackend_->fetchLedgerObject(signersKey.key, lgrInfo.seq, ctx.yield); std::vector signerList; if (signers) { ripple::STLedgerEntry const sleSigners{ - ripple::SerialIter{signers->data(), signers->size()}, - signersKey.key}; + ripple::SerialIter{signers->data(), signers->size()}, signersKey.key}; if (!signersKey.check(sleSigners)) - return Error{ - RPC::Status{RPC::RippledError::rpcDB_DESERIALIZATION}}; + return Error{RPC::Status{RPC::RippledError::rpcDB_DESERIALIZATION}}; signerList.push_back(sleSigners); } - return Output( - lgrInfo.seq, ripple::strHex(lgrInfo.hash), sle, signerList); + return Output(lgrInfo.seq, ripple::strHex(lgrInfo.hash), sle, signerList); } return Output(lgrInfo.seq, ripple::strHex(lgrInfo.hash), sle); } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - AccountInfoHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountInfoHandler::Output const& output) { jv = boost::json::object{ {JS(account_data), RPC::toJson(output.accountData)}, @@ -97,15 +81,12 @@ tag_invoke( { jv.as_object()[JS(signer_lists)] = boost::json::array(); for (auto const& signerList : output.signerLists.value()) - jv.as_object()[JS(signer_lists)].as_array().push_back( - RPC::toJson(signerList)); + jv.as_object()[JS(signer_lists)].as_array().push_back(RPC::toJson(signerList)); } } AccountInfoHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); AccountInfoHandler::Input input; @@ -129,8 +110,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); } } if (jsonObject.contains(JS(signer_lists))) diff --git a/src/rpc/ngHandlers/AccountInfo.h b/src/rpc/ngHandlers/AccountInfo.h index 51217d64..5035cb18 100644 --- a/src/rpc/ngHandlers/AccountInfo.h +++ b/src/rpc/ngHandlers/AccountInfo.h @@ -51,13 +51,8 @@ public: { } - Output( - uint32_t ledgerId, - std::string ledgerHash, - ripple::STLedgerEntry sle) - : ledgerIndex(ledgerId) - , ledgerHash(std::move(ledgerHash)) - , accountData(std::move(sle)) + Output(uint32_t ledgerId, std::string ledgerHash, ripple::STLedgerEntry sle) + : ledgerIndex(ledgerId), ledgerHash(std::move(ledgerHash)), accountData(std::move(sle)) { } }; @@ -75,9 +70,7 @@ public: using Result = RPCng::HandlerReturnType; - AccountInfoHandler( - std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + AccountInfoHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -89,7 +82,9 @@ public: {JS(ident), validation::AccountValidator}, {JS(ledger_hash), validation::Uint256HexStringValidator}, {JS(ledger_index), validation::LedgerIndexValidator}, - {JS(signer_lists), validation::Type{}}}; + {JS(signer_lists), validation::Type{}}, + }; + return rpcSpec; } @@ -98,10 +93,7 @@ public: private: friend void - tag_invoke( - 
boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/AccountLines.cpp b/src/rpc/ngHandlers/AccountLines.cpp index f73650cb..b47a8c2e 100644 --- a/src/rpc/ngHandlers/AccountLines.cpp +++ b/src/rpc/ngHandlers/AccountLines.cpp @@ -53,18 +53,12 @@ AccountLinesHandler::addLine( if (not viewLowest) balance.negate(); - bool const lineAuth = - flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); - bool const lineAuthPeer = - flags & (not viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); - bool const lineNoRipple = - flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); - bool const lineNoRipplePeer = flags & - (not viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); - bool const lineFreeze = - flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); - bool const lineFreezePeer = - flags & (not viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + bool const lineAuth = flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); + bool const lineAuthPeer = flags & (not viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth); + bool const lineNoRipple = flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + bool const lineNoRipplePeer = flags & (not viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + bool const lineFreeze = flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + bool const lineFreezePeer = flags & (not viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); ripple::STAmount const& saBalance = balance; ripple::STAmount const& saLimit = lineLimit; @@ -93,33 +87,25 @@ AccountLinesHandler::addLine( } AccountLinesHandler::Result -AccountLinesHandler::process( - AccountLinesHandler::Input input, - Context const& ctx) const +AccountLinesHandler::process(AccountLinesHandler::Input input, Context const& ctx) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; auto const lgrInfo = std::get(lgrInfoOrStatus); auto const accountID = RPC::accountFromStringStrict(input.account); - auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject( - ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); + auto const accountLedgerObject = + sharedPtrBackend_->fetchLedgerObject(ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); if (not accountLedgerObject) - return Error{RPC::Status{ - RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + return Error{RPC::Status{RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; - auto const peerAccountID = input.peer - ? RPC::accountFromStringStrict(*(input.peer)) - : std::optional{}; + auto const peerAccountID = + input.peer ? 
RPC::accountFromStringStrict(*(input.peer)) : std::optional{}; Output response; response.lines.reserve(input.limit); @@ -130,18 +116,13 @@ AccountLinesHandler::process( auto ignore = false; if (input.ignoreDefault) { - if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == - accountID) + if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == accountID) { - ignore = - !(sle.getFieldU32(ripple::sfFlags) & - ripple::lsfLowReserve); + ignore = !(sle.getFieldU32(ripple::sfFlags) & ripple::lsfLowReserve); } else { - ignore = - !(sle.getFieldU32(ripple::sfFlags) & - ripple::lsfHighReserve); + ignore = !(sle.getFieldU32(ripple::sfFlags) & ripple::lsfHighReserve); } } @@ -151,18 +132,11 @@ AccountLinesHandler::process( }; auto const next = RPC::ngTraverseOwnedNodes( - *sharedPtrBackend_, - *accountID, - lgrInfo.seq, - input.limit, - input.marker, - ctx.yield, - addToResponse); + *sharedPtrBackend_, *accountID, lgrInfo.seq, input.limit, input.marker, ctx.yield, addToResponse); response.account = input.account; - response.limit = - input.limit; // not documented, - // https://github.com/XRPLF/xrpl-dev-portal/issues/1838 + response.limit = input.limit; // not documented, + // https://github.com/XRPLF/xrpl-dev-portal/issues/1838 response.ledgerHash = ripple::strHex(lgrInfo.hash); response.ledgerIndex = lgrInfo.seq; @@ -174,9 +148,7 @@ AccountLinesHandler::process( } AccountLinesHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); AccountLinesHandler::Input input; @@ -210,8 +182,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); } } @@ -219,10 +190,7 @@ tag_invoke( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - AccountLinesHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountLinesHandler::Output const& output) { auto obj = boost::json::object{ {JS(ledger_hash), output.ledgerHash}, diff --git a/src/rpc/ngHandlers/AccountLines.h b/src/rpc/ngHandlers/AccountLines.h index f8ef170e..a5149bfb 100644 --- a/src/rpc/ngHandlers/AccountLines.h +++ b/src/rpc/ngHandlers/AccountLines.h @@ -68,25 +68,21 @@ public: std::optional ledgerHash; std::optional ledgerIndex; std::optional peer; - bool ignoreDefault = - false; // TODO: document - // https://github.com/XRPLF/xrpl-dev-portal/issues/1839 + bool ignoreDefault = false; // TODO: document + // https://github.com/XRPLF/xrpl-dev-portal/issues/1839 uint32_t limit = 50; std::optional marker; }; using Result = RPCng::HandlerReturnType; - AccountLinesHandler( - std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + AccountLinesHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } RpcSpecConstRef spec() const { - // clang-format off static auto const rpcSpec = RpcSpec{ {JS(account), validation::Required{}, validation::AccountValidator}, {JS(peer), validation::Type{}, validation::AccountValidator}, @@ -96,7 +92,6 @@ public: {JS(ledger_index), validation::LedgerIndexValidator}, {JS(marker), validation::AccountMarkerValidator}, }; - // clang-format on return rpcSpec; } @@ -114,18 +109,12 @@ private: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, 
- Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - LineResponse const& line); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, LineResponse const& line); }; } // namespace RPCng diff --git a/src/rpc/ngHandlers/AccountOffers.cpp b/src/rpc/ngHandlers/AccountOffers.cpp index d333e643..025aa030 100644 --- a/src/rpc/ngHandlers/AccountOffers.cpp +++ b/src/rpc/ngHandlers/AccountOffers.cpp @@ -22,9 +22,7 @@ namespace RPCng { void -AccountOffersHandler::addOffer( - std::vector& offers, - ripple::SLE const& offerSle) const +AccountOffersHandler::addOffer(std::vector& offers, ripple::SLE const& offerSle) const { AccountOffersHandler::Offer offer; offer.takerPays = offerSle.getFieldAmount(ripple::sfTakerPays); @@ -32,8 +30,7 @@ AccountOffersHandler::addOffer( offer.seq = offerSle.getFieldU32(ripple::sfSequence); offer.flags = offerSle.getFieldU32(ripple::sfFlags); - auto const quality = - getQuality(offerSle.getFieldH256(ripple::sfBookDirectory)); + auto const quality = getQuality(offerSle.getFieldH256(ripple::sfBookDirectory)); ripple::STAmount const rate = ripple::amountFromQuality(quality); offer.quality = rate.getText(); if (offerSle.isFieldPresent(ripple::sfExpiration)) @@ -42,17 +39,11 @@ AccountOffersHandler::addOffer( }; AccountOffersHandler::Result -AccountOffersHandler::process( - AccountOffersHandler::Input input, - Context const& ctx) const +AccountOffersHandler::process(AccountOffersHandler::Input input, Context const& ctx) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto const status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; @@ -61,11 +52,10 @@ AccountOffersHandler::process( auto const accountID = RPC::accountFromStringStrict(input.account); - auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject( - ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); + auto const accountLedgerObject = + sharedPtrBackend_->fetchLedgerObject(ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); if (!accountLedgerObject) - return Error{RPC::Status{ - RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + return Error{RPC::Status{RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; Output response; response.account = ripple::to_string(*accountID); @@ -82,13 +72,7 @@ AccountOffersHandler::process( }; auto const next = RPC::ngTraverseOwnedNodes( - *sharedPtrBackend_, - *accountID, - lgrInfo.seq, - input.limit, - input.marker, - ctx.yield, - addToResponse); + *sharedPtrBackend_, *accountID, lgrInfo.seq, input.limit, input.marker, ctx.yield, addToResponse); if (auto const status = std::get_if(&next)) return Error{*status}; @@ -102,10 +86,7 @@ AccountOffersHandler::process( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - AccountOffersHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountOffersHandler::Output const& output) { jv = { {JS(ledger_hash), output.ledgerHash}, @@ -118,21 +99,14 @@ tag_invoke( } void -tag_invoke( - boost::json::value_from_tag, - 
boost::json::value& jv, - AccountOffersHandler::Offer const& offer) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountOffersHandler::Offer const& offer) { - jv = { - {JS(seq), offer.seq}, - {JS(flags), offer.flags}, - {JS(quality), offer.quality}}; + jv = {{JS(seq), offer.seq}, {JS(flags), offer.flags}, {JS(quality), offer.quality}}; auto& jsonObject = jv.as_object(); if (offer.expiration) jsonObject[JS(expiration)] = *offer.expiration; - auto const convertAmount = [&](const char* field, - ripple::STAmount const& amount) { + auto const convertAmount = [&](const char* field, ripple::STAmount const& amount) { if (amount.native()) { jsonObject[field] = amount.getText(); @@ -150,9 +124,7 @@ tag_invoke( } AccountOffersHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); AccountOffersHandler::Input input; @@ -169,8 +141,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); } } if (jsonObject.contains(JS(limit))) diff --git a/src/rpc/ngHandlers/AccountOffers.h b/src/rpc/ngHandlers/AccountOffers.h index 2f8f574d..be6fa6cc 100644 --- a/src/rpc/ngHandlers/AccountOffers.h +++ b/src/rpc/ngHandlers/AccountOffers.h @@ -63,8 +63,7 @@ public: using Result = RPCng::HandlerReturnType; - AccountOffersHandler( - std::shared_ptr const& sharedPtrBackend) + AccountOffersHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -77,9 +76,9 @@ public: {JS(ledger_hash), validation::Uint256HexStringValidator}, {JS(ledger_index), validation::LedgerIndexValidator}, {JS(marker), validation::AccountMarkerValidator}, - {JS(limit), - validation::Type{}, - validation::Between{10, 400}}}; + {JS(limit), validation::Type{}, validation::Between{10, 400}}, + }; + return rpcSpec; } @@ -91,18 +90,12 @@ private: addOffer(std::vector& offers, ripple::SLE const& offerSle) const; friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Offer const& offer); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Offer const& offer); }; } // namespace RPCng diff --git a/src/rpc/ngHandlers/AccountTx.cpp b/src/rpc/ngHandlers/AccountTx.cpp index 66737c9a..831a7820 100644 --- a/src/rpc/ngHandlers/AccountTx.cpp +++ b/src/rpc/ngHandlers/AccountTx.cpp @@ -25,51 +25,37 @@ namespace RPCng { // TODO: this is currently very similar to nft_history but its own copy for time // being. we should aim to reuse common logic in some way in the future. 
AccountTxHandler::Result -AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) - const +AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); auto [minIndex, maxIndex] = *range; if (input.ledgerIndexMin) { - if (range->maxSequence < input.ledgerIndexMin || - range->minSequence > input.ledgerIndexMin) + if (range->maxSequence < input.ledgerIndexMin || range->minSequence > input.ledgerIndexMin) { - return Error{RPC::Status{ - RPC::RippledError::rpcLGR_IDX_MALFORMED, - "ledgerSeqMinOutOfRange"}}; + return Error{RPC::Status{RPC::RippledError::rpcLGR_IDX_MALFORMED, "ledgerSeqMinOutOfRange"}}; } minIndex = *input.ledgerIndexMin; } if (input.ledgerIndexMax) { - if (range->maxSequence < input.ledgerIndexMax || - range->minSequence > input.ledgerIndexMax) - return Error{RPC::Status{ - RPC::RippledError::rpcLGR_IDX_MALFORMED, - "ledgerSeqMaxOutOfRange"}}; + if (range->maxSequence < input.ledgerIndexMax || range->minSequence > input.ledgerIndexMax) + return Error{RPC::Status{RPC::RippledError::rpcLGR_IDX_MALFORMED, "ledgerSeqMaxOutOfRange"}}; maxIndex = *input.ledgerIndexMax; } if (minIndex > maxIndex) - return Error{ - RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "invalidIndex"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "invalidIndex"}}; if (input.ledgerHash || input.ledgerIndex) { // rippled does not have this check if (input.ledgerIndexMax || input.ledgerIndexMin) - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - "containsLedgerSpecifierAndRange"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "containsLedgerSpecifierAndRange"}}; auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; @@ -96,17 +82,14 @@ AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) auto const limit = input.limit.value_or(limitDefault); auto const accountID = RPC::accountFromStringStrict(input.account); auto const [txnsAndCursor, timeDiff] = util::timed([&]() { - return sharedPtrBackend_->fetchAccountTransactions( - *accountID, limit, input.forward, cursor, ctx.yield); + return sharedPtrBackend_->fetchAccountTransactions(*accountID, limit, input.forward, cursor, ctx.yield); }); - log_.info() << "db fetch took " << timeDiff - << " milliseconds - num blobs = " << txnsAndCursor.txns.size(); + log_.info() << "db fetch took " << timeDiff << " milliseconds - num blobs = " << txnsAndCursor.txns.size(); auto const [blobs, retCursor] = txnsAndCursor; Output response; if (retCursor) - response.marker = { - retCursor->ledgerSequence, retCursor->transactionIndex}; + response.marker = {retCursor->ledgerSequence, retCursor->transactionIndex}; for (auto const& txnPlusMeta : blobs) { @@ -129,8 +112,7 @@ AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) auto [txn, meta] = RPC::toExpandedJson(txnPlusMeta); obj[JS(meta)] = std::move(meta); obj[JS(tx)] = std::move(txn); - obj[JS(tx)].as_object()[JS(ledger_index)] = - txnPlusMeta.ledgerSequence; + obj[JS(tx)].as_object()[JS(ledger_index)] = txnPlusMeta.ledgerSequence; obj[JS(tx)].as_object()[JS(date)] = txnPlusMeta.date; } else @@ -155,10 +137,7 @@ AccountTxHandler::process(AccountTxHandler::Input 
input, Context const& ctx) } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - AccountTxHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountTxHandler::Output const& output) { jv = { {JS(account), output.account}, @@ -173,29 +152,22 @@ tag_invoke( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - AccountTxHandler::Marker const& marker) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountTxHandler::Marker const& marker) { jv = {{JS(ledger), marker.ledger}, {JS(seq), marker.seq}}; } AccountTxHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); AccountTxHandler::Input input; input.account = jsonObject.at(JS(account)).as_string().c_str(); - if (jsonObject.contains(JS(ledger_index_min)) && - jsonObject.at(JS(ledger_index_min)).as_int64() != -1) + if (jsonObject.contains(JS(ledger_index_min)) && jsonObject.at(JS(ledger_index_min)).as_int64() != -1) { input.ledgerIndexMin = jsonObject.at(JS(ledger_index_min)).as_int64(); } - if (jsonObject.contains(JS(ledger_index_max)) && - jsonObject.at(JS(ledger_index_max)).as_int64() != -1) + if (jsonObject.contains(JS(ledger_index_max)) && jsonObject.at(JS(ledger_index_max)).as_int64() != -1) { input.ledgerIndexMax = jsonObject.at(JS(ledger_index_max)).as_int64(); } @@ -211,8 +183,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); } } if (jsonObject.contains(JS(binary))) diff --git a/src/rpc/ngHandlers/AccountTx.h b/src/rpc/ngHandlers/AccountTx.h index f2a294d9..1f624f03 100644 --- a/src/rpc/ngHandlers/AccountTx.h +++ b/src/rpc/ngHandlers/AccountTx.h @@ -69,8 +69,7 @@ public: using Result = RPCng::HandlerReturnType; - AccountTxHandler(std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + AccountTxHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -85,22 +84,17 @@ public: {JS(ledger_index_max), validation::Type{}}, {JS(binary), validation::Type{}}, {JS(forward), validation::Type{}}, - {JS(limit), - validation::Type{}, - validation::Between{1, 100}}, + {JS(limit), validation::Type{}, validation::Between{1, 100}}, {JS(marker), validation::WithCustomError{ validation::Type{}, - RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, "invalidMarker"}}, + RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "invalidMarker"}}, validation::Section{ - {JS(ledger), - validation::Required{}, - validation::Type{}}, - {JS(seq), - validation::Required{}, - validation::Type{}}, - }}}; + {JS(ledger), validation::Required{}, validation::Type{}}, + {JS(seq), validation::Required{}, validation::Type{}}, + }}, + }; + return rpcSpec; } @@ -109,18 +103,12 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Marker const& marker); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Marker const& 
marker);
 };

 } // namespace RPCng
diff --git a/src/rpc/ngHandlers/BookOffers.cpp b/src/rpc/ngHandlers/BookOffers.cpp
index a99b0fa4..2be971d9 100644
--- a/src/rpc/ngHandlers/BookOffers.cpp
+++ b/src/rpc/ngHandlers/BookOffers.cpp
@@ -25,19 +25,14 @@ namespace RPCng {
 BookOffersHandler::Result
 BookOffersHandler::process(Input input, Context const& ctx) const
 {
-    auto bookMaybe = RPC::parseBook(
-        input.paysCurrency, input.paysID, input.getsCurrency, input.getsID);
+    auto bookMaybe = RPC::parseBook(input.paysCurrency, input.paysID, input.getsCurrency, input.getsID);
     if (auto const status = std::get_if(&bookMaybe))
         return Error{*status};

     // check ledger
     auto const range = sharedPtrBackend_->fetchLedgerRange();
     auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq(
-        *sharedPtrBackend_,
-        ctx.yield,
-        input.ledgerHash,
-        input.ledgerIndex,
-        range->maxSequence);
+        *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence);

     if (auto const status = std::get_if(&lgrInfoOrStatus))
         return Error{*status};
@@ -47,28 +42,19 @@ BookOffersHandler::process(Input input, Context const& ctx) const
     auto const bookKey = getBookBase(book);

     // TODO: Add performance metrics if needed in future
-    auto [offers, _] = sharedPtrBackend_->fetchBookOffers(
-        bookKey, lgrInfo.seq, input.limit, ctx.yield);
+    auto [offers, _] = sharedPtrBackend_->fetchBookOffers(bookKey, lgrInfo.seq, input.limit, ctx.yield);

     BookOffersHandler::Output output;
     output.ledgerHash = ripple::strHex(lgrInfo.hash);
     output.ledgerIndex = lgrInfo.seq;
     output.offers = RPC::postProcessOrderBook(
-        offers,
-        book,
-        input.taker ? *(input.taker) : beast::zero,
-        *sharedPtrBackend_,
-        lgrInfo.seq,
-        ctx.yield);
+        offers, book, input.taker ? *(input.taker) : beast::zero, *sharedPtrBackend_, lgrInfo.seq, ctx.yield);

     return output;
 }

 void
-tag_invoke(
-    boost::json::value_from_tag,
-    boost::json::value& jv,
-    BookOffersHandler::Output const& output)
+tag_invoke(boost::json::value_from_tag, boost::json::value& jv, BookOffersHandler::Output const& output)
 {
     jv = boost::json::object{
         {JS(ledger_hash), output.ledgerHash},
@@ -78,37 +64,19 @@ tag_invoke(
 }

 BookOffersHandler::Input
-tag_invoke(
-    boost::json::value_to_tag,
-    boost::json::value const& jv)
+tag_invoke(boost::json::value_to_tag, boost::json::value const& jv)
 {
     BookOffersHandler::Input input;
     auto const& jsonObject = jv.as_object();
-    ripple::to_currency(
-        input.getsCurrency,
-        jv.at(JS(taker_gets)).as_object().at(JS(currency)).as_string().c_str());
-    ripple::to_currency(
-        input.paysCurrency,
-        jv.at(JS(taker_pays)).as_object().at(JS(currency)).as_string().c_str());
+    ripple::to_currency(input.getsCurrency, jv.at(JS(taker_gets)).as_object().at(JS(currency)).as_string().c_str());
+    ripple::to_currency(input.paysCurrency, jv.at(JS(taker_pays)).as_object().at(JS(currency)).as_string().c_str());
     if (jv.at(JS(taker_gets)).as_object().contains(JS(issuer)))
     {
-        ripple::to_issuer(
-            input.getsID,
-            jv.at(JS(taker_gets))
-                .as_object()
-                .at(JS(issuer))
-                .as_string()
-                .c_str());
+        ripple::to_issuer(input.getsID, jv.at(JS(taker_gets)).as_object().at(JS(issuer)).as_string().c_str());
     }
     if (jv.at(JS(taker_pays)).as_object().contains(JS(issuer)))
     {
-        ripple::to_issuer(
-            input.paysID,
-            jv.at(JS(taker_pays))
-                .as_object()
-                .at(JS(issuer))
-                .as_string()
-                .c_str());
+        ripple::to_issuer(input.paysID, jv.at(JS(taker_pays)).as_object().at(JS(issuer)).as_string().c_str());
     }
     if (jsonObject.contains(JS(ledger_hash)))
     {
@@ -122,14 +90,12 @@ tag_invoke(
     }
     else if
(jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); } } if (jsonObject.contains(JS(taker))) { - input.taker = - RPC::accountFromStringStrict(jv.at(JS(taker)).as_string().c_str()); + input.taker = RPC::accountFromStringStrict(jv.at(JS(taker)).as_string().c_str()); } if (jsonObject.contains(JS(limit))) { diff --git a/src/rpc/ngHandlers/BookOffers.h b/src/rpc/ngHandlers/BookOffers.h index be5bda14..95cfd5d5 100644 --- a/src/rpc/ngHandlers/BookOffers.h +++ b/src/rpc/ngHandlers/BookOffers.h @@ -55,8 +55,7 @@ public: using Result = RPCng::HandlerReturnType; - BookOffersHandler(std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + BookOffersHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -71,12 +70,10 @@ public: {JS(currency), validation::Required{}, validation::WithCustomError{ - validation::CurrencyValidator, - RPC::Status(RPC::RippledError::rpcDST_AMT_MALFORMED)}}, + validation::CurrencyValidator, RPC::Status(RPC::RippledError::rpcDST_AMT_MALFORMED)}}, {JS(issuer), validation::WithCustomError{ - validation::IssuerValidator, - RPC::Status(RPC::RippledError::rpcDST_ISR_MALFORMED)}}}}, + validation::IssuerValidator, RPC::Status(RPC::RippledError::rpcDST_ISR_MALFORMED)}}}}, {JS(taker_pays), validation::Required{}, validation::Type{}, @@ -84,18 +81,16 @@ public: {JS(currency), validation::Required{}, validation::WithCustomError{ - validation::CurrencyValidator, - RPC::Status(RPC::RippledError::rpcSRC_CUR_MALFORMED)}}, + validation::CurrencyValidator, RPC::Status(RPC::RippledError::rpcSRC_CUR_MALFORMED)}}, {JS(issuer), validation::WithCustomError{ - validation::IssuerValidator, - RPC::Status(RPC::RippledError::rpcSRC_ISR_MALFORMED)}}}}, + validation::IssuerValidator, RPC::Status(RPC::RippledError::rpcSRC_ISR_MALFORMED)}}}}, {JS(taker), validation::AccountValidator}, - {JS(limit), - validation::Type{}, - validation::Between{1, 100}}, + {JS(limit), validation::Type{}, validation::Between{1, 100}}, {JS(ledger_hash), validation::Uint256HexStringValidator}, - {JS(ledger_index), validation::LedgerIndexValidator}}; + {JS(ledger_index), validation::LedgerIndexValidator}, + }; + return rpcSpec; } @@ -104,10 +99,7 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/GatewayBalances.cpp b/src/rpc/ngHandlers/GatewayBalances.cpp index 910efad0..50ee79d1 100644 --- a/src/rpc/ngHandlers/GatewayBalances.cpp +++ b/src/rpc/ngHandlers/GatewayBalances.cpp @@ -22,29 +22,22 @@ namespace RPCng { GatewayBalancesHandler::Result -GatewayBalancesHandler::process( - GatewayBalancesHandler::Input input, - Context const& ctx) const +GatewayBalancesHandler::process(GatewayBalancesHandler::Input input, Context const& ctx) const { // check ledger auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto const status = std::get_if(&lgrInfoOrStatus)) return 
Error{*status}; // check account auto const lgrInfo = std::get(lgrInfoOrStatus); auto const accountID = RPC::accountFromStringStrict(input.account); - auto const accountLedgerObject = sharedPtrBackend_->fetchLedgerObject( - ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); + auto const accountLedgerObject = + sharedPtrBackend_->fetchLedgerObject(ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield); if (!accountLedgerObject) - return Error{RPC::Status{ - RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + return Error{RPC::Status{RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; GatewayBalancesHandler::Output output; @@ -58,8 +51,7 @@ GatewayBalancesHandler::process( auto const highID = highLimit.getIssuer(); auto const viewLowest = (lowLimit.getIssuer() == accountID); auto const flags = sle.getFieldU32(ripple::sfFlags); - auto const freeze = flags & - (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); + auto const freeze = flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze); if (!viewLowest) balance.negate(); @@ -126,12 +118,10 @@ GatewayBalancesHandler::process( if (auto status = std::get_if(&ret)) return Error{*status}; - if (not std::all_of( - input.hotWallets.begin(), - input.hotWallets.end(), - [&](auto const& hw) { return output.hotBalances.contains(hw); })) - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, "invalidHotWallet"}}; + if (not std::all_of(input.hotWallets.begin(), input.hotWallets.end(), [&](auto const& hw) { + return output.hotBalances.contains(hw); + })) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "invalidHotWallet"}}; output.accountID = input.account; output.ledgerHash = ripple::strHex(lgrInfo.hash); @@ -140,10 +130,7 @@ GatewayBalancesHandler::process( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - GatewayBalancesHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, GatewayBalancesHandler::Output const& output) { boost::json::object obj; if (!output.sums.empty()) @@ -156,28 +143,25 @@ tag_invoke( obj[JS(obligations)] = std::move(obligations); } - auto const toJson = - [](std::map> const& - balances) { - boost::json::object balancesObj; - if (!balances.empty()) + auto const toJson = [](std::map> const& balances) { + boost::json::object balancesObj; + if (!balances.empty()) + { + for (auto const& [accId, accBalances] : balances) { - for (auto const& [accId, accBalances] : balances) + boost::json::array arr; + for (auto const& balance : accBalances) { - boost::json::array arr; - for (auto const& balance : accBalances) - { - boost::json::object entry; - entry[JS(currency)] = - ripple::to_string(balance.issue().currency); - entry[JS(value)] = balance.getText(); - arr.push_back(std::move(entry)); - } - balancesObj[ripple::to_string(accId)] = std::move(arr); + boost::json::object entry; + entry[JS(currency)] = ripple::to_string(balance.issue().currency); + entry[JS(value)] = balance.getText(); + arr.push_back(std::move(entry)); } + balancesObj[ripple::to_string(accId)] = std::move(arr); } - return balancesObj; - }; + } + return balancesObj; + }; if (auto balances = toJson(output.hotBalances); balances.size()) obj[JS(balances)] = balances; @@ -197,9 +181,7 @@ tag_invoke( } GatewayBalancesHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); 
GatewayBalancesHandler::Input input; @@ -216,16 +198,14 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); } } if (jsonObject.contains(JS(hotwallet))) { if (jsonObject.at(JS(hotwallet)).is_string()) { - input.hotWallets.insert(*RPC::accountFromStringStrict( - jv.at(JS(hotwallet)).as_string().c_str())); + input.hotWallets.insert(*RPC::accountFromStringStrict(jv.at(JS(hotwallet)).as_string().c_str())); } else { @@ -234,10 +214,7 @@ tag_invoke( hotWallets.begin(), hotWallets.end(), std::inserter(input.hotWallets, input.hotWallets.begin()), - [](auto const& hotWallet) { - return *RPC::accountFromStringStrict( - hotWallet.as_string().c_str()); - }); + [](auto const& hotWallet) { return *RPC::accountFromStringStrict(hotWallet.as_string().c_str()); }); } } return input; diff --git a/src/rpc/ngHandlers/GatewayBalances.h b/src/rpc/ngHandlers/GatewayBalances.h index fad3ae19..098c4d41 100644 --- a/src/rpc/ngHandlers/GatewayBalances.h +++ b/src/rpc/ngHandlers/GatewayBalances.h @@ -39,8 +39,7 @@ public: std::map sums; std::map> hotBalances; std::map> assets; - std::map> - frozenBalances; + std::map> frozenBalances; // validated should be sent via framework bool validated = true; }; @@ -56,8 +55,7 @@ public: using Result = RPCng::HandlerReturnType; - GatewayBalancesHandler( - std::shared_ptr const& sharedPtrBackend) + GatewayBalancesHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -65,39 +63,30 @@ public: RpcSpecConstRef spec() const { - static auto const hotWalletValidator = validation::CustomValidator{ - [](boost::json::value const& value, - std::string_view key) -> MaybeError { + static auto const hotWalletValidator = + validation::CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { if (!value.is_string() && !value.is_array()) { - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "NotStringOrArray"}}; + return Error{ + RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "NotStringOrArray"}}; } // wallet needs to be a valid accountID or public key - auto const wallets = value.is_array() - ? value.as_array() - : boost::json::array{value}; - auto const getAccountID = - [](auto const& j) -> std::optional { + auto const wallets = value.is_array() ? 
value.as_array() : boost::json::array{value}; + auto const getAccountID = [](auto const& j) -> std::optional { if (j.is_string()) { auto const pk = ripple::parseBase58( - ripple::TokenType::AccountPublic, - j.as_string().c_str()); + ripple::TokenType::AccountPublic, j.as_string().c_str()); if (pk) return ripple::calcAccountID(*pk); - return ripple::parseBase58( - j.as_string().c_str()); + return ripple::parseBase58(j.as_string().c_str()); } return {}; }; for (auto const& wallet : wallets) { if (!getAccountID(wallet)) - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - std::string(key) + "Malformed"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, std::string(key) + "Malformed"}}; } return MaybeError{}; }}; @@ -106,7 +95,9 @@ public: {JS(account), validation::Required{}, validation::AccountValidator}, {JS(ledger_hash), validation::Uint256HexStringValidator}, {JS(ledger_index), validation::LedgerIndexValidator}, - {JS(hotwallet), hotWalletValidator}}; + {JS(hotwallet), hotWalletValidator}, + }; + return rpcSpec; } @@ -115,10 +106,7 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/LedgerEntry.cpp b/src/rpc/ngHandlers/LedgerEntry.cpp index 188a280a..173fbb23 100644 --- a/src/rpc/ngHandlers/LedgerEntry.cpp +++ b/src/rpc/ngHandlers/LedgerEntry.cpp @@ -23,8 +23,7 @@ namespace RPCng { LedgerEntryHandler::Result -LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx) - const +LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx) const { ripple::uint256 key; if (input.index) @@ -33,9 +32,7 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx) } else if (input.accountRoot) { - key = ripple::keylet::account( - *ripple::parseBase58(*(input.accountRoot))) - .key; + key = ripple::keylet::account(*ripple::parseBase58(*(input.accountRoot))).key; } else if (input.directory) { @@ -46,83 +43,57 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx) } else if (input.offer) { - auto const id = ripple::parseBase58( - input.offer->at(JS(account)).as_string().c_str()); - key = - ripple::keylet::offer( - *id, - boost::json::value_to(input.offer->at(JS(seq)))) - .key; + auto const id = ripple::parseBase58(input.offer->at(JS(account)).as_string().c_str()); + key = ripple::keylet::offer(*id, boost::json::value_to(input.offer->at(JS(seq)))).key; } else if (input.rippleStateAccount) { auto const id1 = ripple::parseBase58( - input.rippleStateAccount->at(JS(accounts)) - .as_array() - .at(0) - .as_string() - .c_str()); + input.rippleStateAccount->at(JS(accounts)).as_array().at(0).as_string().c_str()); auto const id2 = ripple::parseBase58( - input.rippleStateAccount->at(JS(accounts)) - .as_array() - .at(1) - .as_string() - .c_str()); - auto const currency = ripple::to_currency( - input.rippleStateAccount->at(JS(currency)).as_string().c_str()); + input.rippleStateAccount->at(JS(accounts)).as_array().at(1).as_string().c_str()); + auto const currency = ripple::to_currency(input.rippleStateAccount->at(JS(currency)).as_string().c_str()); key = ripple::keylet::line(*id1, *id2, currency).key; } else if (input.escrow) { - auto const id = ripple::parseBase58( - input.escrow->at(JS(owner)).as_string().c_str()); 
- key = ripple::keylet::escrow(*id, input.escrow->at(JS(seq)).as_int64()) - .key; + auto const id = ripple::parseBase58(input.escrow->at(JS(owner)).as_string().c_str()); + key = ripple::keylet::escrow(*id, input.escrow->at(JS(seq)).as_int64()).key; } else if (input.depositPreauth) { - auto const owner = ripple::parseBase58( - input.depositPreauth->at(JS(owner)).as_string().c_str()); - auto const authorized = ripple::parseBase58( - input.depositPreauth->at(JS(authorized)).as_string().c_str()); + auto const owner = + ripple::parseBase58(input.depositPreauth->at(JS(owner)).as_string().c_str()); + auto const authorized = + ripple::parseBase58(input.depositPreauth->at(JS(authorized)).as_string().c_str()); key = ripple::keylet::depositPreauth(*owner, *authorized).key; } else if (input.ticket) { - auto const id = ripple::parseBase58( - input.ticket->at(JS(account)).as_string().c_str()); - key = ripple::getTicketIndex( - *id, input.ticket->at(JS(ticket_seq)).as_int64()); + auto const id = ripple::parseBase58(input.ticket->at(JS(account)).as_string().c_str()); + key = ripple::getTicketIndex(*id, input.ticket->at(JS(ticket_seq)).as_int64()); } else { // Must specify 1 of the following fields to indicate what type - return Error{ - RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "unknownOption"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "unknownOption"}}; } // check ledger exists auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto const status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; auto const lgrInfo = std::get(lgrInfoOrStatus); - auto const ledgerObject = - sharedPtrBackend_->fetchLedgerObject(key, lgrInfo.seq, ctx.yield); + auto const ledgerObject = sharedPtrBackend_->fetchLedgerObject(key, lgrInfo.seq, ctx.yield); if (!ledgerObject || ledgerObject->size() == 0) return Error{RPC::Status{"entryNotFound"}}; - ripple::STLedgerEntry const sle{ - ripple::SerialIter{ledgerObject->data(), ledgerObject->size()}, key}; - if (input.expectedType != ripple::ltANY && - sle.getType() != input.expectedType) + ripple::STLedgerEntry const sle{ripple::SerialIter{ledgerObject->data(), ledgerObject->size()}, key}; + if (input.expectedType != ripple::ltANY && sle.getType() != input.expectedType) return Error{RPC::Status{"unexpectedLedgerType"}}; LedgerEntryHandler::Output output; @@ -141,41 +112,30 @@ LedgerEntryHandler::process(LedgerEntryHandler::Input input, Context const& ctx) } std::variant -LedgerEntryHandler::composeKeyFromDirectory( - boost::json::object const& directory) const noexcept +LedgerEntryHandler::composeKeyFromDirectory(boost::json::object const& directory) const noexcept { // can not specify both dir_root and owner. 
if (directory.contains(JS(dir_root)) && directory.contains(JS(owner))) - return RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - "mayNotSpecifyBothDirRootAndOwner"}; + return RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "mayNotSpecifyBothDirRootAndOwner"}; // at least one should be available if (!(directory.contains(JS(dir_root)) || directory.contains(JS(owner)))) - return RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"}; + return RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"}; - uint64_t const subIndex = directory.contains(JS(sub_index)) - ? boost::json::value_to(directory.at(JS(sub_index))) - : 0; + uint64_t const subIndex = + directory.contains(JS(sub_index)) ? boost::json::value_to(directory.at(JS(sub_index))) : 0; if (directory.contains(JS(dir_root))) { - ripple::uint256 const uDirRoot{ - directory.at(JS(dir_root)).as_string().c_str()}; + ripple::uint256 const uDirRoot{directory.at(JS(dir_root)).as_string().c_str()}; return ripple::keylet::page(uDirRoot, subIndex).key; } - auto const ownerID = ripple::parseBase58( - directory.at(JS(owner)).as_string().c_str()); - return ripple::keylet::page(ripple::keylet::ownerDir(*ownerID), subIndex) - .key; + auto const ownerID = ripple::parseBase58(directory.at(JS(owner)).as_string().c_str()); + return ripple::keylet::page(ripple::keylet::ownerDir(*ownerID), subIndex).key; } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - LedgerEntryHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, LedgerEntryHandler::Output const& output) { auto object = boost::json::object{ {JS(ledger_hash), output.ledgerHash}, @@ -194,9 +154,7 @@ tag_invoke( } LedgerEntryHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); LedgerEntryHandler::Input input; @@ -212,8 +170,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); } } if (jsonObject.contains(JS(binary))) @@ -221,24 +178,20 @@ tag_invoke( input.binary = jv.at(JS(binary)).as_bool(); } // check all the potential index - static auto const indexFieldTypeMap = - std::unordered_map{ - {JS(index), ripple::ltANY}, - {JS(directory), ripple::ltDIR_NODE}, - {JS(offer), ripple::ltOFFER}, - {JS(check), ripple::ltCHECK}, - {JS(escrow), ripple::ltESCROW}, - {JS(payment_channel), ripple::ltPAYCHAN}, - {JS(deposit_preauth), ripple::ltDEPOSIT_PREAUTH}, - {JS(ticket), ripple::ltTICKET}}; + static auto const indexFieldTypeMap = std::unordered_map{ + {JS(index), ripple::ltANY}, + {JS(directory), ripple::ltDIR_NODE}, + {JS(offer), ripple::ltOFFER}, + {JS(check), ripple::ltCHECK}, + {JS(escrow), ripple::ltESCROW}, + {JS(payment_channel), ripple::ltPAYCHAN}, + {JS(deposit_preauth), ripple::ltDEPOSIT_PREAUTH}, + {JS(ticket), ripple::ltTICKET}}; - auto const indexFieldType = std::find_if( - indexFieldTypeMap.begin(), - indexFieldTypeMap.end(), - [&jsonObject](auto const& pair) { + auto const indexFieldType = + std::find_if(indexFieldTypeMap.begin(), indexFieldTypeMap.end(), [&jsonObject](auto const& pair) { auto const& [field, _] = pair; - return jsonObject.contains(field) && - jsonObject.at(field).is_string(); + return jsonObject.contains(field) && 
jsonObject.at(field).is_string(); }); if (indexFieldType != indexFieldTypeMap.end()) { diff --git a/src/rpc/ngHandlers/LedgerEntry.h b/src/rpc/ngHandlers/LedgerEntry.h index ebb81ff8..4fb39623 100644 --- a/src/rpc/ngHandlers/LedgerEntry.h +++ b/src/rpc/ngHandlers/LedgerEntry.h @@ -65,9 +65,7 @@ public: using Result = RPCng::HandlerReturnType; - LedgerEntryHandler( - std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + LedgerEntryHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -78,27 +76,17 @@ public: // The accounts array must have two different elements // Each element must be a valid address static auto const rippleStateAccountsCheck = - validation::CustomValidator{ - [](boost::json::value const& value, - std::string_view key) -> MaybeError { - if (!value.is_array() || value.as_array().size() != 2 || - !value.as_array()[0].is_string() || - !value.as_array()[1].is_string() || - value.as_array()[0].as_string() == - value.as_array()[1].as_string()) - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - "malformedAccounts"}}; - auto const id1 = ripple::parseBase58( - value.as_array()[0].as_string().c_str()); - auto const id2 = ripple::parseBase58( - value.as_array()[1].as_string().c_str()); - if (!id1 || !id2) - return Error{RPC::Status{ - RPC::ClioError::rpcMALFORMED_ADDRESS, - "malformedAddresses"}}; - return MaybeError{}; - }}; + validation::CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError { + if (!value.is_array() || value.as_array().size() != 2 || !value.as_array()[0].is_string() || + !value.as_array()[1].is_string() || + value.as_array()[0].as_string() == value.as_array()[1].as_string()) + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "malformedAccounts"}}; + auto const id1 = ripple::parseBase58(value.as_array()[0].as_string().c_str()); + auto const id2 = ripple::parseBase58(value.as_array()[1].as_string().c_str()); + if (!id1 || !id2) + return Error{RPC::Status{RPC::ClioError::rpcMALFORMED_ADDRESS, "malformedAddresses"}}; + return MaybeError{}; + }}; static auto const rpcSpec = RpcSpec{ {JS(binary), validation::Type{}}, @@ -109,80 +97,56 @@ public: {JS(check), validation::Uint256HexStringValidator}, {JS(deposit_preauth), validation::Type{}, - validation::IfType{ - validation::Uint256HexStringValidator}, + validation::IfType{validation::Uint256HexStringValidator}, validation::IfType{ validation::Section{ - {JS(owner), - validation::Required{}, - validation::AccountBase58Validator}, - {JS(authorized), - validation::Required{}, - validation::AccountBase58Validator}, + {JS(owner), validation::Required{}, validation::AccountBase58Validator}, + {JS(authorized), validation::Required{}, validation::AccountBase58Validator}, }, }}, {JS(directory), validation::Type{}, - validation::IfType{ - validation::Uint256HexStringValidator}, + validation::IfType{validation::Uint256HexStringValidator}, validation::IfType{validation::Section{ {JS(owner), validation::AccountBase58Validator}, {JS(dir_root), validation::Uint256HexStringValidator}, {JS(sub_index), validation::Type{}}}}}, {JS(escrow), validation::Type{}, - validation::IfType{ - validation::Uint256HexStringValidator}, + validation::IfType{validation::Uint256HexStringValidator}, validation::IfType{ validation::Section{ - {JS(owner), - validation::Required{}, - validation::AccountBase58Validator}, - {JS(seq), - validation::Required{}, - validation::Type{}}, + {JS(owner), validation::Required{}, 
validation::AccountBase58Validator}, + {JS(seq), validation::Required{}, validation::Type{}}, }, }}, {JS(offer), validation::Type{}, - validation::IfType{ - validation::Uint256HexStringValidator}, + validation::IfType{validation::Uint256HexStringValidator}, validation::IfType{ validation::Section{ - {JS(account), - validation::Required{}, - validation::AccountBase58Validator}, - {JS(seq), - validation::Required{}, - validation::Type{}}, + {JS(account), validation::Required{}, validation::AccountBase58Validator}, + {JS(seq), validation::Required{}, validation::Type{}}, }, }}, {JS(payment_channel), validation::Uint256HexStringValidator}, {JS(ripple_state), validation::Type{}, validation::Section{ - {JS(accounts), - validation::Required{}, - rippleStateAccountsCheck}, - {JS(currency), - validation::Required{}, - validation::CurrencyValidator}, + {JS(accounts), validation::Required{}, rippleStateAccountsCheck}, + {JS(currency), validation::Required{}, validation::CurrencyValidator}, }}, {JS(ticket), validation::Type{}, - validation::IfType{ - validation::Uint256HexStringValidator}, + validation::IfType{validation::Uint256HexStringValidator}, validation::IfType{ validation::Section{ - {JS(account), - validation::Required{}, - validation::AccountBase58Validator}, - {JS(ticket_seq), - validation::Required{}, - validation::Type{}}, + {JS(account), validation::Required{}, validation::AccountBase58Validator}, + {JS(ticket_seq), validation::Required{}, validation::Type{}}, }, }}, }; + return rpcSpec; } @@ -193,14 +157,10 @@ private: // dir_root and owner can not be both empty or filled at the same time // This function will return an error if this is the case std::variant - composeKeyFromDirectory( - boost::json::object const& directory) const noexcept; + composeKeyFromDirectory(boost::json::object const& directory) const noexcept; friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/LedgerRange.cpp b/src/rpc/ngHandlers/LedgerRange.cpp index 90efa1fd..781c60ce 100644 --- a/src/rpc/ngHandlers/LedgerRange.cpp +++ b/src/rpc/ngHandlers/LedgerRange.cpp @@ -27,23 +27,18 @@ namespace RPCng { LedgerRangeHandler::Result LedgerRangeHandler::process() const { - if (auto const maybeRange = sharedPtrBackend_->fetchLedgerRange(); - maybeRange) + if (auto const maybeRange = sharedPtrBackend_->fetchLedgerRange(); maybeRange) { return Output{*maybeRange}; } else { - return Error{ - RPC::Status{RPC::RippledError::rpcNOT_READY, "rangeNotFound"}}; + return Error{RPC::Status{RPC::RippledError::rpcNOT_READY, "rangeNotFound"}}; } } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - LedgerRangeHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, LedgerRangeHandler::Output const& output) { jv = boost::json::object{ {JS(ledger_index_min), output.range.minSequence}, diff --git a/src/rpc/ngHandlers/LedgerRange.h b/src/rpc/ngHandlers/LedgerRange.h index 142e6f2e..554dddb9 100644 --- a/src/rpc/ngHandlers/LedgerRange.h +++ b/src/rpc/ngHandlers/LedgerRange.h @@ -40,9 +40,7 @@ public: using Result = HandlerReturnType; - LedgerRangeHandler( - std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + LedgerRangeHandler(std::shared_ptr const& sharedPtrBackend) : 
sharedPtrBackend_(sharedPtrBackend) { } @@ -51,9 +49,6 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); }; } // namespace RPCng diff --git a/src/rpc/ngHandlers/NFTBuyOffers.cpp b/src/rpc/ngHandlers/NFTBuyOffers.cpp index 7faf5449..355b4d89 100644 --- a/src/rpc/ngHandlers/NFTBuyOffers.cpp +++ b/src/rpc/ngHandlers/NFTBuyOffers.cpp @@ -28,9 +28,7 @@ using namespace ripple; namespace RPCng { NFTBuyOffersHandler::Result -NFTBuyOffersHandler::process( - NFTBuyOffersHandler::Input input, - Context const& ctx) const +NFTBuyOffersHandler::process(NFTBuyOffersHandler::Input input, Context const& ctx) const { auto const tokenID = uint256{input.nftID.c_str()}; auto const directory = keylet::nft_buys(tokenID); diff --git a/src/rpc/ngHandlers/NFTBuyOffers.h b/src/rpc/ngHandlers/NFTBuyOffers.h index 783dc691..cf2e5f12 100644 --- a/src/rpc/ngHandlers/NFTBuyOffers.h +++ b/src/rpc/ngHandlers/NFTBuyOffers.h @@ -26,8 +26,7 @@ namespace RPCng { class NFTBuyOffersHandler : public NFTOffersHandlerBase { public: - NFTBuyOffersHandler( - std::shared_ptr const& sharedPtrBackend) + NFTBuyOffersHandler(std::shared_ptr const& sharedPtrBackend) : NFTOffersHandlerBase(sharedPtrBackend) { } diff --git a/src/rpc/ngHandlers/NFTHistory.cpp b/src/rpc/ngHandlers/NFTHistory.cpp index fdd0816e..242f7d8a 100644 --- a/src/rpc/ngHandlers/NFTHistory.cpp +++ b/src/rpc/ngHandlers/NFTHistory.cpp @@ -25,51 +25,37 @@ namespace RPCng { // TODO: this is currently very similar to account_tx but its own copy for time // being. we should aim to reuse common logic in some way in the future. NFTHistoryHandler::Result -NFTHistoryHandler::process(NFTHistoryHandler::Input input, Context const& ctx) - const +NFTHistoryHandler::process(NFTHistoryHandler::Input input, Context const& ctx) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); auto [minIndex, maxIndex] = *range; if (input.ledgerIndexMin) { - if (range->maxSequence < input.ledgerIndexMin || - range->minSequence > input.ledgerIndexMin) + if (range->maxSequence < input.ledgerIndexMin || range->minSequence > input.ledgerIndexMin) { - return Error{RPC::Status{ - RPC::RippledError::rpcLGR_IDX_MALFORMED, - "ledgerSeqMinOutOfRange"}}; + return Error{RPC::Status{RPC::RippledError::rpcLGR_IDX_MALFORMED, "ledgerSeqMinOutOfRange"}}; } minIndex = *input.ledgerIndexMin; } if (input.ledgerIndexMax) { - if (range->maxSequence < input.ledgerIndexMax || - range->minSequence > input.ledgerIndexMax) - return Error{RPC::Status{ - RPC::RippledError::rpcLGR_IDX_MALFORMED, - "ledgerSeqMaxOutOfRange"}}; + if (range->maxSequence < input.ledgerIndexMax || range->minSequence > input.ledgerIndexMax) + return Error{RPC::Status{RPC::RippledError::rpcLGR_IDX_MALFORMED, "ledgerSeqMaxOutOfRange"}}; maxIndex = *input.ledgerIndexMax; } if (minIndex > maxIndex) - return Error{ - RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "invalidIndex"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "invalidIndex"}}; if (input.ledgerHash || input.ledgerIndex) { // rippled does not have this check if (input.ledgerIndexMax || input.ledgerIndexMin) - return Error{RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - "containsLedgerSpecifierAndRange"}}; + return Error{RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "containsLedgerSpecifierAndRange"}}; auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - 
ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; @@ -95,18 +81,14 @@ NFTHistoryHandler::process(NFTHistoryHandler::Input input, Context const& ctx) static auto constexpr limitDefault = 50; auto const limit = input.limit.value_or(limitDefault); auto const tokenID = ripple::uint256{input.nftID.c_str()}; - auto const [txnsAndCursor, timeDiff] = util::timed([&]() { - return sharedPtrBackend_->fetchNFTTransactions( - tokenID, limit, input.forward, cursor, ctx.yield); - }); - log_.info() << "db fetch took " << timeDiff - << " milliseconds - num blobs = " << txnsAndCursor.txns.size(); + auto const [txnsAndCursor, timeDiff] = util::timed( + [&]() { return sharedPtrBackend_->fetchNFTTransactions(tokenID, limit, input.forward, cursor, ctx.yield); }); + log_.info() << "db fetch took " << timeDiff << " milliseconds - num blobs = " << txnsAndCursor.txns.size(); auto const [blobs, retCursor] = txnsAndCursor; Output response; if (retCursor) - response.marker = { - retCursor->ledgerSequence, retCursor->transactionIndex}; + response.marker = {retCursor->ledgerSequence, retCursor->transactionIndex}; for (auto const& txnPlusMeta : blobs) { @@ -129,8 +111,7 @@ NFTHistoryHandler::process(NFTHistoryHandler::Input input, Context const& ctx) auto [txn, meta] = RPC::toExpandedJson(txnPlusMeta); obj[JS(meta)] = std::move(meta); obj[JS(tx)] = std::move(txn); - obj[JS(tx)].as_object()[JS(ledger_index)] = - txnPlusMeta.ledgerSequence; + obj[JS(tx)].as_object()[JS(ledger_index)] = txnPlusMeta.ledgerSequence; obj[JS(tx)].as_object()[JS(date)] = txnPlusMeta.date; } else @@ -155,10 +136,7 @@ NFTHistoryHandler::process(NFTHistoryHandler::Input input, Context const& ctx) } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - NFTHistoryHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, NFTHistoryHandler::Output const& output) { jv = { {JS(nft_id), output.nftID}, @@ -173,29 +151,22 @@ tag_invoke( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - NFTHistoryHandler::Marker const& marker) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, NFTHistoryHandler::Marker const& marker) { jv = {{JS(ledger), marker.ledger}, {JS(seq), marker.seq}}; } NFTHistoryHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); NFTHistoryHandler::Input input; input.nftID = jsonObject.at(JS(nft_id)).as_string().c_str(); - if (jsonObject.contains(JS(ledger_index_min)) && - jsonObject.at(JS(ledger_index_min)).as_int64() != -1) + if (jsonObject.contains(JS(ledger_index_min)) && jsonObject.at(JS(ledger_index_min)).as_int64() != -1) { input.ledgerIndexMin = jsonObject.at(JS(ledger_index_min)).as_int64(); } - if (jsonObject.contains(JS(ledger_index_max)) && - jsonObject.at(JS(ledger_index_max)).as_int64() != -1) + if (jsonObject.contains(JS(ledger_index_max)) && jsonObject.at(JS(ledger_index_max)).as_int64() != -1) { input.ledgerIndexMax = jsonObject.at(JS(ledger_index_max)).as_int64(); } @@ -211,8 +182,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = 
std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); } } if (jsonObject.contains(JS(binary))) diff --git a/src/rpc/ngHandlers/NFTHistory.h b/src/rpc/ngHandlers/NFTHistory.h index 21336a5f..e81b554e 100644 --- a/src/rpc/ngHandlers/NFTHistory.h +++ b/src/rpc/ngHandlers/NFTHistory.h @@ -70,8 +70,7 @@ public: using Result = RPCng::HandlerReturnType; - NFTHistoryHandler(std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + NFTHistoryHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -79,31 +78,24 @@ public: spec() const { static auto const rpcSpec = RpcSpec{ - {JS(nft_id), - validation::Required{}, - validation::Uint256HexStringValidator}, + {JS(nft_id), validation::Required{}, validation::Uint256HexStringValidator}, {JS(ledger_hash), validation::Uint256HexStringValidator}, {JS(ledger_index), validation::LedgerIndexValidator}, {JS(ledger_index_min), validation::Type{}}, {JS(ledger_index_max), validation::Type{}}, {JS(binary), validation::Type{}}, {JS(forward), validation::Type{}}, - {JS(limit), - validation::Type{}, - validation::Between{1, 100}}, + {JS(limit), validation::Type{}, validation::Between{1, 100}}, {JS(marker), validation::WithCustomError{ validation::Type{}, - RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, "invalidMarker"}}, + RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "invalidMarker"}}, validation::Section{ - {JS(ledger), - validation::Required{}, - validation::Type{}}, - {JS(seq), - validation::Required{}, - validation::Type{}}, - }}}; + {JS(ledger), validation::Required{}, validation::Type{}}, + {JS(seq), validation::Required{}, validation::Type{}}, + }}, + }; + return rpcSpec; } @@ -112,18 +104,12 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Marker const& marker); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Marker const& marker); }; } // namespace RPCng diff --git a/src/rpc/ngHandlers/NFTInfo.cpp b/src/rpc/ngHandlers/NFTInfo.cpp index 3d7aaa03..6f40efb3 100644 --- a/src/rpc/ngHandlers/NFTInfo.cpp +++ b/src/rpc/ngHandlers/NFTInfo.cpp @@ -34,20 +34,14 @@ NFTInfoHandler::process(NFTInfoHandler::Input input, Context const& ctx) const auto const tokenID = ripple::uint256{input.nftID.c_str()}; auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto const status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; auto const lgrInfo = std::get(lgrInfoOrStatus); - auto const maybeNft = - sharedPtrBackend_->fetchNFT(tokenID, lgrInfo.seq, ctx.yield); + auto const maybeNft = sharedPtrBackend_->fetchNFT(tokenID, lgrInfo.seq, ctx.yield); if (not maybeNft.has_value()) - return Error{ - Status{RippledError::rpcOBJECT_NOT_FOUND, "NFT not found"}}; + return Error{Status{RippledError::rpcOBJECT_NOT_FOUND, "NFT not found"}}; auto const& nft = *maybeNft; auto output = NFTInfoHandler::Output{}; @@ -69,10 +63,7 @@ NFTInfoHandler::process(NFTInfoHandler::Input input, Context const& ctx) const } 
void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - NFTInfoHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, NFTInfoHandler::Output const& output) { // TODO: use JStrings when they become available auto object = boost::json::object{ @@ -95,9 +86,7 @@ tag_invoke( } NFTInfoHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); NFTInfoHandler::Input input; @@ -117,8 +106,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); } } diff --git a/src/rpc/ngHandlers/NFTInfo.h b/src/rpc/ngHandlers/NFTInfo.h index f94ad6e0..c65abc4c 100644 --- a/src/rpc/ngHandlers/NFTInfo.h +++ b/src/rpc/ngHandlers/NFTInfo.h @@ -40,12 +40,10 @@ public: uint32_t transferFee; std::string issuer; uint32_t taxon; - uint32_t - serial; // TODO: documented as 'nft_sequence' atm. - // https://github.com/XRPLF/xrpl-dev-portal/issues/1841 - std::optional - uri; // TODO: documented can be null vs. empty string - // https://github.com/XRPLF/xrpl-dev-portal/issues/1841 + uint32_t serial; // TODO: documented as 'nft_sequence' atm. + // https://github.com/XRPLF/xrpl-dev-portal/issues/1841 + std::optional uri; // TODO: documented can be null vs. empty string + // https://github.com/XRPLF/xrpl-dev-portal/issues/1841 // validated should be sent via framework bool validated = true; @@ -60,8 +58,7 @@ public: using Result = RPCng::HandlerReturnType; - NFTInfoHandler(std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + NFTInfoHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -69,12 +66,11 @@ public: spec() const { static auto const rpcSpec = RpcSpec{ - {JS(nft_id), - validation::Required{}, - validation::Uint256HexStringValidator}, + {JS(nft_id), validation::Required{}, validation::Uint256HexStringValidator}, {JS(ledger_hash), validation::Uint256HexStringValidator}, {JS(ledger_index), validation::LedgerIndexValidator}, }; + return rpcSpec; } @@ -83,10 +79,7 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/NFTOffersCommon.cpp b/src/rpc/ngHandlers/NFTOffersCommon.cpp index e6204cce..83ef3221 100644 --- a/src/rpc/ngHandlers/NFTOffersCommon.cpp +++ b/src/rpc/ngHandlers/NFTOffersCommon.cpp @@ -30,13 +30,9 @@ namespace ripple { // TODO: move to some common serialization impl place inline void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - SLE const& offer) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, SLE const& offer) { - auto amount = ::RPC::toBoostJson( - offer.getFieldAmount(sfAmount).getJson(JsonOptions::none)); + auto amount = ::RPC::toBoostJson(offer.getFieldAmount(sfAmount).getJson(JsonOptions::none)); boost::json::object obj = { {JS(nft_offer_index), to_string(offer.key())}, @@ -46,8 +42,7 @@ tag_invoke( }; if (offer.isFieldPresent(sfDestination)) - obj.insert_or_assign( - JS(destination), 
toBase58(offer.getAccountID(sfDestination))); + obj.insert_or_assign(JS(destination), toBase58(offer.getAccountID(sfDestination))); if (offer.isFieldPresent(sfExpiration)) obj.insert_or_assign(JS(expiration), offer.getFieldU32(sfExpiration)); @@ -67,20 +62,15 @@ NFTOffersHandlerBase::iterateOfferDirectory( boost::asio::yield_context& yield) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); - auto const lgrInfoOrStatus = getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + auto const lgrInfoOrStatus = + getLedgerInfoFromHashOrSeq(*sharedPtrBackend_, yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto const status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; auto const lgrInfo = std::get(lgrInfoOrStatus); // TODO: just check for existence without pulling - if (not sharedPtrBackend_->fetchLedgerObject( - directory.key, lgrInfo.seq, yield)) + if (not sharedPtrBackend_->fetchLedgerObject(directory.key, lgrInfo.seq, yield)) return Error{Status{RippledError::rpcOBJECT_NOT_FOUND, "notFound"}}; auto output = Output{input.nftID}; @@ -95,22 +85,16 @@ NFTOffersHandlerBase::iterateOfferDirectory( // We have a start point. Use limit - 1 from the result and use the // very last one for the resume. - auto const sle = - [this, &cursor, &lgrInfo, &yield]() -> std::shared_ptr { + auto const sle = [this, &cursor, &lgrInfo, &yield]() -> std::shared_ptr { auto const key = keylet::nftoffer(cursor).key; - if (auto const blob = sharedPtrBackend_->fetchLedgerObject( - key, lgrInfo.seq, yield); - blob) + if (auto const blob = sharedPtrBackend_->fetchLedgerObject(key, lgrInfo.seq, yield); blob) { - return std::make_shared( - SerialIter{blob->data(), blob->size()}, key); + return std::make_shared(SerialIter{blob->data(), blob->size()}, key); } return nullptr; }(); - if (!sle || - sle->getFieldU16(ripple::sfLedgerEntryType) != - ripple::ltNFTOKEN_OFFER || + if (!sle || sle->getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_OFFER || tokenID != sle->getFieldH256(ripple::sfNFTokenID)) { return Error{Status{RippledError::rpcINVALID_PARAMS}}; @@ -156,19 +140,13 @@ NFTOffersHandlerBase::iterateOfferDirectory( offers.pop_back(); } - std::move( - std::begin(offers), - std::end(offers), - std::back_inserter(output.offers)); + std::move(std::begin(offers), std::end(offers), std::back_inserter(output.offers)); return std::move(output); } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - NFTOffersHandlerBase::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, NFTOffersHandlerBase::Output const& output) { auto object = boost::json::object{ {JS(nft_id), output.nftID}, @@ -185,9 +163,7 @@ tag_invoke( } NFTOffersHandlerBase::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); NFTOffersHandlerBase::Input input; @@ -207,8 +183,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); } } diff --git a/src/rpc/ngHandlers/NFTOffersCommon.h b/src/rpc/ngHandlers/NFTOffersCommon.h index c4b95da8..1db9ba7c 100644 --- a/src/rpc/ngHandlers/NFTOffersCommon.h +++ b/src/rpc/ngHandlers/NFTOffersCommon.h @@ -53,8 +53,7 
@@ public: using Result = RPCng::HandlerReturnType; - NFTOffersHandlerBase( - std::shared_ptr const& sharedPtrBackend) + NFTOffersHandlerBase(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -63,16 +62,13 @@ public: spec() const { static auto const rpcSpec = RpcSpec{ - {JS(nft_id), - validation::Required{}, - validation::Uint256HexStringValidator}, + {JS(nft_id), validation::Required{}, validation::Uint256HexStringValidator}, {JS(ledger_hash), validation::Uint256HexStringValidator}, {JS(ledger_index), validation::LedgerIndexValidator}, - {JS(limit), - validation::Type{}, - validation::Between{50, 500}}, + {JS(limit), validation::Type{}, validation::Between{50, 500}}, {JS(marker), validation::Uint256HexStringValidator}, }; + return rpcSpec; } @@ -86,10 +82,7 @@ protected: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/NFTSellOffers.cpp b/src/rpc/ngHandlers/NFTSellOffers.cpp index 94754907..cfe20c7e 100644 --- a/src/rpc/ngHandlers/NFTSellOffers.cpp +++ b/src/rpc/ngHandlers/NFTSellOffers.cpp @@ -28,9 +28,7 @@ using namespace ripple; namespace RPCng { NFTSellOffersHandler::Result -NFTSellOffersHandler::process( - NFTSellOffersHandler::Input input, - Context const& ctx) const +NFTSellOffersHandler::process(NFTSellOffersHandler::Input input, Context const& ctx) const { auto const tokenID = uint256{input.nftID.c_str()}; auto const directory = keylet::nft_sells(tokenID); diff --git a/src/rpc/ngHandlers/NFTSellOffers.h b/src/rpc/ngHandlers/NFTSellOffers.h index 0eef66a3..bee7c84c 100644 --- a/src/rpc/ngHandlers/NFTSellOffers.h +++ b/src/rpc/ngHandlers/NFTSellOffers.h @@ -26,8 +26,7 @@ namespace RPCng { class NFTSellOffersHandler : public NFTOffersHandlerBase { public: - NFTSellOffersHandler( - std::shared_ptr const& sharedPtrBackend) + NFTSellOffersHandler(std::shared_ptr const& sharedPtrBackend) : NFTOffersHandlerBase(sharedPtrBackend) { } diff --git a/src/rpc/ngHandlers/NoRippleCheck.cpp b/src/rpc/ngHandlers/NoRippleCheck.cpp index d604a1cd..64b203e5 100644 --- a/src/rpc/ngHandlers/NoRippleCheck.cpp +++ b/src/rpc/ngHandlers/NoRippleCheck.cpp @@ -26,17 +26,11 @@ namespace RPCng { NoRippleCheckHandler::Result -NoRippleCheckHandler::process( - NoRippleCheckHandler::Input input, - Context const& ctx) const +NoRippleCheckHandler::process(NoRippleCheckHandler::Input input, Context const& ctx) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; @@ -44,29 +38,23 @@ NoRippleCheckHandler::process( auto const lgrInfo = std::get(lgrInfoOrStatus); auto const accountID = RPC::accountFromStringStrict(input.account); auto const keylet = ripple::keylet::account(*accountID).key; - auto const accountObj = - sharedPtrBackend_->fetchLedgerObject(keylet, lgrInfo.seq, ctx.yield); + auto const accountObj = sharedPtrBackend_->fetchLedgerObject(keylet, lgrInfo.seq, ctx.yield); if (!accountObj) - return Error{RPC::Status{ - RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; + 
return Error{RPC::Status{RPC::RippledError::rpcACT_NOT_FOUND, "accountNotFound"}}; ripple::SerialIter it{accountObj->data(), accountObj->size()}; ripple::SLE sle{it, keylet}; auto accountSeq = sle.getFieldU32(ripple::sfSequence); - bool const bDefaultRipple = - sle.getFieldU32(ripple::sfFlags) & ripple::lsfDefaultRipple; + bool const bDefaultRipple = sle.getFieldU32(ripple::sfFlags) & ripple::lsfDefaultRipple; - auto const fees = input.transactions - ? sharedPtrBackend_->fetchFees(lgrInfo.seq, ctx.yield) - : std::nullopt; + auto const fees = input.transactions ? sharedPtrBackend_->fetchFees(lgrInfo.seq, ctx.yield) : std::nullopt; auto output = NoRippleCheckHandler::Output(); if (input.transactions) output.transactions.emplace(boost::json::array()); - auto const getBaseTx = [&](ripple::AccountID const& accountID, - std::uint32_t accountSeq) { + auto const getBaseTx = [&](ripple::AccountID const& accountID, std::uint32_t accountSeq) { boost::json::object tx; tx[JS(Sequence)] = accountSeq; tx[JS(Account)] = ripple::toBase58(accountID); @@ -84,8 +72,7 @@ NoRippleCheckHandler::process( } else if (input.roleGateway && !bDefaultRipple) { - output.problems.push_back( - "You should immediately set your default ripple flag"); + output.problems.push_back("You should immediately set your default ripple flag"); if (input.transactions) { auto tx = getBaseTx(*accountID, accountSeq++); @@ -106,11 +93,10 @@ NoRippleCheckHandler::process( // don't push to result if limit is reached if (limit != 0 && ownedItem.getType() == ripple::ltRIPPLE_STATE) { - bool const bLow = accountID == - ownedItem.getFieldAmount(ripple::sfLowLimit).getIssuer(); + bool const bLow = accountID == ownedItem.getFieldAmount(ripple::sfLowLimit).getIssuer(); - bool const bNoRipple = ownedItem.getFieldU32(ripple::sfFlags) & - (bLow ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); + bool const bNoRipple = + ownedItem.getFieldU32(ripple::sfFlags) & (bLow ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple); std::string problem; bool needFix = false; @@ -130,28 +116,21 @@ NoRippleCheckHandler::process( { limit--; ripple::AccountID peer = - ownedItem - .getFieldAmount( - bLow ? ripple::sfHighLimit : ripple::sfLowLimit) - .getIssuer(); - ripple::STAmount peerLimit = ownedItem.getFieldAmount( - bLow ? ripple::sfHighLimit : ripple::sfLowLimit); + ownedItem.getFieldAmount(bLow ? ripple::sfHighLimit : ripple::sfLowLimit).getIssuer(); + ripple::STAmount peerLimit = + ownedItem.getFieldAmount(bLow ? ripple::sfHighLimit : ripple::sfLowLimit); problem += fmt::format( - "{} line to {}", - to_string(peerLimit.getCurrency()), - to_string(peerLimit.getIssuer())); + "{} line to {}", to_string(peerLimit.getCurrency()), to_string(peerLimit.getIssuer())); output.problems.emplace_back(problem); if (input.transactions) { - ripple::STAmount limitAmount(ownedItem.getFieldAmount( - bLow ? ripple::sfLowLimit : ripple::sfHighLimit)); + ripple::STAmount limitAmount( + ownedItem.getFieldAmount(bLow ? ripple::sfLowLimit : ripple::sfHighLimit)); limitAmount.setIssuer(peer); auto tx = getBaseTx(*accountID, accountSeq++); tx[JS(TransactionType)] = "TrustSet"; - tx[JS(LimitAmount)] = RPC::toBoostJson( - limitAmount.getJson(ripple::JsonOptions::none)); - tx[JS(Flags)] = bNoRipple ? ripple::tfClearNoRipple - : ripple::tfSetNoRipple; + tx[JS(LimitAmount)] = RPC::toBoostJson(limitAmount.getJson(ripple::JsonOptions::none)); + tx[JS(Flags)] = bNoRipple ? 
ripple::tfClearNoRipple : ripple::tfSetNoRipple; output.transactions->push_back(tx); } } @@ -165,9 +144,7 @@ NoRippleCheckHandler::process( } NoRippleCheckHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); NoRippleCheckHandler::Input input; @@ -193,8 +170,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str()); } } @@ -202,15 +178,10 @@ tag_invoke( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - NoRippleCheckHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, NoRippleCheckHandler::Output const& output) { auto obj = boost::json::object{ - {JS(ledger_hash), output.ledgerHash}, - {JS(ledger_index), output.ledgerIndex}, - {"problems", output.problems}}; + {JS(ledger_hash), output.ledgerHash}, {JS(ledger_index), output.ledgerIndex}, {"problems", output.problems}}; if (output.transactions) { obj.emplace(JS(transactions), *(output.transactions)); diff --git a/src/rpc/ngHandlers/NoRippleCheck.h b/src/rpc/ngHandlers/NoRippleCheck.h index d94b11b0..1ed8b497 100644 --- a/src/rpc/ngHandlers/NoRippleCheck.h +++ b/src/rpc/ngHandlers/NoRippleCheck.h @@ -54,8 +54,7 @@ public: using Result = RPCng::HandlerReturnType; - NoRippleCheckHandler( - std::shared_ptr const& sharedPtrBackend) + NoRippleCheckHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -69,15 +68,13 @@ public: validation::Required{}, validation::WithCustomError{ validation::OneOf{"gateway", "user"}, - RPC::Status{ - RPC::RippledError::rpcINVALID_PARAMS, - "role field is invalid"}}}, + RPC::Status{RPC::RippledError::rpcINVALID_PARAMS, "role field is invalid"}}}, {JS(ledger_hash), validation::Uint256HexStringValidator}, {JS(ledger_index), validation::LedgerIndexValidator}, - {JS(limit), - validation::Type(), - validation::Between{1, 500}}, - {JS(transactions), validation::Type()}}; + {JS(limit), validation::Type(), validation::Between{1, 500}}, + {JS(transactions), validation::Type()}, + }; + return rpcSpec; } @@ -86,10 +83,7 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/Random.cpp b/src/rpc/ngHandlers/Random.cpp index f7fd2089..73d6b176 100644 --- a/src/rpc/ngHandlers/Random.cpp +++ b/src/rpc/ngHandlers/Random.cpp @@ -35,10 +35,7 @@ RandomHandler::process() const } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - RandomHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, RandomHandler::Output const& output) { jv = { {JS(random), output.random}, diff --git a/src/rpc/ngHandlers/Random.h b/src/rpc/ngHandlers/Random.h index 4226794f..29ebecff 100644 --- a/src/rpc/ngHandlers/Random.h +++ b/src/rpc/ngHandlers/Random.h @@ -42,9 +42,6 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& 
output); }; } // namespace RPCng diff --git a/src/rpc/ngHandlers/TransactionEntry.cpp b/src/rpc/ngHandlers/TransactionEntry.cpp index 87a76907..6588f5ce 100644 --- a/src/rpc/ngHandlers/TransactionEntry.cpp +++ b/src/rpc/ngHandlers/TransactionEntry.cpp @@ -22,24 +22,17 @@ namespace RPCng { TransactionEntryHandler::Result -TransactionEntryHandler::process( - TransactionEntryHandler::Input input, - Context const& ctx) const +TransactionEntryHandler::process(TransactionEntryHandler::Input input, Context const& ctx) const { auto const range = sharedPtrBackend_->fetchLedgerRange(); auto const lgrInfoOrStatus = RPC::getLedgerInfoFromHashOrSeq( - *sharedPtrBackend_, - ctx.yield, - input.ledgerHash, - input.ledgerIndex, - range->maxSequence); + *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence); if (auto status = std::get_if(&lgrInfoOrStatus)) return Error{*status}; auto const lgrInfo = std::get(lgrInfoOrStatus); - auto const dbRet = sharedPtrBackend_->fetchTransaction( - ripple::uint256{input.txHash.c_str()}, ctx.yield); + auto const dbRet = sharedPtrBackend_->fetchTransaction(ripple::uint256{input.txHash.c_str()}, ctx.yield); // Note: transaction_entry is meant to only search a specified ledger for // the specified transaction. tx searches the entire range of history. For // rippled, having two separate commands made sense, as tx would use SQLite @@ -50,10 +43,7 @@ TransactionEntryHandler::process( // ledger; we simulate that here by returning not found if the transaction // is in a different ledger than the one specified. if (!dbRet || dbRet->ledgerSequence != lgrInfo.seq) - return Error{RPC::Status{ - RPC::RippledError::rpcTXN_NOT_FOUND, - "transactionNotFound", - "Transaction not found."}}; + return Error{RPC::Status{RPC::RippledError::rpcTXN_NOT_FOUND, "transactionNotFound", "Transaction not found."}}; auto [txn, meta] = RPC::toExpandedJson(*dbRet); TransactionEntryHandler::Output output; @@ -65,10 +55,7 @@ TransactionEntryHandler::process( } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - TransactionEntryHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, TransactionEntryHandler::Output const& output) { jv = { {JS(metadata), output.metadata}, @@ -79,9 +66,7 @@ tag_invoke( } TransactionEntryHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { auto const& jsonObject = jv.as_object(); @@ -99,8 +84,7 @@ tag_invoke( } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") { - input.ledgerIndex = - std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); + input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str()); } } return input; diff --git a/src/rpc/ngHandlers/TransactionEntry.h b/src/rpc/ngHandlers/TransactionEntry.h index 2bb329b5..188dd77c 100644 --- a/src/rpc/ngHandlers/TransactionEntry.h +++ b/src/rpc/ngHandlers/TransactionEntry.h @@ -50,8 +50,7 @@ public: using Result = RPCng::HandlerReturnType; - TransactionEntryHandler( - std::shared_ptr const& sharedPtrBackend) + TransactionEntryHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -60,11 +59,11 @@ public: spec() const { static auto const rpcSpec = RpcSpec{ - {JS(tx_hash), - validation::Required{}, - validation::Uint256HexStringValidator}, + {JS(tx_hash), validation::Required{}, validation::Uint256HexStringValidator}, {JS(ledger_hash), 
validation::Uint256HexStringValidator}, - {JS(ledger_index), validation::LedgerIndexValidator}}; + {JS(ledger_index), validation::LedgerIndexValidator}, + }; + return rpcSpec; } @@ -73,10 +72,7 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, boost::json::value const& jv); diff --git a/src/rpc/ngHandlers/Tx.cpp b/src/rpc/ngHandlers/Tx.cpp index 5dd627c0..c6c77324 100644 --- a/src/rpc/ngHandlers/Tx.cpp +++ b/src/rpc/ngHandlers/Tx.cpp @@ -34,23 +34,20 @@ TxHandler::process(Input input, Context const& ctx) const return Error{RPC::Status{RPC::RippledError::rpcINVALID_LGR_RANGE}}; if (*input.maxLedger - *input.minLedger > maxLedgerRange) - return Error{ - RPC::Status{RPC::RippledError::rpcEXCESSIVE_LGR_RANGE}}; + return Error{RPC::Status{RPC::RippledError::rpcEXCESSIVE_LGR_RANGE}}; } TxHandler::Output output; - auto const dbResponse = sharedPtrBackend_->fetchTransaction( - ripple::uint256{std::string_view(input.transaction)}, ctx.yield); + auto const dbResponse = + sharedPtrBackend_->fetchTransaction(ripple::uint256{std::string_view(input.transaction)}, ctx.yield); if (!dbResponse) { if (rangeSupplied) { auto const range = sharedPtrBackend_->fetchLedgerRange(); - auto const searchedAll = range->maxSequence >= *input.maxLedger && - range->minSequence <= *input.minLedger; + auto const searchedAll = range->maxSequence >= *input.maxLedger && range->minSequence <= *input.minLedger; boost::json::object extra; extra["searched_all"] = searchedAll; - return Error{RPC::Status{ - RPC::RippledError::rpcTXN_NOT_FOUND, std::move(extra)}}; + return Error{RPC::Status{RPC::RippledError::rpcTXN_NOT_FOUND, std::move(extra)}}; } return Error{RPC::Status{RPC::RippledError::rpcTXN_NOT_FOUND}}; } @@ -75,10 +72,7 @@ TxHandler::process(Input input, Context const& ctx) const } void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - TxHandler::Output const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, TxHandler::Output const& output) { auto obj = boost::json::object{}; if (output.tx) @@ -98,9 +92,7 @@ tag_invoke( } TxHandler::Input -tag_invoke( - boost::json::value_to_tag, - boost::json::value const& jv) +tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) { TxHandler::Input input; auto const& jsonObject = jv.as_object(); diff --git a/src/rpc/ngHandlers/Tx.h b/src/rpc/ngHandlers/Tx.h index 0b0dc26f..95163aeb 100644 --- a/src/rpc/ngHandlers/Tx.h +++ b/src/rpc/ngHandlers/Tx.h @@ -52,8 +52,7 @@ public: using Result = RPCng::HandlerReturnType; - TxHandler(std::shared_ptr const& sharedPtrBackend) - : sharedPtrBackend_(sharedPtrBackend) + TxHandler(std::shared_ptr const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend) { } @@ -61,9 +60,7 @@ public: spec() const { static const RpcSpec rpcSpec = { - {"transaction", - validation::Required{}, - validation::Uint256HexStringValidator}, + {"transaction", validation::Required{}, validation::Uint256HexStringValidator}, {"binary", validation::Type{}}, {"min_ledger", validation::Type{}}, {"max_ledger", validation::Type{}}, @@ -77,10 +74,7 @@ public: private: friend void - tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - Output const& output); + tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output); friend Input tag_invoke(boost::json::value_to_tag, 
boost::json::value const& jv); diff --git a/src/subscriptions/SubscriptionManager.cpp b/src/subscriptions/SubscriptionManager.cpp index 273faafd..5da953f3 100644 --- a/src/subscriptions/SubscriptionManager.cpp +++ b/src/subscriptions/SubscriptionManager.cpp @@ -24,25 +24,19 @@ void Subscription::subscribe(std::shared_ptr const& session) { - boost::asio::post(strand_, [this, session]() { - addSession(session, subscribers_, subCount_); - }); + boost::asio::post(strand_, [this, session]() { addSession(session, subscribers_, subCount_); }); } void Subscription::unsubscribe(std::shared_ptr const& session) { - boost::asio::post(strand_, [this, session]() { - removeSession(session, subscribers_, subCount_); - }); + boost::asio::post(strand_, [this, session]() { removeSession(session, subscribers_, subCount_); }); } void Subscription::publish(std::shared_ptr const& message) { - boost::asio::post(strand_, [this, message]() { - sendToSubscribers(message, subscribers_, subCount_); - }); + boost::asio::post(strand_, [this, message]() { sendToSubscribers(message, subscribers_, subCount_); }); } boost::json::object @@ -70,26 +64,20 @@ getLedgerPubMessage( } boost::json::object -SubscriptionManager::subLedger( - boost::asio::yield_context& yield, - std::shared_ptr session) +SubscriptionManager::subLedger(boost::asio::yield_context& yield, std::shared_ptr session) { - subscribeHelper(session, ledgerSubscribers_, [this](session_ptr session) { - unsubLedger(session); - }); + subscribeHelper(session, ledgerSubscribers_, [this](session_ptr session) { unsubLedger(session); }); auto ledgerRange = backend_->fetchLedgerRange(); assert(ledgerRange); - auto lgrInfo = - backend_->fetchLedgerBySequence(ledgerRange->maxSequence, yield); + auto lgrInfo = backend_->fetchLedgerBySequence(ledgerRange->maxSequence, yield); assert(lgrInfo); std::optional fees; fees = backend_->fetchFees(lgrInfo->seq, yield); assert(fees); - std::string range = std::to_string(ledgerRange->minSequence) + "-" + - std::to_string(ledgerRange->maxSequence); + std::string range = std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence); auto pubMsg = getLedgerPubMessage(*lgrInfo, *fees, range, 0); pubMsg.erase("txn_count"); @@ -106,9 +94,7 @@ SubscriptionManager::unsubLedger(std::shared_ptr session) void SubscriptionManager::subTransactions(std::shared_ptr session) { - subscribeHelper(session, txSubscribers_, [this](session_ptr session) { - unsubTransactions(session); - }); + subscribeHelper(session, txSubscribers_, [this](session_ptr session) { unsubTransactions(session); }); } void @@ -118,42 +104,27 @@ SubscriptionManager::unsubTransactions(std::shared_ptr session) } void -SubscriptionManager::subAccount( - ripple::AccountID const& account, - std::shared_ptr& session) +SubscriptionManager::subAccount(ripple::AccountID const& account, std::shared_ptr& session) { - subscribeHelper( - session, - account, - accountSubscribers_, - [this, account](session_ptr session) { - unsubAccount(account, session); - }); + subscribeHelper(session, account, accountSubscribers_, [this, account](session_ptr session) { + unsubAccount(account, session); + }); } void -SubscriptionManager::unsubAccount( - ripple::AccountID const& account, - std::shared_ptr& session) +SubscriptionManager::unsubAccount(ripple::AccountID const& account, std::shared_ptr& session) { accountSubscribers_.unsubscribe(session, account); } void -SubscriptionManager::subBook( - ripple::Book const& book, - std::shared_ptr session) 
+SubscriptionManager::subBook(ripple::Book const& book, std::shared_ptr session) { - subscribeHelper( - session, book, bookSubscribers_, [this, book](session_ptr session) { - unsubBook(book, session); - }); + subscribeHelper(session, book, bookSubscribers_, [this, book](session_ptr session) { unsubBook(book, session); }); } void -SubscriptionManager::unsubBook( - ripple::Book const& book, - std::shared_ptr session) +SubscriptionManager::unsubBook(ripple::Book const& book, std::shared_ptr session) { bookSubscribers_.unsubscribe(session, book); } @@ -161,10 +132,7 @@ SubscriptionManager::unsubBook( void SubscriptionManager::subBookChanges(std::shared_ptr session) { - subscribeHelper( - session, bookChangesSubscribers_, [this](session_ptr session) { - unsubBookChanges(session); - }); + subscribeHelper(session, bookChangesSubscribers_, [this](session_ptr session) { unsubBookChanges(session); }); } void @@ -180,31 +148,27 @@ SubscriptionManager::pubLedger( std::string const& ledgerRange, std::uint32_t txnCount) { - auto message = std::make_shared(boost::json::serialize( - getLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount))); + auto message = + std::make_shared(boost::json::serialize(getLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount))); ledgerSubscribers_.publish(message); } void -SubscriptionManager::pubTransaction( - Backend::TransactionAndMetadata const& blobs, - ripple::LedgerInfo const& lgrInfo) +SubscriptionManager::pubTransaction(Backend::TransactionAndMetadata const& blobs, ripple::LedgerInfo const& lgrInfo) { auto [tx, meta] = RPC::deserializeTxPlusMeta(blobs, lgrInfo.seq); boost::json::object pubObj; pubObj["transaction"] = RPC::toJson(*tx); pubObj["meta"] = RPC::toJson(*meta); - RPC::insertDeliveredAmount( - pubObj["meta"].as_object(), tx, meta, blobs.date); + RPC::insertDeliveredAmount(pubObj["meta"].as_object(), tx, meta, blobs.date); pubObj["type"] = "transaction"; pubObj["validated"] = true; pubObj["status"] = "closed"; pubObj["ledger_index"] = lgrInfo.seq; pubObj["ledger_hash"] = ripple::strHex(lgrInfo.hash); - pubObj["transaction"].as_object()["date"] = - lgrInfo.closeTime.time_since_epoch().count(); + pubObj["transaction"].as_object()["date"] = lgrInfo.closeTime.time_since_epoch().count(); pubObj["engine_result_code"] = meta->getResult(); std::string token; @@ -221,15 +185,13 @@ SubscriptionManager::pubTransaction( ripple::STAmount ownerFunds; auto fetchFundsSynchronous = [&]() { Backend::synchronous([&](boost::asio::yield_context& yield) { - ownerFunds = RPC::accountFunds( - *backend_, lgrInfo.seq, amount, account, yield); + ownerFunds = RPC::accountFunds(*backend_, lgrInfo.seq, amount, account, yield); }); }; Backend::retryOnTimeout(fetchFundsSynchronous); - pubObj["transaction"].as_object()["owner_funds"] = - ownerFunds.getText(); + pubObj["transaction"].as_object()["owner_funds"] = ownerFunds.getText(); } } @@ -260,11 +222,9 @@ SubscriptionManager::pubTransaction( if (field) { - auto data = dynamic_cast( - node.peekAtPField(*field)); + auto data = dynamic_cast(node.peekAtPField(*field)); - if (data && data->isFieldPresent(ripple::sfTakerPays) && - data->isFieldPresent(ripple::sfTakerGets)) + if (data && data->isFieldPresent(ripple::sfTakerPays) && data->isFieldPresent(ripple::sfTakerGets)) { // determine the OrderBook ripple::Book book{ @@ -287,14 +247,12 @@ SubscriptionManager::pubBookChanges( std::vector const& transactions) { auto const json = RPC::computeBookChanges(lgrInfo, transactions); - auto const bookChangesMsg = - 
std::make_shared(boost::json::serialize(json)); + auto const bookChangesMsg = std::make_shared(boost::json::serialize(json)); bookChangesSubscribers_.publish(bookChangesMsg); } void -SubscriptionManager::forwardProposedTransaction( - boost::json::object const& response) +SubscriptionManager::forwardProposedTransaction(boost::json::object const& response) { auto pubMsg = std::make_shared(boost::json::serialize(response)); txProposedSubscribers_.publish(pubMsg); @@ -321,25 +279,17 @@ SubscriptionManager::forwardValidation(boost::json::object const& response) } void -SubscriptionManager::subProposedAccount( - ripple::AccountID const& account, - std::shared_ptr session) +SubscriptionManager::subProposedAccount(ripple::AccountID const& account, std::shared_ptr session) { - subscribeHelper( - session, - account, - accountProposedSubscribers_, - [this, account](session_ptr session) { - unsubProposedAccount(account, session); - }); + subscribeHelper(session, account, accountProposedSubscribers_, [this, account](session_ptr session) { + unsubProposedAccount(account, session); + }); } void SubscriptionManager::subManifest(std::shared_ptr session) { - subscribeHelper(session, manifestSubscribers_, [this](session_ptr session) { - unsubManifest(session); - }); + subscribeHelper(session, manifestSubscribers_, [this](session_ptr session) { unsubManifest(session); }); } void @@ -351,10 +301,7 @@ SubscriptionManager::unsubManifest(std::shared_ptr session) void SubscriptionManager::subValidation(std::shared_ptr session) { - subscribeHelper( - session, validationsSubscribers_, [this](session_ptr session) { - unsubValidation(session); - }); + subscribeHelper(session, validationsSubscribers_, [this](session_ptr session) { unsubValidation(session); }); } void @@ -364,9 +311,7 @@ SubscriptionManager::unsubValidation(std::shared_ptr session) } void -SubscriptionManager::unsubProposedAccount( - ripple::AccountID const& account, - std::shared_ptr session) +SubscriptionManager::unsubProposedAccount(ripple::AccountID const& account, std::shared_ptr session) { accountProposedSubscribers_.unsubscribe(session, account); } @@ -375,9 +320,7 @@ void SubscriptionManager::subProposedTransactions(std::shared_ptr session) { subscribeHelper( - session, txProposedSubscribers_, [this](session_ptr session) { - unsubProposedTransactions(session); - }); + session, txProposedSubscribers_, [this](session_ptr session) { unsubProposedTransactions(session); }); } void @@ -386,10 +329,7 @@ SubscriptionManager::unsubProposedTransactions(std::shared_ptr session) txProposedSubscribers_.unsubscribe(session); } void -SubscriptionManager::subscribeHelper( - std::shared_ptr& session, - Subscription& subs, - CleanupFunction&& func) +SubscriptionManager::subscribeHelper(std::shared_ptr& session, Subscription& subs, CleanupFunction&& func) { subs.subscribe(session); std::scoped_lock lk(cleanupMtx_); diff --git a/src/subscriptions/SubscriptionManager.h b/src/subscriptions/SubscriptionManager.h index 70ef3d7b..cebb60bc 100644 --- a/src/subscriptions/SubscriptionManager.h +++ b/src/subscriptions/SubscriptionManager.h @@ -106,10 +106,7 @@ public: template inline void -sendToSubscribers( - std::shared_ptr const& message, - T& subscribers, - std::atomic_uint64_t& counter) +sendToSubscribers(std::shared_ptr const& message, T& subscribers, std::atomic_uint64_t& counter) { for (auto it = subscribers.begin(); it != subscribers.end();) { @@ -129,10 +126,7 @@ sendToSubscribers( template inline void -addSession( - std::shared_ptr session, - T& subscribers, - 
std::atomic_uint64_t& counter) +addSession(std::shared_ptr session, T& subscribers, std::atomic_uint64_t& counter) { if (!subscribers.contains(session)) { @@ -143,10 +137,7 @@ addSession( template inline void -removeSession( - std::shared_ptr session, - T& subscribers, - std::atomic_uint64_t& counter) +removeSession(std::shared_ptr session, T& subscribers, std::atomic_uint64_t& counter) { if (subscribers.contains(session)) { @@ -157,20 +148,14 @@ removeSession( template void -SubscriptionMap::subscribe( - std::shared_ptr const& session, - Key const& account) +SubscriptionMap::subscribe(std::shared_ptr const& session, Key const& account) { - boost::asio::post(strand_, [this, session, account]() { - addSession(session, subscribers_[account], subCount_); - }); + boost::asio::post(strand_, [this, session, account]() { addSession(session, subscribers_[account], subCount_); }); } template void -SubscriptionMap::unsubscribe( - std::shared_ptr const& session, - Key const& account) +SubscriptionMap::unsubscribe(std::shared_ptr const& session, Key const& account) { boost::asio::post(strand_, [this, account, session]() { if (!subscribers_.contains(account)) @@ -192,9 +177,7 @@ SubscriptionMap::unsubscribe( template void -SubscriptionMap::publish( - std::shared_ptr const& message, - Key const& account) +SubscriptionMap::publish(std::shared_ptr const& message, Key const& account) { boost::asio::post(strand_, [this, account, message]() { if (!subscribers_.contains(account)) @@ -228,17 +211,13 @@ class SubscriptionManager public: static std::shared_ptr - make_SubscriptionManager( - clio::Config const& config, - std::shared_ptr const& b) + make_SubscriptionManager(clio::Config const& config, std::shared_ptr const& b) { auto numThreads = config.valueOr("subscription_workers", 1); return std::make_shared(numThreads, b); } - SubscriptionManager( - std::uint64_t numThreads, - std::shared_ptr const& b) + SubscriptionManager(std::uint64_t numThreads, std::shared_ptr const& b) : ledgerSubscribers_(ioc_) , txSubscribers_(ioc_) , txProposedSubscribers_(ioc_) @@ -255,8 +234,7 @@ public: // We will eventually want to clamp this to be the number of strands, // since adding more threads than we have strands won't see any // performance benefits - log_.info() << "Starting subscription manager with " << numThreads - << " workers"; + log_.info() << "Starting subscription manager with " << numThreads << " workers"; workers_.reserve(numThreads); for (auto i = numThreads; i > 0; --i) @@ -283,9 +261,7 @@ public: std::uint32_t txnCount); void - pubBookChanges( - ripple::LedgerInfo const& lgrInfo, - std::vector const& transactions); + pubBookChanges(ripple::LedgerInfo const& lgrInfo, std::vector const& transactions); void unsubLedger(session_ptr session); @@ -297,9 +273,7 @@ public: unsubTransactions(session_ptr session); void - pubTransaction( - Backend::TransactionAndMetadata const& blobs, - ripple::LedgerInfo const& lgrInfo); + pubTransaction(Backend::TransactionAndMetadata const& blobs, ripple::LedgerInfo const& lgrInfo); void subAccount(ripple::AccountID const& account, session_ptr& session); @@ -380,18 +354,11 @@ private: using CleanupFunction = std::function; void - subscribeHelper( - std::shared_ptr& session, - Subscription& subs, - CleanupFunction&& func); + subscribeHelper(std::shared_ptr& session, Subscription& subs, CleanupFunction&& func); template void - subscribeHelper( - std::shared_ptr& session, - Key const& k, - SubscriptionMap& subs, - CleanupFunction&& func); + subscribeHelper(std::shared_ptr& session, Key 
const& k, SubscriptionMap& subs, CleanupFunction&& func); /** * This is how we chose to cleanup subscriptions that have been closed. @@ -400,6 +367,5 @@ private: * closed. */ std::mutex cleanupMtx_; - std::unordered_map> - cleanupFuncs_ = {}; + std::unordered_map> cleanupFuncs_ = {}; }; diff --git a/src/util/Expected.h b/src/util/Expected.h index 2a2afee2..c9d1e517 100644 --- a/src/util/Expected.h +++ b/src/util/Expected.h @@ -143,16 +143,12 @@ class Expected : private boost::outcome_v2::result using Base = boost::outcome_v2::result; public: - template < - typename U, - typename = std::enable_if_t>> + template >> constexpr Expected(U r) : Base(T(std::forward(r))) { } - template < - typename U, - typename = std::enable_if_t>> + template >> constexpr Expected(Unexpected e) : Base(E(std::forward(e.value()))) { } @@ -223,8 +219,7 @@ public: // Specialization of Expected. Allows returning either success // (without a value) or the reason for the failure. template -class [[nodiscard]] Expected - : private boost::outcome_v2::result +class [[nodiscard]] Expected : private boost::outcome_v2::result { using Base = boost::outcome_v2::result; @@ -235,9 +230,7 @@ public: { } - template < - typename U, - typename = std::enable_if_t>> + template >> constexpr Expected(Unexpected e) : Base(E(std::forward(e.value()))) { } diff --git a/src/util/Profiler.h b/src/util/Profiler.h index ff89f5bb..8d8bde13 100644 --- a/src/util/Profiler.h +++ b/src/util/Profiler.h @@ -42,16 +42,12 @@ timed(F&& func) if constexpr (std::is_same_v) { func(); - return std::chrono::duration_cast( - std::chrono::system_clock::now() - start) - .count(); + return std::chrono::duration_cast(std::chrono::system_clock::now() - start).count(); } else { auto ret = func(); - auto elapsed = std::chrono::duration_cast( - std::chrono::system_clock::now() - start) - .count(); + auto elapsed = std::chrono::duration_cast(std::chrono::system_clock::now() - start).count(); return std::make_pair(ret, elapsed); } } diff --git a/src/util/Taggable.cpp b/src/util/Taggable.cpp index cb136367..07893c2f 100644 --- a/src/util/Taggable.cpp +++ b/src/util/Taggable.cpp @@ -56,11 +56,9 @@ TagDecoratorFactory::make() const switch (type_) { case Type::UINT: - return std::make_unique>( - parent_); + return std::make_unique>(parent_); case Type::UUID: - return std::make_unique>( - parent_); + return std::make_unique>(parent_); case Type::NONE: default: return std::make_unique>(); diff --git a/src/util/Taggable.h b/src/util/Taggable.h index 7c899a74..e74058eb 100644 --- a/src/util/Taggable.h +++ b/src/util/Taggable.h @@ -103,8 +103,7 @@ public: template class TagDecorator final : public BaseTagDecorator { - using parent_t = - std::optional>; + using parent_t = std::optional>; using tag_t = typename Generator::tag_t; parent_t parent_ = std::nullopt; @@ -169,8 +168,7 @@ public: */ class TagDecoratorFactory final { - using parent_t = - std::optional>; + using parent_t = std::optional>; /** * @brief Represents the type of tag decorator @@ -181,7 +179,7 @@ class TagDecoratorFactory final UINT /*! atomic_uint64_t tag, thread-safe, lock-free */ }; - Type type_; /*! The type of TagDecorator this factory produces */ + Type type_; /*! The type of TagDecorator this factory produces */ parent_t parent_ = std::nullopt; /*! The parent tag decorator to bind */ public: @@ -191,14 +189,12 @@ public: * @brief Instantiates a tag decorator factory from `clio` configuration. 
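Editorial aside on the Profiler.h hunk reformatted above: util::timed() wraps a callable and returns just the elapsed count for void callables, or a {result, elapsed} pair otherwise, which is how the call sites later in this patch (`auto [v, timeDiff] = util::timed(...)`) destructure it. A minimal standalone sketch of that pattern follows; the duration unit is an assumption (the duration_cast template argument is not visible in this diff) and `timedSketch` is an illustrative name, not clio's API.

#include <chrono>
#include <type_traits>
#include <utility>

// Sketch only: measures how long `func` takes; returns elapsed count for void
// callables, or a {result, elapsed} pair otherwise.
template <typename Unit = std::chrono::milliseconds, typename F>
auto
timedSketch(F&& func)
{
    auto const start = std::chrono::system_clock::now();
    if constexpr (std::is_same_v<decltype(func()), void>)
    {
        func();
        return std::chrono::duration_cast<Unit>(std::chrono::system_clock::now() - start).count();
    }
    else
    {
        auto ret = func();
        auto const elapsed = std::chrono::duration_cast<Unit>(std::chrono::system_clock::now() - start).count();
        return std::make_pair(std::move(ret), elapsed);
    }
}

// Usage mirroring the call sites reformatted later in this patch, e.g.:
//   auto [v, timeDiff] = timedSketch([&] { return RPC::buildResponse(*context); });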
* @param config The configuration as a json object */ - explicit TagDecoratorFactory(clio::Config const& config) - : type_{config.valueOr("log_tag_style", Type::NONE)} + explicit TagDecoratorFactory(clio::Config const& config) : type_{config.valueOr("log_tag_style", Type::NONE)} { } private: - TagDecoratorFactory(Type type, parent_t parent) noexcept - : type_{type}, parent_{parent} + TagDecoratorFactory(Type type, parent_t parent) noexcept : type_{type}, parent_{parent} { } @@ -257,8 +253,7 @@ protected: * @brief New Taggable from a specified factory * @param tagFactory The factory to use */ - explicit Taggable(util::TagDecoratorFactory const& tagFactory) - : tagDecorator_{tagFactory.make()} + explicit Taggable(util::TagDecoratorFactory const& tagFactory) : tagDecorator_{tagFactory.make()} { } diff --git a/src/webserver/DOSGuard.h b/src/webserver/DOSGuard.h index 502b3fb4..2e829f15 100644 --- a/src/webserver/DOSGuard.h +++ b/src/webserver/DOSGuard.h @@ -117,10 +117,8 @@ public: auto [transferedByte, requests] = ipState_.at(ip); if (transferedByte > maxFetches_ || requests > maxRequestCount_) { - log_.warn() - << "Dosguard:Client surpassed the rate limit. ip = " - << ip << " Transfered Byte:" << transferedByte - << " Requests:" << requests; + log_.warn() << "Dosguard:Client surpassed the rate limit. ip = " << ip + << " Transfered Byte:" << transferedByte << " Requests:" << requests; return false; } } @@ -129,9 +127,8 @@ public: { if (it->second > maxConnCount_) { - log_.warn() - << "Dosguard:Client surpassed the rate limit. ip = " - << ip << " Concurrent connection:" << it->second; + log_.warn() << "Dosguard:Client surpassed the rate limit. ip = " << ip + << " Concurrent connection:" << it->second; return false; } } @@ -238,9 +235,7 @@ private: { using T = std::unordered_set const; auto whitelist = config.arrayOr("dos_guard.whitelist", {}); - auto const transform = [](auto const& elem) { - return elem.template value(); - }; + auto const transform = [](auto const& elem) { return elem.template value(); }; return T{ boost::transform_iterator(std::begin(whitelist), transform), boost::transform_iterator(std::end(whitelist), transform)}; @@ -265,13 +260,8 @@ public: * @param config Clio config * @param ctx The boost::asio::io_context */ - IntervalSweepHandler( - clio::Config const& config, - boost::asio::io_context& ctx) - : sweepInterval_{std::max( - 1u, - static_cast( - config.valueOr("dos_guard.sweep_interval", 1.0) * 1000.0))} + IntervalSweepHandler(clio::Config const& config, boost::asio::io_context& ctx) + : sweepInterval_{std::max(1u, static_cast(config.valueOr("dos_guard.sweep_interval", 1.0) * 1000.0))} , ctx_{std::ref(ctx)} { } diff --git a/src/webserver/HttpBase.h b/src/webserver/HttpBase.h index 33172ed4..a7d6e804 100644 --- a/src/webserver/HttpBase.h +++ b/src/webserver/HttpBase.h @@ -89,8 +89,7 @@ class HttpBase : public util::Taggable // The lifetime of the message has to extend // for the duration of the async operation so // we use a shared_ptr to manage it. - auto sp = std::make_shared>( - std::move(msg)); + auto sp = std::make_shared>(std::move(msg)); // Store a type-erased version of the shared // pointer in the class to keep it alive. 
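Editorial aside on the send_lambda hunk above: the serialized response is moved into a shared_ptr and a copy of that pointer is kept until on_write runs, because Beast's async_write only borrows the message and the caller must keep it alive for the whole asynchronous operation. Below is a minimal standalone sketch of the same lifetime pattern, assuming Boost.Beast; `send_response` is an illustrative name, not clio's API.

#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include <boost/core/ignore_unused.hpp>
#include <cstddef>
#include <memory>
#include <utility>

namespace http = boost::beast::http;

template <typename Stream>
void
send_response(Stream& stream, http::response<http::string_body>&& msg)
{
    // async_write does not take ownership of the message, so move it into a
    // shared_ptr and capture that pointer in the completion handler to keep
    // the buffers alive until the write finishes.
    auto sp = std::make_shared<http::response<http::string_body>>(std::move(msg));

    http::async_write(stream, *sp, [sp](boost::beast::error_code ec, std::size_t bytesTransferred) {
        boost::ignore_unused(ec, bytesTransferred);
        // sp (and the response it owns) is released only once this handler runs.
    });
}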
@@ -101,9 +100,7 @@ class HttpBase : public util::Taggable self_.derived().stream(), *sp, boost::beast::bind_front_handler( - &HttpBase::on_write, - self_.derived().shared_from_this(), - sp->need_eof())); + &HttpBase::on_write, self_.derived().shared_from_this(), sp->need_eof())); } }; @@ -160,9 +157,7 @@ protected: { ec_ = ec; perfLog_.info() << tag() << ": " << what << ": " << ec.message(); - boost::beast::get_lowest_layer(derived().stream()) - .socket() - .close(ec); + boost::beast::get_lowest_layer(derived().stream()).socket().close(ec); } } @@ -215,16 +210,14 @@ public: req_ = {}; // Set the timeout. - boost::beast::get_lowest_layer(derived().stream()) - .expires_after(std::chrono::seconds(30)); + boost::beast::get_lowest_layer(derived().stream()).expires_after(std::chrono::seconds(30)); // Read a request http::async_read( derived().stream(), buffer_, req_, - boost::beast::bind_front_handler( - &HttpBase::on_read, derived().shared_from_this())); + boost::beast::bind_front_handler(&HttpBase::on_read, derived().shared_from_this())); } void @@ -246,13 +239,9 @@ public: return; } - auto const httpResponse = [&](http::status status, - std::string content_type, - std::string message) { + auto const httpResponse = [&](http::status status, std::string content_type, std::string message) { http::response res{status, req_.version()}; - res.set( - http::field::server, - "clio-server-" + Build::getClioVersionString()); + res.set(http::field::server, "clio-server-" + Build::getClioVersionString()); res.set(http::field::content_type, content_type); res.keep_alive(req_.keep_alive()); res.body() = std::string(message); @@ -287,14 +276,10 @@ public: // connection limit if (!dosGuard_.request(ip.value())) { - return lambda_(httpResponse( - http::status::service_unavailable, - "text/plain", - "Server is overloaded")); + return lambda_(httpResponse(http::status::service_unavailable, "text/plain", "Server is overloaded")); } - log_.info() << tag() << "Received request from ip = " << *ip - << " - posting to WorkQueue"; + log_.info() << tag() << "Received request from ip = " << *ip << " - posting to WorkQueue"; auto session = derived().shared_from_this(); @@ -324,16 +309,12 @@ public: lambda_(httpResponse( http::status::ok, "application/json", - boost::json::serialize( - RPC::makeError(RPC::RippledError::rpcTOO_BUSY)))); + boost::json::serialize(RPC::makeError(RPC::RippledError::rpcTOO_BUSY)))); } } void - on_write( - bool close, - boost::beast::error_code ec, - std::size_t bytes_transferred) + on_write(bool close, boost::beast::error_code ec, std::size_t bytes_transferred) { boost::ignore_unused(bytes_transferred); @@ -363,8 +344,7 @@ template void handle_request( boost::asio::yield_context& yc, - boost::beast::http:: - request>&& req, + boost::beast::http::request>&& req, Send&& send, std::shared_ptr backend, std::shared_ptr subscriptions, @@ -377,14 +357,9 @@ handle_request( std::shared_ptr http, clio::Logger& perfLog) { - auto const httpResponse = [&req]( - http::status status, - std::string content_type, - std::string message) { + auto const httpResponse = [&req](http::status status, std::string content_type, std::string message) { http::response res{status, req.version()}; - res.set( - http::field::server, - "clio-server-" + Build::getClioVersionString()); + res.set(http::field::server, "clio-server-" + Build::getClioVersionString()); res.set(http::field::content_type, content_type); res.keep_alive(req.keep_alive()); res.body() = std::string(message); @@ -399,14 +374,11 @@ handle_request( } if 
(req.method() != http::verb::post) - return send(httpResponse( - http::status::bad_request, "text/html", "Expected a POST request")); + return send(httpResponse(http::status::bad_request, "text/html", "Expected a POST request")); try { - perfLog.debug() << http->tag() - << "http received request from work queue: " - << req.body(); + perfLog.debug() << http->tag() << "http received request from work queue: " << req.body(); boost::json::object request; std::string responseStr = ""; @@ -422,8 +394,7 @@ handle_request( return send(httpResponse( http::status::ok, "application/json", - boost::json::serialize( - RPC::makeError(RPC::RippledError::rpcBAD_SYNTAX)))); + boost::json::serialize(RPC::makeError(RPC::RippledError::rpcBAD_SYNTAX)))); } auto range = backend->fetchLedgerRange(); @@ -431,8 +402,7 @@ handle_request( return send(httpResponse( http::status::ok, "application/json", - boost::json::serialize( - RPC::makeError(RPC::RippledError::rpcNOT_READY)))); + boost::json::serialize(RPC::makeError(RPC::RippledError::rpcNOT_READY)))); std::optional context = RPC::make_HttpContext( yc, @@ -450,12 +420,10 @@ handle_request( return send(httpResponse( http::status::ok, "application/json", - boost::json::serialize( - RPC::makeError(RPC::RippledError::rpcBAD_SYNTAX)))); + boost::json::serialize(RPC::makeError(RPC::RippledError::rpcBAD_SYNTAX)))); boost::json::object response; - auto [v, timeDiff] = - util::timed([&]() { return RPC::buildResponse(*context); }); + auto [v, timeDiff] = util::timed([&]() { return RPC::buildResponse(*context); }); auto us = std::chrono::duration(timeDiff); RPC::logDuration(*context, us); @@ -467,8 +435,7 @@ handle_request( error["request"] = request; response["result"] = error; - perfLog.debug() - << http->tag() << "Encountered error: " << responseStr; + perfLog.debug() << http->tag() << "Encountered error: " << responseStr; } else { @@ -502,8 +469,7 @@ handle_request( // reserialize when we need to include this warning responseStr = boost::json::serialize(response); } - return send( - httpResponse(http::status::ok, "application/json", responseStr)); + return send(httpResponse(http::status::ok, "application/json", responseStr)); } catch (std::exception const& e) { @@ -511,7 +477,6 @@ handle_request( return send(httpResponse( http::status::internal_server_error, "application/json", - boost::json::serialize( - RPC::makeError(RPC::RippledError::rpcINTERNAL)))); + boost::json::serialize(RPC::makeError(RPC::RippledError::rpcINTERNAL)))); } } diff --git a/src/webserver/HttpSession.h b/src/webserver/HttpSession.h index 7c0eac66..7e9eca26 100644 --- a/src/webserver/HttpSession.h +++ b/src/webserver/HttpSession.h @@ -27,8 +27,7 @@ namespace ssl = boost::asio::ssl; using tcp = boost::asio::ip::tcp; // Handles an HTTP server connection -class HttpSession : public HttpBase, - public std::enable_shared_from_this +class HttpSession : public HttpBase, public std::enable_shared_from_this { boost::beast::tcp_stream stream_; std::optional ip_; @@ -102,10 +101,7 @@ public: // on the I/O objects in this HttpSession. Although not strictly // necessary for single-threaded contexts, this example code is written // to be thread-safe by default. 
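// Editorial note: the net::dispatch below hops onto the stream's executor before
// starting do_read, so all subsequent reads and writes for this session run on
// that executor (the per-connection strand created by the Listener's
// async_accept), and shared_from_this() keeps the session alive across the
// asynchronous hop.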
- net::dispatch( - stream_.get_executor(), - boost::beast::bind_front_handler( - &HttpBase::do_read, shared_from_this())); + net::dispatch(stream_.get_executor(), boost::beast::bind_front_handler(&HttpBase::do_read, shared_from_this())); } void diff --git a/src/webserver/Listener.h b/src/webserver/Listener.h index 200ce313..330382ab 100644 --- a/src/webserver/Listener.h +++ b/src/webserver/Listener.h @@ -36,11 +36,9 @@ class SubscriptionManager; template -class Detector - : public std::enable_shared_from_this> +class Detector : public std::enable_shared_from_this> { - using std::enable_shared_from_this< - Detector>::shared_from_this; + using std::enable_shared_from_this>::shared_from_this; clio::Logger log_{"WebServer"}; boost::asio::io_context& ioc_; @@ -97,14 +95,9 @@ public: run() { // Set the timeout. - boost::beast::get_lowest_layer(stream_).expires_after( - std::chrono::seconds(30)); + boost::beast::get_lowest_layer(stream_).expires_after(std::chrono::seconds(30)); // Detect a TLS handshake - async_detect_ssl( - stream_, - buffer_, - boost::beast::bind_front_handler( - &Detector::on_detect, shared_from_this())); + async_detect_ssl(stream_, buffer_, boost::beast::bind_front_handler(&Detector::on_detect, shared_from_this())); } void @@ -219,11 +212,9 @@ make_websocket_session( } template -class Listener - : public std::enable_shared_from_this> +class Listener : public std::enable_shared_from_this> { - using std::enable_shared_from_this< - Listener>::shared_from_this; + using std::enable_shared_from_this>::shared_from_this; clio::Logger log_{"WebServer"}; boost::asio::io_context& ioc_; @@ -279,8 +270,7 @@ public: acceptor_.bind(endpoint, ec); if (ec) { - log_.error() << "Failed to bind to endpoint: " << endpoint - << ". message: " << ec.message(); + log_.error() << "Failed to bind to endpoint: " << endpoint << ". message: " << ec.message(); throw std::runtime_error("Failed to bind to specified endpoint"); } @@ -288,8 +278,7 @@ public: acceptor_.listen(net::socket_base::max_listen_connections, ec); if (ec) { - log_.error() << "Failed to listen at endpoint: " << endpoint - << ". message: " << ec.message(); + log_.error() << "Failed to listen at endpoint: " << endpoint << ". message: " << ec.message(); throw std::runtime_error("Failed to listen at specified endpoint"); } } @@ -307,9 +296,7 @@ private: { // The new connection gets its own strand acceptor_.async_accept( - net::make_strand(ioc_), - boost::beast::bind_front_handler( - &Listener::on_accept, shared_from_this())); + net::make_strand(ioc_), boost::beast::bind_front_handler(&Listener::on_accept, shared_from_this())); } void @@ -317,10 +304,7 @@ private: { if (!ec) { - auto ctxRef = ctx_ - ? std::optional< - std::reference_wrapper>{ctx_.value()} - : std::nullopt; + auto ctxRef = ctx_ ? 
std::optional>{ctx_.value()} : std::nullopt; // Create the detector session and run it std::make_shared>( ioc_, @@ -363,16 +347,12 @@ make_HttpServer( return nullptr; auto const serverConfig = config.section("server"); - auto const address = - boost::asio::ip::make_address(serverConfig.value("ip")); + auto const address = boost::asio::ip::make_address(serverConfig.value("ip")); auto const port = serverConfig.value("port"); - auto const numThreads = config.valueOr( - "workers", std::thread::hardware_concurrency()); - auto const maxQueueSize = - serverConfig.valueOr("max_queue_size", 0); // 0 is no limit + auto const numThreads = config.valueOr("workers", std::thread::hardware_concurrency()); + auto const maxQueueSize = serverConfig.valueOr("max_queue_size", 0); // 0 is no limit - log.info() << "Number of workers = " << numThreads - << ". Max queue size = " << maxQueueSize; + log.info() << "Number of workers = " << numThreads << ". Max queue size = " << maxQueueSize; auto server = std::make_shared( ioc, diff --git a/src/webserver/PlainWsSession.h b/src/webserver/PlainWsSession.h index c2b299a2..4aa42243 100644 --- a/src/webserver/PlainWsSession.h +++ b/src/webserver/PlainWsSession.h @@ -175,9 +175,7 @@ public: // thread-safe by default. net::dispatch( - http_.get_executor(), - boost::beast::bind_front_handler( - &WsUpgrader::do_upgrade, shared_from_this())); + http_.get_executor(), boost::beast::bind_front_handler(&WsUpgrader::do_upgrade, shared_from_this())); } private: @@ -191,8 +189,7 @@ private: parser_->body_limit(10000); // Set the timeout. - boost::beast::get_lowest_layer(http_).expires_after( - std::chrono::seconds(30)); + boost::beast::get_lowest_layer(http_).expires_after(std::chrono::seconds(30)); on_upgrade(); } diff --git a/src/webserver/Ssl.h b/src/webserver/Ssl.h index dd64a1a0..1ef4bd9f 100644 --- a/src/webserver/Ssl.h +++ b/src/webserver/Ssl.h @@ -49,15 +49,11 @@ parse_certs(const char* certFilename, const char* keyFilename) ssl::context ctx{ssl::context::tlsv12}; - ctx.set_options( - boost::asio::ssl::context::default_workarounds | - boost::asio::ssl::context::no_sslv2); + ctx.set_options(boost::asio::ssl::context::default_workarounds | boost::asio::ssl::context::no_sslv2); ctx.use_certificate_chain(boost::asio::buffer(cert.data(), cert.size())); - ctx.use_private_key( - boost::asio::buffer(key.data(), key.size()), - boost::asio::ssl::context::file_format::pem); + ctx.use_private_key(boost::asio::buffer(key.data(), key.size()), boost::asio::ssl::context::file_format::pem); return ctx; } diff --git a/src/webserver/SslHttpSession.h b/src/webserver/SslHttpSession.h index 34d3b76b..2b977e44 100644 --- a/src/webserver/SslHttpSession.h +++ b/src/webserver/SslHttpSession.h @@ -27,8 +27,7 @@ namespace ssl = boost::asio::ssl; using tcp = boost::asio::ip::tcp; // Handles an HTTPS server connection -class SslHttpSession : public HttpBase, - public std::enable_shared_from_this +class SslHttpSession : public HttpBase, public std::enable_shared_from_this { boost::beast::ssl_stream stream_; std::optional ip_; @@ -63,11 +62,7 @@ public: { try { - ip_ = stream_.next_layer() - .socket() - .remote_endpoint() - .address() - .to_string(); + ip_ = stream_.next_layer().socket().remote_endpoint().address().to_string(); } catch (std::exception const&) { @@ -108,16 +103,14 @@ public: // on the I/O objects in this session. net::dispatch(stream_.get_executor(), [self]() { // Set the timeout. 
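// Editorial note: the handshake a few lines below uses the buffered overload
// because the Detector already consumed the client's initial bytes into
// buffer_ while sniffing for a TLS ClientHello (async_detect_ssl above); the
// buffered async_handshake consumes those bytes first so no part of the
// handshake is lost.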
- boost::beast::get_lowest_layer(self->stream()) - .expires_after(std::chrono::seconds(30)); + boost::beast::get_lowest_layer(self->stream()).expires_after(std::chrono::seconds(30)); // Perform the SSL handshake // Note, this is the buffered version of the handshake. self->stream_.async_handshake( ssl::stream_base::server, self->buffer_.data(), - boost::beast::bind_front_handler( - &SslHttpSession::on_handshake, self)); + boost::beast::bind_front_handler(&SslHttpSession::on_handshake, self)); }); } @@ -136,12 +129,10 @@ public: do_close() { // Set the timeout. - boost::beast::get_lowest_layer(stream_).expires_after( - std::chrono::seconds(30)); + boost::beast::get_lowest_layer(stream_).expires_after(std::chrono::seconds(30)); // Perform the SSL shutdown - stream_.async_shutdown(boost::beast::bind_front_handler( - &SslHttpSession::on_shutdown, shared_from_this())); + stream_.async_shutdown(boost::beast::bind_front_handler(&SslHttpSession::on_shutdown, shared_from_this())); } void diff --git a/src/webserver/SslWsSession.h b/src/webserver/SslWsSession.h index 00a867d7..728d7a28 100644 --- a/src/webserver/SslWsSession.h +++ b/src/webserver/SslWsSession.h @@ -39,9 +39,7 @@ class ReportingETL; class SslWsSession : public WsSession { - boost::beast::websocket::stream< - boost::beast::ssl_stream> - ws_; + boost::beast::websocket::stream> ws_; public: // Take ownership of the socket @@ -58,23 +56,11 @@ public: RPC::Counters& counters, WorkQueue& queue, boost::beast::flat_buffer&& b) - : WsSession( - ioc, - ip, - backend, - subscriptions, - balancer, - etl, - tagFactory, - dosGuard, - counters, - queue, - std::move(b)) + : WsSession(ioc, ip, backend, subscriptions, balancer, etl, tagFactory, dosGuard, counters, queue, std::move(b)) , ws_(std::move(stream)) { } - boost::beast::websocket::stream< - boost::beast::ssl_stream>& + boost::beast::websocket::stream>& ws() { return ws_; @@ -169,13 +155,10 @@ public: run() { // Set the timeout. - boost::beast::get_lowest_layer(https_).expires_after( - std::chrono::seconds(30)); + boost::beast::get_lowest_layer(https_).expires_after(std::chrono::seconds(30)); net::dispatch( - https_.get_executor(), - boost::beast::bind_front_handler( - &SslWsUpgrader::do_upgrade, shared_from_this())); + https_.get_executor(), boost::beast::bind_front_handler(&SslWsUpgrader::do_upgrade, shared_from_this())); } private: @@ -201,8 +184,7 @@ private: parser_->body_limit(10000); // Set the timeout. 
- boost::beast::get_lowest_layer(https_).expires_after( - std::chrono::seconds(30)); + boost::beast::get_lowest_layer(https_).expires_after(std::chrono::seconds(30)); on_upgrade(); } diff --git a/src/webserver/WsBase.h b/src/webserver/WsBase.h index 2648d576..2b99c609 100644 --- a/src/webserver/WsBase.h +++ b/src/webserver/WsBase.h @@ -73,8 +73,7 @@ protected: boost::system::error_code ec_; public: - explicit WsBase(util::TagDecoratorFactory const& tagFactory) - : Taggable{tagFactory} + explicit WsBase(util::TagDecoratorFactory const& tagFactory) : Taggable{tagFactory} { } @@ -106,8 +105,7 @@ class ETLLoadBalancer; // Echoes back all received WebSocket messages template -class WsSession : public WsBase, - public std::enable_shared_from_this> +class WsSession : public WsBase, public std::enable_shared_from_this> { using std::enable_shared_from_this>::shared_from_this; @@ -197,8 +195,7 @@ public: sending_ = true; derived().ws().async_write( net::buffer(messages_.front()->data(), messages_.front()->size()), - boost::beast::bind_front_handler( - &WsSession::on_write, derived().shared_from_this())); + boost::beast::bind_front_handler(&WsSession::on_write, derived().shared_from_this())); } void @@ -229,10 +226,7 @@ public: send(std::shared_ptr msg) override { net::dispatch( - derived().ws().get_executor(), - [this, - self = derived().shared_from_this(), - msg = std::move(msg)]() { + derived().ws().get_executor(), [this, self = derived().shared_from_this(), msg = std::move(msg)]() { messages_.push(std::move(msg)); maybe_send_next(); }); @@ -249,22 +243,15 @@ public: run(http::request req) { // Set suggested timeout settings for the websocket - derived().ws().set_option(websocket::stream_base::timeout::suggested( - boost::beast::role_type::server)); + derived().ws().set_option(websocket::stream_base::timeout::suggested(boost::beast::role_type::server)); // Set a decorator to change the Server of the handshake - derived().ws().set_option(websocket::stream_base::decorator( - [](websocket::response_type& res) { - res.set( - http::field::server, - std::string(BOOST_BEAST_VERSION_STRING) + - " websocket-server-async"); - })); + derived().ws().set_option(websocket::stream_base::decorator([](websocket::response_type& res) { + res.set(http::field::server, std::string(BOOST_BEAST_VERSION_STRING) + " websocket-server-async"); + })); derived().ws().async_accept( - req, - boost::beast::bind_front_handler( - &WsSession::on_accept, this->shared_from_this())); + req, boost::beast::bind_front_handler(&WsSession::on_accept, this->shared_from_this())); } void @@ -290,16 +277,11 @@ public: buffer_.consume(buffer_.size()); // Read a message into our buffer derived().ws().async_read( - buffer_, - boost::beast::bind_front_handler( - &WsSession::on_read, this->shared_from_this())); + buffer_, boost::beast::bind_front_handler(&WsSession::on_read, this->shared_from_this())); } void - handle_request( - boost::json::object const&& request, - boost::json::value const& id, - boost::asio::yield_context& yield) + handle_request(boost::json::object const&& request, boost::json::value const& id, boost::asio::yield_context& yield) { auto ip = derived().ip(); if (!ip) @@ -316,8 +298,7 @@ public: try { - log_.info() << tag() - << "ws received request from work queue : " << request; + log_.info() << tag() << "ws received request from work queue : " << request; auto range = backend_->fetchLedgerRange(); if (!range) @@ -344,8 +325,7 @@ public: response = getDefaultWsResponse(id); - auto [v, timeDiff] = - util::timed([&]() { return 
RPC::buildResponse(*context); }); + auto [v, timeDiff] = util::timed([&]() { return RPC::buildResponse(*context); }); auto us = std::chrono::duration(timeDiff); logDuration(*context, us); @@ -367,8 +347,7 @@ public: counters_.rpcComplete(context->method, us); auto const& result = std::get(v); - auto const isForwarded = result.contains("forwarded") && - result.at("forwarded").is_bool() && + auto const isForwarded = result.contains("forwarded") && result.at("forwarded").is_bool() && result.at("forwarded").as_bool(); // if the result is forwarded - just use it as is @@ -415,8 +394,7 @@ public: if (ec) return wsFail(ec, "read"); - std::string msg{ - static_cast(buffer_.data().data()), buffer_.size()}; + std::string msg{static_cast(buffer_.data().data()), buffer_.size()}; auto ip = derived().ip(); if (!ip) @@ -424,10 +402,7 @@ public: perfLog_.info() << tag() << "Received request from ip = " << *ip; - auto sendError = [this, ip]( - auto error, - boost::json::value const& id, - boost::json::object const& request) { + auto sendError = [this, ip](auto error, boost::json::value const& id, boost::json::object const& request) { auto e = RPC::makeError(error); if (!id.is_null()) @@ -471,9 +446,7 @@ public: perfLog_.debug() << tag() << "Adding to work queue"; if (!queue_.postCoro( - [shared_this = shared_from_this(), - r = std::move(request), - id](boost::asio::yield_context yield) { + [shared_this = shared_from_this(), r = std::move(request), id](boost::asio::yield_context yield) { shared_this->handle_request(std::move(r), id, yield); }, dosGuard_.isWhiteListed(*ip))) diff --git a/unittests/Backend.cpp b/unittests/Backend.cpp index 837878a5..585dbff0 100644 --- a/unittests/Backend.cpp +++ b/unittests/Backend.cpp @@ -44,1130 +44,913 @@ TEST_F(BackendTest, Basic) work.emplace(ioc); std::atomic_bool done = false; - boost::asio::spawn( - ioc, [&done, &work, &ioc](boost::asio::yield_context yield) { - boost::log::core::get()->set_filter( - clio::log_severity >= clio::Severity::WRN); - std::string keyspace = "clio_test_" + - std::to_string(std::chrono::system_clock::now() - .time_since_epoch() - .count()); - boost::json::object cassandraConfig{ - {"database", - {{"type", "cassandra"}, - {"cassandra", - {{"contact_points", "127.0.0.1"}, - {"port", 9042}, - {"keyspace", keyspace.c_str()}, - {"replication_factor", 1}, - {"table_prefix", ""}, - {"max_requests_outstanding", 1000}, - {"indexer_key_shift", 2}, - {"threads", 8}}}}}}; - std::vector configs = {cassandraConfig}; - for (auto& config : configs) + boost::asio::spawn(ioc, [&done, &work, &ioc](boost::asio::yield_context yield) { + boost::log::core::get()->set_filter(clio::log_severity >= clio::Severity::WRN); + std::string keyspace = + "clio_test_" + std::to_string(std::chrono::system_clock::now().time_since_epoch().count()); + boost::json::object cassandraConfig{ + {"database", + {{"type", "cassandra"}, + {"cassandra", + {{"contact_points", "127.0.0.1"}, + {"port", 9042}, + {"keyspace", keyspace.c_str()}, + {"replication_factor", 1}, + {"table_prefix", ""}, + {"max_requests_outstanding", 1000}, + {"indexer_key_shift", 2}, + {"threads", 8}}}}}}; + std::vector configs = {cassandraConfig}; + for (auto& config : configs) + { + auto backend = Backend::make_Backend(ioc, clio::Config{config}); + + std::string rawHeader = + "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335" + "BC54351E" + "DD73" + "3898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" + "315A6DB6" + "FE30" + "CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED3CF5" + "3E2232B3" 
+ "3EF5" + "7CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE282656A58CE5A" + "A29652EF" + "FD80" + "AC59CD91416E4E13DBBE"; + + auto hexStringToBinaryString = [](auto const& hex) { + auto blob = ripple::strUnHex(hex); + std::string strBlob; + for (auto c : *blob) + { + strBlob += c; + } + return strBlob; + }; + auto binaryStringToUint256 = [](auto const& bin) -> ripple::uint256 { + ripple::uint256 uint; + return uint.fromVoid((void const*)bin.data()); + }; + auto ledgerInfoToBinaryString = [](auto const& info) { + auto blob = RPC::ledgerInfoToBlob(info, true); + std::string strBlob; + for (auto c : blob) + { + strBlob += c; + } + return strBlob; + }; + + std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); + ripple::LedgerInfo lgrInfo = deserializeHeader(ripple::makeSlice(rawHeaderBlob)); + + backend->startWrites(); + backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); + backend->writeSuccessor(uint256ToString(Backend::firstKey), lgrInfo.seq, uint256ToString(Backend::lastKey)); + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); { - auto backend = Backend::make_Backend(ioc, clio::Config{config}); + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, rng->maxSequence); + EXPECT_EQ(rng->maxSequence, lgrInfo.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_TRUE(seq.has_value()); + EXPECT_EQ(*seq, lgrInfo.seq); + } - std::string rawHeader = - "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335" - "BC54351E" - "DD73" - "3898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" - "315A6DB6" - "FE30" - "CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED3CF5" - "3E2232B3" - "3EF5" - "7CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE282656A58CE5A" - "A29652EF" - "FD80" - "AC59CD91416E4E13DBBE"; + { + auto retLgr = backend->fetchLedgerBySequence(lgrInfo.seq, yield); + ASSERT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfo.seq); + EXPECT_EQ(RPC::ledgerInfoToBlob(lgrInfo), RPC::ledgerInfoToBlob(*retLgr)); + } - auto hexStringToBinaryString = [](auto const& hex) { - auto blob = ripple::strUnHex(hex); - std::string strBlob; - for (auto c : *blob) - { - strBlob += c; - } - return strBlob; - }; - auto binaryStringToUint256 = - [](auto const& bin) -> ripple::uint256 { - ripple::uint256 uint; - return uint.fromVoid((void const*)bin.data()); - }; - auto ledgerInfoToBinaryString = [](auto const& info) { - auto blob = RPC::ledgerInfoToBlob(info, true); - std::string strBlob; - for (auto c : blob) - { - strBlob += c; - } - return strBlob; - }; + EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield).has_value()); + auto lgrInfoOld = lgrInfo; - std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); - ripple::LedgerInfo lgrInfo = - deserializeHeader(ripple::makeSlice(rawHeaderBlob)); + auto lgrInfoNext = lgrInfo; + lgrInfoNext.seq = lgrInfo.seq + 1; + lgrInfoNext.parentHash = lgrInfo.hash; + lgrInfoNext.hash++; + lgrInfoNext.accountHash = ~lgrInfo.accountHash; + { + std::string rawHeaderBlob = ledgerInfoToBinaryString(lgrInfoNext); backend->startWrites(); - backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfo.seq, - uint256ToString(Backend::lastKey)); - ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng.has_value()); - EXPECT_EQ(rng->minSequence, rng->maxSequence); - EXPECT_EQ(rng->maxSequence, lgrInfo.seq); - } - { - auto seq = 
backend->fetchLatestLedgerSequence(yield); - EXPECT_TRUE(seq.has_value()); - EXPECT_EQ(*seq, lgrInfo.seq); - } + backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_EQ(seq, lgrInfoNext.seq); + } + { + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + EXPECT_NE(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoOld)); + retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 1, yield); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoOld)); + EXPECT_NE(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield); + EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield).has_value()); - { - auto retLgr = - backend->fetchLedgerBySequence(lgrInfo.seq, yield); - ASSERT_TRUE(retLgr.has_value()); - EXPECT_EQ(retLgr->seq, lgrInfo.seq); - EXPECT_EQ( - RPC::ledgerInfoToBlob(lgrInfo), - RPC::ledgerInfoToBlob(*retLgr)); - } + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); - EXPECT_FALSE( - backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield) - .has_value()); - auto lgrInfoOld = lgrInfo; + auto hashes = backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(hashes.size(), 0); + } - auto lgrInfoNext = lgrInfo; - lgrInfoNext.seq = lgrInfo.seq + 1; - lgrInfoNext.parentHash = lgrInfo.hash; + // the below dummy data is not expected to be consistent. The + // metadata string does represent valid metadata. Don't assume + // though that the transaction or its hash correspond to the + // metadata, or anything like that. 
These tests are purely + // binary tests to make sure the same data that goes in, comes + // back out + std::string metaHex = + "201C0000001AF8E411006F560A3E08122A05AC91DEFA87052B0554E4A2" + "9B46" + "3A27642EBB060B6052196592EEE72200000000240480FDB52503CE1A86" + "3300" + "000000000000003400000000000000005529983CBAED30F54747145292" + "1C3C" + "6B9F9685F292F6291000EED0A44413AF18C250101AC09600F4B502C8F7" + "F830" + "F80B616DCB6F3970CB79AB70975A05ED5B66860B9564400000001FE217" + "CB65" + "D54B640B31521B05000000000000000000000000434E59000000000003" + "60E3" + "E0751BD9A566CD03FA6CAFC78118B82BA081142252F328CF9126341776" + "2570" + "D67220CCB33B1370E1E1E3110064561AC09600F4B502C8F7F830F80B61" + "6DCB" + "6F3970CB79AB70975A05ED33DF783681E8365A05ED33DF783681581AC0" + "9600" + "F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED33DF78368103" + "1100" + "0000000000000000000000434E59000000000004110360E3E0751BD9A5" + "66CD" + "03FA6CAFC78118B82BA0E1E1E4110064561AC09600F4B502C8F7F830F8" + "0B61" + "6DCB6F3970CB79AB70975A05ED5B66860B95E72200000000365A05ED5B" + "6686" + "0B95581AC09600F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05" + "ED5B" + "66860B9501110000000000000000000000000000000000000000021100" + "0000" + "0000000000000000000000000000000000031100000000000000000000" + "0000" + "434E59000000000004110360E3E0751BD9A566CD03FA6CAFC78118B82B" + "A0E1" + "E1E311006F5647B05E66DE9F3DF2689E8F4CE6126D3136B6C5E79587F9" + "D24B" + "D71A952B0852BAE8240480FDB950101AC09600F4B502C8F7F830F80B61" + "6DCB" + "6F3970CB79AB70975A05ED33DF78368164400000033C83A95F65D59D9A" + "6291" + "9C2D18000000000000000000000000434E5900000000000360E3E0751B" + "D9A5" + "66CD03FA6CAFC78118B82BA081142252F328CF91263417762570D67220" + "CCB3" + "3B1370E1E1E511006456AEA3074F10FE15DAC592F8A0405C61FB7D4C98" + "F588" + "C2D55C84718FAFBBD2604AE72200000000310000000000000000320000" + "0000" + "0000000058AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588C2D55C" + "8471" + "8FAFBBD2604A82142252F328CF91263417762570D67220CCB33B1370E1" + "E1E5" + "1100612503CE1A8755CE935137F8C6C8DEF26B5CD93BE18105CA83F65E" + "1E90" + "CEC546F562D25957DC0856E0311EB450B6177F969B94DBDDA83E99B7A0" + "576A" + "CD9079573876F16C0C004F06E6240480FDB9624000000005FF0E2BE1E7" + "2200" + "000000240480FDBA2D00000005624000000005FF0E1F81142252F328CF" + "9126" + "3417762570D67220CCB33B1370E1E1F1031000"; + std::string txnHex = + "1200072200000000240480FDB920190480FDB5201B03CE1A8964400000" + "033C" + "83A95F65D59D9A62919C2D18000000000000000000000000434E590000" + "0000" + "000360E3E0751BD9A566CD03FA6CAFC78118B82BA06840000000000000" + "0C73" + "21022D40673B44C82DEE1DDB8B9BB53DCCE4F97B27404DB850F068DD91" + "D685" + "E337EA7446304402202EA6B702B48B39F2197112382838F92D4C02948E" + "9911" + "FE6B2DEBCF9183A426BC022005DAC06CD4517E86C2548A80996019F3AC" + "60A0" + "9EED153BF60C992930D68F09F981142252F328CF91263417762570D672" + "20CC" + "B33B1370"; + std::string hashHex = + "0A81FB3D6324C2DCF73131505C6E4DC67981D7FC39F5E9574CEC4B1F22" + "D28BF7"; + + // this account is not related to the above transaction and + // metadata + std::string accountHex = + "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD" + "018EFFBE" + "17C5" + "C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" + "07811422" + "52F3" + "28CF91263417762570D67220CCB33B1370"; + std::string accountIndexHex = + "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C" + "004F06"; + + // An NFTokenMint tx + std::string nftTxnHex = + "1200192200000008240011CC9B201B001F71D6202A0000000168400000" + 
"000000000C7321ED475D1452031E8F9641AF1631519A58F7B8681E172E" + "4838AA0E59408ADA1727DD74406960041F34F10E0CBB39444B4D4E577F" + "C0B7E8D843D091C2917E96E7EE0E08B30C91413EC551A2B8A1D405E8BA" + "34FE185D8B10C53B40928611F2DE3B746F0303751868747470733A2F2F" + "677265677765697362726F642E636F6D81146203F49C21D5D6E022CB16" + "DE3538F248662FC73C"; + + std::string nftTxnMeta = + "201C00000001F8E511005025001F71B3556ED9C9459001E4F4A9121F4E" + "07AB6D14898A5BBEF13D85C25D743540DB59F3CF566203F49C21D5D6E0" + "22CB16DE3538F248662FC73CFFFFFFFFFFFFFFFFFFFFFFFFE6FAEC5A00" + "0800006203F49C21D5D6E022CB16DE3538F248662FC73C8962EFA00000" + "0006751868747470733A2F2F677265677765697362726F642E636F6DE1" + "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C93E8B1" + "C200000028751868747470733A2F2F677265677765697362726F642E63" + "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" + "9808B6B90000001D751868747470733A2F2F677265677765697362726F" + "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" + "2FC73C9C28BBAC00000012751868747470733A2F2F6772656777656973" + "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" + "F248662FC73CA048C0A300000007751868747470733A2F2F6772656777" + "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" + "DE3538F248662FC73CAACE82C500000029751868747470733A2F2F6772" + "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" + "22CB16DE3538F248662FC73CAEEE87B80000001E751868747470733A2F" + "2F677265677765697362726F642E636F6DE1EC5A000800006203F49C21" + "D5D6E022CB16DE3538F248662FC73CB30E8CAF00000013751868747470" + "733A2F2F677265677765697362726F642E636F6DE1EC5A000800006203" + "F49C21D5D6E022CB16DE3538F248662FC73CB72E91A200000008751868" + "747470733A2F2F677265677765697362726F642E636F6DE1EC5A000800" + "006203F49C21D5D6E022CB16DE3538F248662FC73CC1B453C40000002A" + "751868747470733A2F2F677265677765697362726F642E636F6DE1EC5A" + "000800006203F49C21D5D6E022CB16DE3538F248662FC73CC5D458BB00" + "00001F751868747470733A2F2F677265677765697362726F642E636F6D" + "E1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CC9F4" + "5DAE00000014751868747470733A2F2F677265677765697362726F642E" + "636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC7" + "3CCE1462A500000009751868747470733A2F2F67726567776569736272" + "6F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248" + "662FC73CD89A24C70000002B751868747470733A2F2F67726567776569" + "7362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE35" + "38F248662FC73CDCBA29BA00000020751868747470733A2F2F67726567" + "7765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB" + "16DE3538F248662FC73CE0DA2EB100000015751868747470733A2F2F67" + "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" + "E022CB16DE3538F248662FC73CE4FA33A40000000A751868747470733A" + "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" + "21D5D6E022CB16DE3538F248662FC73CF39FFABD000000217518687474" + "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" + "03F49C21D5D6E022CB16DE3538F248662FC73CF7BFFFB0000000167518" + "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" + "00006203F49C21D5D6E022CB16DE3538F248662FC73CFBE004A7000000" + "0B751868747470733A2F2F677265677765697362726F642E636F6DE1F1" + "E1E72200000000501A6203F49C21D5D6E022CB16DE3538F248662FC73C" + "662FC73C8962EFA000000006FAEC5A000800006203F49C21D5D6E022CB" + "16DE3538F248662FC73C8962EFA000000006751868747470733A2F2F67" + "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" + "E022CB16DE3538F248662FC73C93E8B1C200000028751868747470733A" + 
"2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" + "21D5D6E022CB16DE3538F248662FC73C9808B6B90000001D7518687474" + "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" + "03F49C21D5D6E022CB16DE3538F248662FC73C9C28BBAC000000127518" + "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" + "00006203F49C21D5D6E022CB16DE3538F248662FC73CA048C0A3000000" + "07751868747470733A2F2F677265677765697362726F642E636F6DE1EC" + "5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAACE82C5" + "00000029751868747470733A2F2F677265677765697362726F642E636F" + "6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAE" + "EE87B80000001E751868747470733A2F2F677265677765697362726F64" + "2E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662F" + "C73CB30E8CAF00000013751868747470733A2F2F677265677765697362" + "726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F2" + "48662FC73CB72E91A200000008751868747470733A2F2F677265677765" + "697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE" + "3538F248662FC73CC1B453C40000002A751868747470733A2F2F677265" + "677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022" + "CB16DE3538F248662FC73CC5D458BB0000001F751868747470733A2F2F" + "677265677765697362726F642E636F6DE1EC5A000800006203F49C21D5" + "D6E022CB16DE3538F248662FC73CC9F45DAE0000001475186874747073" + "3A2F2F677265677765697362726F642E636F6DE1EC5A000800006203F4" + "9C21D5D6E022CB16DE3538F248662FC73CCE1462A50000000975186874" + "7470733A2F2F677265677765697362726F642E636F6DE1EC5A00080000" + "6203F49C21D5D6E022CB16DE3538F248662FC73CD89A24C70000002B75" + "1868747470733A2F2F677265677765697362726F642E636F6DE1EC5A00" + "0800006203F49C21D5D6E022CB16DE3538F248662FC73CDCBA29BA0000" + "0020751868747470733A2F2F677265677765697362726F642E636F6DE1" + "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CE0DA2E" + "B100000015751868747470733A2F2F677265677765697362726F642E63" + "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" + "E4FA33A40000000A751868747470733A2F2F677265677765697362726F" + "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" + "2FC73CEF7FF5C60000002C751868747470733A2F2F6772656777656973" + "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" + "F248662FC73CF39FFABD00000021751868747470733A2F2F6772656777" + "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" + "DE3538F248662FC73CF7BFFFB000000016751868747470733A2F2F6772" + "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" + "22CB16DE3538F248662FC73CFBE004A70000000B751868747470733A2F" + "2F677265677765697362726F642E636F6DE1F1E1E1E511006125001F71" + "B3556ED9C9459001E4F4A9121F4E07AB6D14898A5BBEF13D85C25D7435" + "40DB59F3CF56BE121B82D5812149D633F605EB07265A80B762A365CE94" + "883089FEEE4B955701E6240011CC9B202B0000002C6240000002540BE3" + "ECE1E72200000000240011CC9C2D0000000A202B0000002D202C000000" + "066240000002540BE3E081146203F49C21D5D6E022CB16DE3538F24866" + "2FC73CE1E1F1031000"; + std::string nftTxnHashHex = + "6C7F69A6D25A13AC4A2E9145999F45D4674F939900017A96885FDC2757" + "E9284E"; + ripple::uint256 nftID; + EXPECT_TRUE( + nftID.parseHex("000800006203F49C21D5D6E022CB16DE3538F248662" + "FC73CEF7FF5C60000002C")); + + std::string metaBlob = hexStringToBinaryString(metaHex); + std::string txnBlob = hexStringToBinaryString(txnHex); + std::string hashBlob = hexStringToBinaryString(hashHex); + std::string accountBlob = hexStringToBinaryString(accountHex); + std::string accountIndexBlob = hexStringToBinaryString(accountIndexHex); + std::vector affectedAccounts; + + std::string nftTxnBlob = 
hexStringToBinaryString(nftTxnHex); + std::string nftTxnMetaBlob = hexStringToBinaryString(nftTxnMeta); + + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.txHash = ~lgrInfo.txHash; + lgrInfoNext.accountHash = lgrInfoNext.accountHash ^ lgrInfoNext.txHash; + lgrInfoNext.parentHash = lgrInfoNext.hash; lgrInfoNext.hash++; - lgrInfoNext.accountHash = ~lgrInfo.accountHash; - { - std::string rawHeaderBlob = - ledgerInfoToBinaryString(lgrInfoNext); - backend->startWrites(); - backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } + ripple::uint256 hash256; + EXPECT_TRUE(hash256.parseHex(hashHex)); + ripple::TxMeta txMeta{hash256, lgrInfoNext.seq, metaBlob}; + auto journal = ripple::debugLog(); + auto accountsSet = txMeta.getAffectedAccounts(); + for (auto& a : accountsSet) { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng.has_value()); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + affectedAccounts.push_back(a); } - { - auto seq = backend->fetchLatestLedgerSequence(yield); - EXPECT_EQ(seq, lgrInfoNext.seq); - } - { - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr.has_value()); - EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - EXPECT_NE( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoOld)); - retLgr = backend->fetchLedgerBySequence( - lgrInfoNext.seq - 1, yield); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoOld)); - EXPECT_NE( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - retLgr = backend->fetchLedgerBySequence( - lgrInfoNext.seq - 2, yield); - EXPECT_FALSE( - backend - ->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield) - .has_value()); + std::vector accountTxData; + accountTxData.emplace_back(txMeta, hash256, journal); - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(txns.size(), 0); + ripple::uint256 nftHash256; + EXPECT_TRUE(nftHash256.parseHex(nftTxnHashHex)); + ripple::TxMeta nftTxMeta{nftHash256, lgrInfoNext.seq, nftTxnMetaBlob}; + ripple::SerialIter it{nftTxnBlob.data(), nftTxnBlob.size()}; + ripple::STTx sttx{it}; + auto const [parsedNFTTxsRef, parsedNFT] = getNFTDataFromTx(nftTxMeta, sttx); + // need to copy the nft txns so we can std::move later + std::vector parsedNFTTxs; + parsedNFTTxs.insert(parsedNFTTxs.end(), parsedNFTTxsRef.begin(), parsedNFTTxsRef.end()); + EXPECT_EQ(parsedNFTTxs.size(), 1); + EXPECT_TRUE(parsedNFT.has_value()); + EXPECT_EQ(parsedNFT->tokenID, nftID); + std::vector nftData; + nftData.push_back(*parsedNFT); - auto hashes = backend->fetchAllTransactionHashesInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(hashes.size(), 0); + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeTransaction( + std::string{hashBlob}, + lgrInfoNext.seq, + lgrInfoNext.closeTime.time_since_epoch().count(), + std::string{txnBlob}, + std::string{metaBlob}); + backend->writeAccountTransactions(std::move(accountTxData)); + + // NFT writing not yet implemented for pg + if (config == cassandraConfig) + { + backend->writeNFTs(std::move(nftData)); + backend->writeNFTTransactions(std::move(parsedNFTTxs)); + } + else + { + EXPECT_THROW({ backend->writeNFTs(std::move(nftData)); }, std::runtime_error); + EXPECT_THROW({ 
backend->writeNFTTransactions(std::move(parsedNFTTxs)); }, std::runtime_error); } - // the below dummy data is not expected to be consistent. The - // metadata string does represent valid metadata. Don't assume - // though that the transaction or its hash correspond to the - // metadata, or anything like that. These tests are purely - // binary tests to make sure the same data that goes in, comes - // back out - std::string metaHex = - "201C0000001AF8E411006F560A3E08122A05AC91DEFA87052B0554E4A2" - "9B46" - "3A27642EBB060B6052196592EEE72200000000240480FDB52503CE1A86" - "3300" - "000000000000003400000000000000005529983CBAED30F54747145292" - "1C3C" - "6B9F9685F292F6291000EED0A44413AF18C250101AC09600F4B502C8F7" - "F830" - "F80B616DCB6F3970CB79AB70975A05ED5B66860B9564400000001FE217" - "CB65" - "D54B640B31521B05000000000000000000000000434E59000000000003" - "60E3" - "E0751BD9A566CD03FA6CAFC78118B82BA081142252F328CF9126341776" - "2570" - "D67220CCB33B1370E1E1E3110064561AC09600F4B502C8F7F830F80B61" - "6DCB" - "6F3970CB79AB70975A05ED33DF783681E8365A05ED33DF783681581AC0" - "9600" - "F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED33DF78368103" - "1100" - "0000000000000000000000434E59000000000004110360E3E0751BD9A5" - "66CD" - "03FA6CAFC78118B82BA0E1E1E4110064561AC09600F4B502C8F7F830F8" - "0B61" - "6DCB6F3970CB79AB70975A05ED5B66860B95E72200000000365A05ED5B" - "6686" - "0B95581AC09600F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05" - "ED5B" - "66860B9501110000000000000000000000000000000000000000021100" - "0000" - "0000000000000000000000000000000000031100000000000000000000" - "0000" - "434E59000000000004110360E3E0751BD9A566CD03FA6CAFC78118B82B" - "A0E1" - "E1E311006F5647B05E66DE9F3DF2689E8F4CE6126D3136B6C5E79587F9" - "D24B" - "D71A952B0852BAE8240480FDB950101AC09600F4B502C8F7F830F80B61" - "6DCB" - "6F3970CB79AB70975A05ED33DF78368164400000033C83A95F65D59D9A" - "6291" - "9C2D18000000000000000000000000434E5900000000000360E3E0751B" - "D9A5" - "66CD03FA6CAFC78118B82BA081142252F328CF91263417762570D67220" - "CCB3" - "3B1370E1E1E511006456AEA3074F10FE15DAC592F8A0405C61FB7D4C98" - "F588" - "C2D55C84718FAFBBD2604AE72200000000310000000000000000320000" - "0000" - "0000000058AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588C2D55C" - "8471" - "8FAFBBD2604A82142252F328CF91263417762570D67220CCB33B1370E1" - "E1E5" - "1100612503CE1A8755CE935137F8C6C8DEF26B5CD93BE18105CA83F65E" - "1E90" - "CEC546F562D25957DC0856E0311EB450B6177F969B94DBDDA83E99B7A0" - "576A" - "CD9079573876F16C0C004F06E6240480FDB9624000000005FF0E2BE1E7" - "2200" - "000000240480FDBA2D00000005624000000005FF0E1F81142252F328CF" - "9126" - "3417762570D67220CCB33B1370E1E1F1031000"; - std::string txnHex = - "1200072200000000240480FDB920190480FDB5201B03CE1A8964400000" - "033C" - "83A95F65D59D9A62919C2D18000000000000000000000000434E590000" - "0000" - "000360E3E0751BD9A566CD03FA6CAFC78118B82BA06840000000000000" - "0C73" - "21022D40673B44C82DEE1DDB8B9BB53DCCE4F97B27404DB850F068DD91" - "D685" - "E337EA7446304402202EA6B702B48B39F2197112382838F92D4C02948E" - "9911" - "FE6B2DEBCF9183A426BC022005DAC06CD4517E86C2548A80996019F3AC" - "60A0" - "9EED153BF60C992930D68F09F981142252F328CF91263417762570D672" - "20CC" - "B33B1370"; - std::string hashHex = - "0A81FB3D6324C2DCF73131505C6E4DC67981D7FC39F5E9574CEC4B1F22" - "D28BF7"; + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{accountBlob}); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfoNext.seq, std::string{accountIndexBlob}); + backend->writeSuccessor( + 
std::string{accountIndexBlob}, lgrInfoNext.seq, uint256ToString(Backend::lastKey)); - // this account is not related to the above transaction and - // metadata - std::string accountHex = - "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD" - "018EFFBE" - "17C5" - "C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" - "07811422" - "52F3" - "28CF91263417762570D67220CCB33B1370"; - std::string accountIndexHex = - "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C" - "004F06"; - - // An NFTokenMint tx - std::string nftTxnHex = - "1200192200000008240011CC9B201B001F71D6202A0000000168400000" - "000000000C7321ED475D1452031E8F9641AF1631519A58F7B8681E172E" - "4838AA0E59408ADA1727DD74406960041F34F10E0CBB39444B4D4E577F" - "C0B7E8D843D091C2917E96E7EE0E08B30C91413EC551A2B8A1D405E8BA" - "34FE185D8B10C53B40928611F2DE3B746F0303751868747470733A2F2F" - "677265677765697362726F642E636F6D81146203F49C21D5D6E022CB16" - "DE3538F248662FC73C"; - - std::string nftTxnMeta = - "201C00000001F8E511005025001F71B3556ED9C9459001E4F4A9121F4E" - "07AB6D14898A5BBEF13D85C25D743540DB59F3CF566203F49C21D5D6E0" - "22CB16DE3538F248662FC73CFFFFFFFFFFFFFFFFFFFFFFFFE6FAEC5A00" - "0800006203F49C21D5D6E022CB16DE3538F248662FC73C8962EFA00000" - "0006751868747470733A2F2F677265677765697362726F642E636F6DE1" - "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C93E8B1" - "C200000028751868747470733A2F2F677265677765697362726F642E63" - "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" - "9808B6B90000001D751868747470733A2F2F677265677765697362726F" - "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" - "2FC73C9C28BBAC00000012751868747470733A2F2F6772656777656973" - "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" - "F248662FC73CA048C0A300000007751868747470733A2F2F6772656777" - "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" - "DE3538F248662FC73CAACE82C500000029751868747470733A2F2F6772" - "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" - "22CB16DE3538F248662FC73CAEEE87B80000001E751868747470733A2F" - "2F677265677765697362726F642E636F6DE1EC5A000800006203F49C21" - "D5D6E022CB16DE3538F248662FC73CB30E8CAF00000013751868747470" - "733A2F2F677265677765697362726F642E636F6DE1EC5A000800006203" - "F49C21D5D6E022CB16DE3538F248662FC73CB72E91A200000008751868" - "747470733A2F2F677265677765697362726F642E636F6DE1EC5A000800" - "006203F49C21D5D6E022CB16DE3538F248662FC73CC1B453C40000002A" - "751868747470733A2F2F677265677765697362726F642E636F6DE1EC5A" - "000800006203F49C21D5D6E022CB16DE3538F248662FC73CC5D458BB00" - "00001F751868747470733A2F2F677265677765697362726F642E636F6D" - "E1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CC9F4" - "5DAE00000014751868747470733A2F2F677265677765697362726F642E" - "636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC7" - "3CCE1462A500000009751868747470733A2F2F67726567776569736272" - "6F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248" - "662FC73CD89A24C70000002B751868747470733A2F2F67726567776569" - "7362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE35" - "38F248662FC73CDCBA29BA00000020751868747470733A2F2F67726567" - "7765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB" - "16DE3538F248662FC73CE0DA2EB100000015751868747470733A2F2F67" - "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" - "E022CB16DE3538F248662FC73CE4FA33A40000000A751868747470733A" - "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" - "21D5D6E022CB16DE3538F248662FC73CF39FFABD000000217518687474" - 
"70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" - "03F49C21D5D6E022CB16DE3538F248662FC73CF7BFFFB0000000167518" - "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" - "00006203F49C21D5D6E022CB16DE3538F248662FC73CFBE004A7000000" - "0B751868747470733A2F2F677265677765697362726F642E636F6DE1F1" - "E1E72200000000501A6203F49C21D5D6E022CB16DE3538F248662FC73C" - "662FC73C8962EFA000000006FAEC5A000800006203F49C21D5D6E022CB" - "16DE3538F248662FC73C8962EFA000000006751868747470733A2F2F67" - "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" - "E022CB16DE3538F248662FC73C93E8B1C200000028751868747470733A" - "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" - "21D5D6E022CB16DE3538F248662FC73C9808B6B90000001D7518687474" - "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" - "03F49C21D5D6E022CB16DE3538F248662FC73C9C28BBAC000000127518" - "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" - "00006203F49C21D5D6E022CB16DE3538F248662FC73CA048C0A3000000" - "07751868747470733A2F2F677265677765697362726F642E636F6DE1EC" - "5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAACE82C5" - "00000029751868747470733A2F2F677265677765697362726F642E636F" - "6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAE" - "EE87B80000001E751868747470733A2F2F677265677765697362726F64" - "2E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662F" - "C73CB30E8CAF00000013751868747470733A2F2F677265677765697362" - "726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F2" - "48662FC73CB72E91A200000008751868747470733A2F2F677265677765" - "697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE" - "3538F248662FC73CC1B453C40000002A751868747470733A2F2F677265" - "677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022" - "CB16DE3538F248662FC73CC5D458BB0000001F751868747470733A2F2F" - "677265677765697362726F642E636F6DE1EC5A000800006203F49C21D5" - "D6E022CB16DE3538F248662FC73CC9F45DAE0000001475186874747073" - "3A2F2F677265677765697362726F642E636F6DE1EC5A000800006203F4" - "9C21D5D6E022CB16DE3538F248662FC73CCE1462A50000000975186874" - "7470733A2F2F677265677765697362726F642E636F6DE1EC5A00080000" - "6203F49C21D5D6E022CB16DE3538F248662FC73CD89A24C70000002B75" - "1868747470733A2F2F677265677765697362726F642E636F6DE1EC5A00" - "0800006203F49C21D5D6E022CB16DE3538F248662FC73CDCBA29BA0000" - "0020751868747470733A2F2F677265677765697362726F642E636F6DE1" - "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CE0DA2E" - "B100000015751868747470733A2F2F677265677765697362726F642E63" - "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" - "E4FA33A40000000A751868747470733A2F2F677265677765697362726F" - "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" - "2FC73CEF7FF5C60000002C751868747470733A2F2F6772656777656973" - "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" - "F248662FC73CF39FFABD00000021751868747470733A2F2F6772656777" - "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" - "DE3538F248662FC73CF7BFFFB000000016751868747470733A2F2F6772" - "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" - "22CB16DE3538F248662FC73CFBE004A70000000B751868747470733A2F" - "2F677265677765697362726F642E636F6DE1F1E1E1E511006125001F71" - "B3556ED9C9459001E4F4A9121F4E07AB6D14898A5BBEF13D85C25D7435" - "40DB59F3CF56BE121B82D5812149D633F605EB07265A80B762A365CE94" - "883089FEEE4B955701E6240011CC9B202B0000002C6240000002540BE3" - "ECE1E72200000000240011CC9C2D0000000A202B0000002D202C000000" - "066240000002540BE3E081146203F49C21D5D6E022CB16DE3538F24866" - "2FC73CE1E1F1031000"; - 
std::string nftTxnHashHex = - "6C7F69A6D25A13AC4A2E9145999F45D4674F939900017A96885FDC2757" - "E9284E"; - ripple::uint256 nftID; - EXPECT_TRUE( - nftID.parseHex("000800006203F49C21D5D6E022CB16DE3538F248662" - "FC73CEF7FF5C60000002C")); - - std::string metaBlob = hexStringToBinaryString(metaHex); - std::string txnBlob = hexStringToBinaryString(txnHex); - std::string hashBlob = hexStringToBinaryString(hashHex); - std::string accountBlob = hexStringToBinaryString(accountHex); - std::string accountIndexBlob = - hexStringToBinaryString(accountIndexHex); - std::vector affectedAccounts; - - std::string nftTxnBlob = hexStringToBinaryString(nftTxnHex); - std::string nftTxnMetaBlob = - hexStringToBinaryString(nftTxnMeta); + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 1); + EXPECT_STREQ((const char*)txns[0].transaction.data(), (const char*)txnBlob.data()); + EXPECT_STREQ((const char*)txns[0].metadata.data(), (const char*)metaBlob.data()); + auto hashes = backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(hashes.size(), 1); + EXPECT_EQ(ripple::strHex(hashes[0]), hashHex); + for (auto& a : affectedAccounts) { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.txHash = ~lgrInfo.txHash; - lgrInfoNext.accountHash = - lgrInfoNext.accountHash ^ lgrInfoNext.txHash; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - - ripple::uint256 hash256; - EXPECT_TRUE(hash256.parseHex(hashHex)); - ripple::TxMeta txMeta{hash256, lgrInfoNext.seq, metaBlob}; - auto journal = ripple::debugLog(); - auto accountsSet = txMeta.getAffectedAccounts(); - for (auto& a : accountsSet) - { - affectedAccounts.push_back(a); - } - std::vector accountTxData; - accountTxData.emplace_back(txMeta, hash256, journal); - - ripple::uint256 nftHash256; - EXPECT_TRUE(nftHash256.parseHex(nftTxnHashHex)); - ripple::TxMeta nftTxMeta{ - nftHash256, lgrInfoNext.seq, nftTxnMetaBlob}; - ripple::SerialIter it{nftTxnBlob.data(), nftTxnBlob.size()}; - ripple::STTx sttx{it}; - auto const [parsedNFTTxsRef, parsedNFT] = - getNFTDataFromTx(nftTxMeta, sttx); - // need to copy the nft txns so we can std::move later - std::vector parsedNFTTxs; - parsedNFTTxs.insert( - parsedNFTTxs.end(), - parsedNFTTxsRef.begin(), - parsedNFTTxsRef.end()); - EXPECT_EQ(parsedNFTTxs.size(), 1); - EXPECT_TRUE(parsedNFT.has_value()); - EXPECT_EQ(parsedNFT->tokenID, nftID); - std::vector nftData; - nftData.push_back(*parsedNFT); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - backend->writeTransaction( - std::string{hashBlob}, - lgrInfoNext.seq, - lgrInfoNext.closeTime.time_since_epoch().count(), - std::string{txnBlob}, - std::string{metaBlob}); - backend->writeAccountTransactions(std::move(accountTxData)); - - // NFT writing not yet implemented for pg - if (config == cassandraConfig) - { - backend->writeNFTs(std::move(nftData)); - backend->writeNFTTransactions(std::move(parsedNFTTxs)); - } - else - { - EXPECT_THROW( - { backend->writeNFTs(std::move(nftData)); }, - std::runtime_error); - EXPECT_THROW( - { - 
backend->writeNFTTransactions( - std::move(parsedNFTTxs)); - }, - std::runtime_error); - } - - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{accountBlob}); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfoNext.seq, - std::string{accountIndexBlob}); - backend->writeSuccessor( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - uint256ToString(Backend::lastKey)); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); + auto [txns, cursor] = backend->fetchAccountTransactions(a, 100, true, {}, yield); EXPECT_EQ(txns.size(), 1); - EXPECT_STREQ( - (const char*)txns[0].transaction.data(), - (const char*)txnBlob.data()); - EXPECT_STREQ( - (const char*)txns[0].metadata.data(), - (const char*)metaBlob.data()); - auto hashes = backend->fetchAllTransactionHashesInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(hashes.size(), 1); - EXPECT_EQ(ripple::strHex(hashes[0]), hashHex); - for (auto& a : affectedAccounts) - { - auto [txns, cursor] = backend->fetchAccountTransactions( - a, 100, true, {}, yield); - EXPECT_EQ(txns.size(), 1); - EXPECT_EQ(txns[0], txns[0]); - EXPECT_FALSE(cursor); - } + EXPECT_EQ(txns[0], txns[0]); + EXPECT_FALSE(cursor); + } - // NFT fetching not yet implemented for pg - if (config == cassandraConfig) + // NFT fetching not yet implemented for pg + if (config == cassandraConfig) + { + auto nft = backend->fetchNFT(nftID, lgrInfoNext.seq, yield); + EXPECT_TRUE(nft.has_value()); + auto [nftTxns, cursor] = backend->fetchNFTTransactions(nftID, 100, true, {}, yield); + EXPECT_EQ(nftTxns.size(), 1); + EXPECT_EQ(nftTxns[0], nftTxns[0]); + EXPECT_FALSE(cursor); + } + else + { + EXPECT_THROW({ backend->fetchNFT(nftID, lgrInfoNext.seq, yield); }, std::runtime_error); + EXPECT_THROW({ backend->fetchNFTTransactions(nftID, 100, true, {}, yield); }, std::runtime_error); + } + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + // obtain a time-based seed: + unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); + std::string accountBlobOld = accountBlob; + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + std::shuffle(accountBlob.begin(), accountBlob.end(), std::default_random_engine(seed)); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{accountBlob}); + + 
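// Illustrative aside, not part of this patch: the writeSuccessor calls in this test maintain a
// sorted, singly linked list of ledger-object keys bracketed by the Backend::firstKey and
// Backend::lastKey sentinels, which is what successor-based iteration (fetchLedgerPage and
// cache().getSuccessor elsewhere in these tests) relies on. For one ledger's sorted key set,
// the edges being written look like the sketch below (successorEdges, first and last are
// placeholder names, not backend API):
#include <string>
#include <utility>
#include <vector>

static std::vector<std::pair<std::string, std::string>>
successorEdges(std::vector<std::string> const& sortedKeys, std::string const& first, std::string const& last)
{
    std::vector<std::pair<std::string, std::string>> edges;
    std::string prev = first;
    for (auto const& key : sortedKeys)
    {
        edges.emplace_back(prev, key);  // prev's successor at this sequence is key
        prev = key;
    }
    edges.emplace_back(prev, last);  // the final key points at the lastKey sentinel
    return edges;                    // an empty ledger degenerates to the single edge {first, last}
}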
ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{}); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfoNext.seq, uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 2, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + + auto generateObjects = [](size_t numObjects, uint32_t ledgerSequence) { + std::vector> res{numObjects}; + ripple::uint256 key; + key = ledgerSequence * 100000; + + for (auto& blob : res) + { + ++key; + std::string keyStr{(const char*)key.data(), key.size()}; + blob.first = keyStr; + blob.second = std::to_string(ledgerSequence) + keyStr; + } + return res; + }; + auto updateObjects = [](uint32_t ledgerSequence, auto objs) { + for (auto& [key, obj] : objs) + { + obj = std::to_string(ledgerSequence) + obj; + } + return objs; + }; + auto generateTxns = [](size_t numTxns, uint32_t ledgerSequence) { + std::vector> res{numTxns}; + ripple::uint256 base; + base = ledgerSequence * 100000; + for (auto& blob : res) + { + ++base; + std::string hashStr{(const 
char*)base.data(), base.size()}; + std::string txnStr = "tx" + std::to_string(ledgerSequence) + hashStr; + std::string metaStr = "meta" + std::to_string(ledgerSequence) + hashStr; + blob = std::make_tuple(hashStr, txnStr, metaStr); + } + return res; + }; + auto generateAccounts = [](uint32_t ledgerSequence, uint32_t numAccounts) { + std::vector accounts; + ripple::AccountID base; + base = ledgerSequence * 998765; + for (size_t i = 0; i < numAccounts; ++i) + { + ++base; + accounts.push_back(base); + } + return accounts; + }; + auto generateAccountTx = [&](uint32_t ledgerSequence, auto txns) { + std::vector ret; + auto accounts = generateAccounts(ledgerSequence, 10); + std::srand(std::time(nullptr)); + uint32_t idx = 0; + for (auto& [hash, txn, meta] : txns) + { + AccountTransactionsData data; + data.ledgerSequence = ledgerSequence; + data.transactionIndex = idx; + data.txHash = hash; + for (size_t i = 0; i < 3; ++i) { - auto nft = - backend->fetchNFT(nftID, lgrInfoNext.seq, yield); - EXPECT_TRUE(nft.has_value()); - auto [nftTxns, cursor] = backend->fetchNFTTransactions( - nftID, 100, true, {}, yield); - EXPECT_EQ(nftTxns.size(), 1); - EXPECT_EQ(nftTxns[0], nftTxns[0]); - EXPECT_FALSE(cursor); + data.accounts.insert(accounts[std::rand() % accounts.size()]); } + ++idx; + ret.push_back(data); + } + return ret; + }; + + auto generateNextLedger = [seed](auto lgrInfo) { + ++lgrInfo.seq; + lgrInfo.parentHash = lgrInfo.hash; + static auto randomEngine = std::default_random_engine(seed); + std::shuffle(lgrInfo.txHash.begin(), lgrInfo.txHash.end(), randomEngine); + std::shuffle(lgrInfo.accountHash.begin(), lgrInfo.accountHash.end(), randomEngine); + std::shuffle(lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); + return lgrInfo; + }; + auto writeLedger = [&](auto lgrInfo, auto txns, auto objs, auto accountTx, auto state) { + backend->startWrites(); + + backend->writeLedger(lgrInfo, ledgerInfoToBinaryString(lgrInfo)); + for (auto [hash, txn, meta] : txns) + { + backend->writeTransaction( + std::move(hash), + lgrInfo.seq, + lgrInfo.closeTime.time_since_epoch().count(), + std::move(txn), + std::move(meta)); + } + for (auto [key, obj] : objs) + { + backend->writeLedgerObject(std::string{key}, lgrInfo.seq, std::string{obj}); + } + if (state.count(lgrInfo.seq - 1) == 0 || + std::find_if(state[lgrInfo.seq - 1].begin(), state[lgrInfo.seq - 1].end(), [&](auto obj) { + return obj.first == objs[0].first; + }) == state[lgrInfo.seq - 1].end()) + { + for (size_t i = 0; i < objs.size(); ++i) + { + if (i + 1 < objs.size()) + backend->writeSuccessor( + std::string{objs[i].first}, lgrInfo.seq, std::string{objs[i + 1].first}); + else + backend->writeSuccessor( + std::string{objs[i].first}, lgrInfo.seq, uint256ToString(Backend::lastKey)); + } + if (state.count(lgrInfo.seq - 1)) + backend->writeSuccessor( + std::string{state[lgrInfo.seq - 1].back().first}, lgrInfo.seq, std::string{objs[0].first}); else - { - EXPECT_THROW( - { - backend->fetchNFT( - nftID, lgrInfoNext.seq, yield); - }, - std::runtime_error); - EXPECT_THROW( - { - backend->fetchNFTTransactions( - nftID, 100, true, {}, yield); - }, - std::runtime_error); - } - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - 
(const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfo.seq, std::string{objs[0].first}); } - // obtain a time-based seed: - unsigned seed = - std::chrono::system_clock::now().time_since_epoch().count(); - std::string accountBlobOld = accountBlob; + + backend->writeAccountTransactions(std::move(accountTx)); + + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + }; + + auto checkLedger = [&](auto lgrInfo, auto txns, auto objs, auto accountTx) { + auto rng = backend->fetchLedgerRange(); + auto seq = lgrInfo.seq; + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_GE(rng->maxSequence, seq); + auto retLgr = backend->fetchLedgerBySequence(seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfo)); + // retLgr = backend->fetchLedgerByHash(lgrInfo.hash); + // EXPECT_TRUE(retLgr); + // EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), + // RPC::ledgerInfoToBlob(lgrInfo)); + auto retTxns = backend->fetchAllTransactionsInLedger(seq, yield); + for (auto [hash, txn, meta] : txns) { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - lgrInfoNext.txHash = - lgrInfoNext.txHash ^ lgrInfoNext.accountHash; - lgrInfoNext.accountHash = - ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - std::shuffle( - accountBlob.begin(), - accountBlob.end(), - std::default_random_engine(seed)); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{accountBlob}); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + bool found = false; + for (auto [retTxn, retMeta, retSeq, retDate] : retTxns) + { + if (std::strncmp((const char*)retTxn.data(), (const char*)txn.data(), txn.size()) == 0 && + std::strncmp((const char*)retMeta.data(), (const char*)meta.data(), meta.size()) == 0) + found = true; + } + ASSERT_TRUE(found); } + for (auto [account, data] : accountTx) { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(txns.size(), 0); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq - 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlobOld.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); - } - { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - lgrInfoNext.txHash = - lgrInfoNext.txHash ^ lgrInfoNext.accountHash; - 
lgrInfoNext.accountHash = - ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{}); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfoNext.seq, - uint256ToString(Backend::lastKey)); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(txns.size(), 0); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq, yield); - EXPECT_FALSE(obj); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_FALSE(obj); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq - 2, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlobOld.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); - } - - auto generateObjects = [](size_t numObjects, - uint32_t ledgerSequence) { - std::vector> res{ - numObjects}; - ripple::uint256 key; - key = ledgerSequence * 100000; - - for (auto& blob : res) - { - ++key; - std::string keyStr{(const char*)key.data(), key.size()}; - blob.first = keyStr; - blob.second = std::to_string(ledgerSequence) + keyStr; - } - return res; - }; - auto updateObjects = [](uint32_t ledgerSequence, auto objs) { - for (auto& [key, obj] : objs) - { - obj = std::to_string(ledgerSequence) + obj; - } - return objs; - }; - auto generateTxns = [](size_t numTxns, - uint32_t ledgerSequence) { - std::vector< - std::tuple> - res{numTxns}; - ripple::uint256 base; - base = ledgerSequence * 100000; - for (auto& blob : res) - { - ++base; - std::string hashStr{ - (const char*)base.data(), base.size()}; - std::string txnStr = - "tx" + std::to_string(ledgerSequence) + hashStr; - std::string metaStr = - "meta" + std::to_string(ledgerSequence) + hashStr; - blob = std::make_tuple(hashStr, txnStr, metaStr); - } - return res; - }; - auto generateAccounts = [](uint32_t ledgerSequence, - uint32_t numAccounts) { - std::vector accounts; - ripple::AccountID base; - base = ledgerSequence * 998765; - for (size_t i = 0; i < numAccounts; ++i) - { - ++base; - accounts.push_back(base); - } - return accounts; - }; - auto generateAccountTx = [&](uint32_t ledgerSequence, - auto txns) { - std::vector ret; - auto accounts = generateAccounts(ledgerSequence, 10); - std::srand(std::time(nullptr)); - uint32_t idx = 0; - for (auto& [hash, txn, meta] : txns) - { - AccountTransactionsData data; - data.ledgerSequence = ledgerSequence; - data.transactionIndex = idx; - data.txHash = hash; - for (size_t i = 0; i < 3; ++i) - { - data.accounts.insert( - accounts[std::rand() % accounts.size()]); - } - ++idx; - ret.push_back(data); - } - return ret; - }; - - auto generateNextLedger = [seed](auto lgrInfo) { - ++lgrInfo.seq; - lgrInfo.parentHash = lgrInfo.hash; - static auto randomEngine = std::default_random_engine(seed); - std::shuffle( - lgrInfo.txHash.begin(), - lgrInfo.txHash.end(), - randomEngine); - 
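// Illustrative aside, not part of this patch: the randomized ledgers written below store only a
// small diff of (key, blob) pairs per sequence, and the flatten helper defined afterwards
// rebuilds the full object state at a chosen sequence before checkLedger verifies it. The core
// idea in isolation (Diff and stateAt are placeholder names):
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

using Diff = std::vector<std::pair<std::string, std::string>>;  // (key, blob) written in one ledger

static std::map<std::string, std::string>
stateAt(std::map<std::uint32_t, Diff> const& diffs, std::uint32_t maxSeq)
{
    std::map<std::string, std::string> objects;
    for (auto const& [seq, diff] : diffs)  // std::map iterates in ascending sequence order
    {
        for (auto const& [key, blob] : diff)
        {
            if (seq <= maxSeq)
                objects[key] = blob;  // the latest write at or before maxSeq wins
            else
                objects.try_emplace(key, "");  // key only appears later: empty blob, i.e. absent
        }
    }
    return objects;
}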
std::shuffle( - lgrInfo.accountHash.begin(), - lgrInfo.accountHash.end(), - randomEngine); - std::shuffle( - lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); - return lgrInfo; - }; - auto writeLedger = [&](auto lgrInfo, - auto txns, - auto objs, - auto accountTx, - auto state) { - backend->startWrites(); - - backend->writeLedger( - lgrInfo, ledgerInfoToBinaryString(lgrInfo)); - for (auto [hash, txn, meta] : txns) - { - backend->writeTransaction( - std::move(hash), - lgrInfo.seq, - lgrInfo.closeTime.time_since_epoch().count(), - std::move(txn), - std::move(meta)); - } - for (auto [key, obj] : objs) - { - backend->writeLedgerObject( - std::string{key}, lgrInfo.seq, std::string{obj}); - } - if (state.count(lgrInfo.seq - 1) == 0 || - std::find_if( - state[lgrInfo.seq - 1].begin(), - state[lgrInfo.seq - 1].end(), - [&](auto obj) { - return obj.first == objs[0].first; - }) == state[lgrInfo.seq - 1].end()) - { - for (size_t i = 0; i < objs.size(); ++i) - { - if (i + 1 < objs.size()) - backend->writeSuccessor( - std::string{objs[i].first}, - lgrInfo.seq, - std::string{objs[i + 1].first}); - else - backend->writeSuccessor( - std::string{objs[i].first}, - lgrInfo.seq, - uint256ToString(Backend::lastKey)); - } - if (state.count(lgrInfo.seq - 1)) - backend->writeSuccessor( - std::string{ - state[lgrInfo.seq - 1].back().first}, - lgrInfo.seq, - std::string{objs[0].first}); - else - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfo.seq, - std::string{objs[0].first}); - } - - backend->writeAccountTransactions(std::move(accountTx)); - - ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); - }; - - auto checkLedger = [&](auto lgrInfo, - auto txns, - auto objs, - auto accountTx) { - auto rng = backend->fetchLedgerRange(); - auto seq = lgrInfo.seq; - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_GE(rng->maxSequence, seq); - auto retLgr = backend->fetchLedgerBySequence(seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfo)); - // retLgr = backend->fetchLedgerByHash(lgrInfo.hash); - // EXPECT_TRUE(retLgr); - // EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), - // RPC::ledgerInfoToBlob(lgrInfo)); - auto retTxns = - backend->fetchAllTransactionsInLedger(seq, yield); - for (auto [hash, txn, meta] : txns) - { - bool found = false; - for (auto [retTxn, retMeta, retSeq, retDate] : retTxns) - { - if (std::strncmp( - (const char*)retTxn.data(), - (const char*)txn.data(), - txn.size()) == 0 && - std::strncmp( - (const char*)retMeta.data(), - (const char*)meta.data(), - meta.size()) == 0) - found = true; - } - ASSERT_TRUE(found); - } - for (auto [account, data] : accountTx) - { - std::vector retData; - std::optional cursor; - do - { - uint32_t limit = 10; - auto [txns, retCursor] = - backend->fetchAccountTransactions( - account, limit, false, cursor, yield); - if (retCursor) - EXPECT_EQ(txns.size(), limit); - retData.insert( - retData.end(), txns.begin(), txns.end()); - cursor = retCursor; - } while (cursor); - EXPECT_EQ(retData.size(), data.size()); - for (size_t i = 0; i < retData.size(); ++i) - { - auto [txn, meta, seq, date] = retData[i]; - auto [hash, expTxn, expMeta] = data[i]; - EXPECT_STREQ( - (const char*)txn.data(), - (const char*)expTxn.data()); - EXPECT_STREQ( - (const char*)meta.data(), - (const char*)expMeta.data()); - } - } - std::vector keys; - for (auto [key, obj] : objs) - { - auto retObj = backend->fetchLedgerObject( - binaryStringToUint256(key), seq, yield); - if (obj.size()) - { - 
ASSERT_TRUE(retObj.has_value()); - EXPECT_STREQ( - (const char*)obj.data(), - (const char*)retObj->data()); - } - else - { - ASSERT_FALSE(retObj.has_value()); - } - keys.push_back(binaryStringToUint256(key)); - } - - { - auto retObjs = - backend->fetchLedgerObjects(keys, seq, yield); - ASSERT_EQ(retObjs.size(), objs.size()); - - for (size_t i = 0; i < keys.size(); ++i) - { - auto [key, obj] = objs[i]; - auto retObj = retObjs[i]; - if (obj.size()) - { - ASSERT_TRUE(retObj.size()); - EXPECT_STREQ( - (const char*)obj.data(), - (const char*)retObj.data()); - } - else - { - ASSERT_FALSE(retObj.size()); - } - } - } - - Backend::LedgerPage page; - std::vector retObjs; + std::vector retData; + std::optional cursor; do { uint32_t limit = 10; - page = backend->fetchLedgerPage( - page.cursor, seq, limit, false, yield); - // if (page.cursor) - // EXPECT_EQ(page.objects.size(), limit); - retObjs.insert( - retObjs.end(), - page.objects.begin(), - page.objects.end()); - } while (page.cursor); - - for (auto obj : objs) + auto [txns, retCursor] = + backend->fetchAccountTransactions(account, limit, false, cursor, yield); + if (retCursor) + EXPECT_EQ(txns.size(), limit); + retData.insert(retData.end(), txns.begin(), txns.end()); + cursor = retCursor; + } while (cursor); + EXPECT_EQ(retData.size(), data.size()); + for (size_t i = 0; i < retData.size(); ++i) { - bool found = false; - for (auto retObj : retObjs) - { - if (ripple::strHex(obj.first) == - ripple::strHex(retObj.key)) - { - found = true; - ASSERT_EQ( - ripple::strHex(obj.second), - ripple::strHex(retObj.blob)); - } - } - if (found != (obj.second.size() != 0)) - ASSERT_EQ(found, obj.second.size() != 0); - } - }; - - std::map< - uint32_t, - std::vector>> - state; - std::map< - uint32_t, - std::vector< - std::tuple>> - allTxns; - std::unordered_map< - std::string, - std::pair> - allTxnsMap; - std::map< - uint32_t, - std::map>> - allAccountTx; - std::map lgrInfos; - for (size_t i = 0; i < 10; ++i) - { - lgrInfoNext = generateNextLedger(lgrInfoNext); - auto objs = generateObjects(25, lgrInfoNext.seq); - auto txns = generateTxns(10, lgrInfoNext.seq); - auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); - for (auto rec : accountTx) - { - for (auto account : rec.accounts) - { - allAccountTx[lgrInfoNext.seq][account].push_back( - std::string{ - (const char*)rec.txHash.data(), - rec.txHash.size()}); - } - } - EXPECT_EQ(objs.size(), 25); - EXPECT_NE(objs[0], objs[1]); - EXPECT_EQ(txns.size(), 10); - EXPECT_NE(txns[0], txns[1]); - std::sort(objs.begin(), objs.end()); - state[lgrInfoNext.seq] = objs; - writeLedger(lgrInfoNext, txns, objs, accountTx, state); - allTxns[lgrInfoNext.seq] = txns; - lgrInfos[lgrInfoNext.seq] = lgrInfoNext; - for (auto& [hash, txn, meta] : txns) - { - allTxnsMap[hash] = std::make_pair(txn, meta); + auto [txn, meta, seq, date] = retData[i]; + auto [hash, expTxn, expMeta] = data[i]; + EXPECT_STREQ((const char*)txn.data(), (const char*)expTxn.data()); + EXPECT_STREQ((const char*)meta.data(), (const char*)expMeta.data()); } } - - std::vector> objs; - for (size_t i = 0; i < 10; ++i) + std::vector keys; + for (auto [key, obj] : objs) { - lgrInfoNext = generateNextLedger(lgrInfoNext); - if (!objs.size()) - objs = generateObjects(25, lgrInfoNext.seq); + auto retObj = backend->fetchLedgerObject(binaryStringToUint256(key), seq, yield); + if (obj.size()) + { + ASSERT_TRUE(retObj.has_value()); + EXPECT_STREQ((const char*)obj.data(), (const char*)retObj->data()); + } else - objs = updateObjects(lgrInfoNext.seq, objs); - auto txns = 
generateTxns(10, lgrInfoNext.seq); - auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); - for (auto rec : accountTx) { - for (auto account : rec.accounts) - { - allAccountTx[lgrInfoNext.seq][account].push_back( - std::string{ - (const char*)rec.txHash.data(), - rec.txHash.size()}); - } + ASSERT_FALSE(retObj.has_value()); } - EXPECT_EQ(objs.size(), 25); - EXPECT_NE(objs[0], objs[1]); - EXPECT_EQ(txns.size(), 10); - EXPECT_NE(txns[0], txns[1]); - std::sort(objs.begin(), objs.end()); - state[lgrInfoNext.seq] = objs; - writeLedger(lgrInfoNext, txns, objs, accountTx, state); - allTxns[lgrInfoNext.seq] = txns; - lgrInfos[lgrInfoNext.seq] = lgrInfoNext; - for (auto& [hash, txn, meta] : txns) + keys.push_back(binaryStringToUint256(key)); + } + + { + auto retObjs = backend->fetchLedgerObjects(keys, seq, yield); + ASSERT_EQ(retObjs.size(), objs.size()); + + for (size_t i = 0; i < keys.size(); ++i) { - allTxnsMap[hash] = std::make_pair(txn, meta); + auto [key, obj] = objs[i]; + auto retObj = retObjs[i]; + if (obj.size()) + { + ASSERT_TRUE(retObj.size()); + EXPECT_STREQ((const char*)obj.data(), (const char*)retObj.data()); + } + else + { + ASSERT_FALSE(retObj.size()); + } } } - auto flatten = [&](uint32_t max) { - std::vector> flat; - std::map objs; - for (auto [seq, diff] : state) - { - for (auto [k, v] : diff) - { - if (seq > max) - { - if (objs.count(k) == 0) - objs[k] = ""; - } - else - { - objs[k] = v; - } - } - } - for (auto [key, value] : objs) - { - flat.push_back(std::make_pair(key, value)); - } - return flat; - }; - - auto flattenAccountTx = [&](uint32_t max) { - std::unordered_map< - ripple::AccountID, - std::vector< - std::tuple>> - accountTx; - for (auto [seq, map] : allAccountTx) - { - if (seq > max) - break; - for (auto& [account, hashes] : map) - { - for (auto& hash : hashes) - { - auto& [txn, meta] = allTxnsMap[hash]; - accountTx[account].push_back( - std::make_tuple(hash, txn, meta)); - } - } - } - for (auto& [account, data] : accountTx) - std::reverse(data.begin(), data.end()); - return accountTx; - }; - - for (auto [seq, diff] : state) + Backend::LedgerPage page; + std::vector retObjs; + do { - auto flat = flatten(seq); - checkLedger( - lgrInfos[seq], - allTxns[seq], - flat, - flattenAccountTx(seq)); + uint32_t limit = 10; + page = backend->fetchLedgerPage(page.cursor, seq, limit, false, yield); + // if (page.cursor) + // EXPECT_EQ(page.objects.size(), limit); + retObjs.insert(retObjs.end(), page.objects.begin(), page.objects.end()); + } while (page.cursor); + + for (auto obj : objs) + { + bool found = false; + for (auto retObj : retObjs) + { + if (ripple::strHex(obj.first) == ripple::strHex(retObj.key)) + { + found = true; + ASSERT_EQ(ripple::strHex(obj.second), ripple::strHex(retObj.blob)); + } + } + if (found != (obj.second.size() != 0)) + ASSERT_EQ(found, obj.second.size() != 0); + } + }; + + std::map>> state; + std::map>> allTxns; + std::unordered_map> allTxnsMap; + std::map>> allAccountTx; + std::map lgrInfos; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + auto objs = generateObjects(25, lgrInfoNext.seq); + auto txns = generateTxns(10, lgrInfoNext.seq); + auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); + for (auto rec : accountTx) + { + for (auto account : rec.accounts) + { + allAccountTx[lgrInfoNext.seq][account].push_back( + std::string{(const char*)rec.txHash.data(), rec.txHash.size()}); + } + } + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + EXPECT_EQ(txns.size(), 10); + EXPECT_NE(txns[0], 
txns[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, txns, objs, accountTx, state); + allTxns[lgrInfoNext.seq] = txns; + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + for (auto& [hash, txn, meta] : txns) + { + allTxnsMap[hash] = std::make_pair(txn, meta); } } - done = true; - work.reset(); - }); + std::vector> objs; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + if (!objs.size()) + objs = generateObjects(25, lgrInfoNext.seq); + else + objs = updateObjects(lgrInfoNext.seq, objs); + auto txns = generateTxns(10, lgrInfoNext.seq); + auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); + for (auto rec : accountTx) + { + for (auto account : rec.accounts) + { + allAccountTx[lgrInfoNext.seq][account].push_back( + std::string{(const char*)rec.txHash.data(), rec.txHash.size()}); + } + } + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + EXPECT_EQ(txns.size(), 10); + EXPECT_NE(txns[0], txns[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, txns, objs, accountTx, state); + allTxns[lgrInfoNext.seq] = txns; + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + for (auto& [hash, txn, meta] : txns) + { + allTxnsMap[hash] = std::make_pair(txn, meta); + } + } + + auto flatten = [&](uint32_t max) { + std::vector> flat; + std::map objs; + for (auto [seq, diff] : state) + { + for (auto [k, v] : diff) + { + if (seq > max) + { + if (objs.count(k) == 0) + objs[k] = ""; + } + else + { + objs[k] = v; + } + } + } + for (auto [key, value] : objs) + { + flat.push_back(std::make_pair(key, value)); + } + return flat; + }; + + auto flattenAccountTx = [&](uint32_t max) { + std::unordered_map>> + accountTx; + for (auto [seq, map] : allAccountTx) + { + if (seq > max) + break; + for (auto& [account, hashes] : map) + { + for (auto& hash : hashes) + { + auto& [txn, meta] = allTxnsMap[hash]; + accountTx[account].push_back(std::make_tuple(hash, txn, meta)); + } + } + } + for (auto& [account, data] : accountTx) + std::reverse(data.begin(), data.end()); + return accountTx; + }; + + for (auto [seq, diff] : state) + { + auto flat = flatten(seq); + checkLedger(lgrInfos[seq], allTxns[seq], flat, flattenAccountTx(seq)); + } + } + + done = true; + work.reset(); + }); ioc.run(); EXPECT_EQ(done, true); @@ -1176,8 +959,7 @@ TEST_F(BackendTest, Basic) TEST_F(BackendTest, cache) { using namespace Backend; - boost::log::core::get()->set_filter( - clio::log_severity >= clio::Severity::WRN); + boost::log::core::get()->set_filter(clio::log_severity >= clio::Severity::WRN); SimpleCache cache; ASSERT_FALSE(cache.isFull()); cache.setFull(); @@ -1285,9 +1067,7 @@ TEST_F(BackendTest, cache) objs.resize(10); for (size_t i = 0; i < objs.size(); ++i) { - objs[i] = { - ripple::uint256{i * 100 + 1}, - {(unsigned char)i, (unsigned char)i * 2, (unsigned char)i + 1}}; + objs[i] = {ripple::uint256{i * 100 + 1}, {(unsigned char)i, (unsigned char)i * 2, (unsigned char)i + 1}}; } cache.update(objs, curSeq); { @@ -1315,9 +1095,7 @@ TEST_F(BackendTest, cache) auto objs2 = objs; for (size_t i = 0; i < objs.size(); ++i) { - objs2[i] = { - ripple::uint256{i * 100 + 50}, - {(unsigned char)i, (unsigned char)i * 3, (unsigned char)i + 5}}; + objs2[i] = {ripple::uint256{i * 100 + 50}, {(unsigned char)i, (unsigned char)i * 3, (unsigned char)i + 5}}; } cache.update(objs2, curSeq); { @@ -1397,14 +1175,9 @@ TEST_F(BackendTest, cache) auto allObjs = objs; allObjs.clear(); std::copy_if( - objs.begin(), - objs.end(), - 
std::back_inserter(allObjs), - [](auto obj) { return obj.blob.size() > 0; }); + objs.begin(), objs.end(), std::back_inserter(allObjs), [](auto obj) { return obj.blob.size() > 0; }); std::copy(objs2.begin(), objs2.end(), std::back_inserter(allObjs)); - std::sort(allObjs.begin(), allObjs.end(), [](auto a, auto b) { - return a.key < b.key; - }); + std::sort(allObjs.begin(), allObjs.end(), [](auto a, auto b) { return a.key < b.key; }); std::optional succ = {{firstKey, {}}}; size_t idx = 0; while ((succ = cache.getSuccessor(succ->key, curSeq))) @@ -1418,8 +1191,7 @@ TEST_F(BackendTest, cache) TEST_F(BackendTest, cacheBackground) { using namespace Backend; - boost::log::core::get()->set_filter( - clio::log_severity >= clio::Severity::WRN); + boost::log::core::get()->set_filter(clio::log_severity >= clio::Severity::WRN); SimpleCache cache; ASSERT_FALSE(cache.isFull()); ASSERT_EQ(cache.size(), 0); @@ -1475,9 +1247,7 @@ TEST_F(BackendTest, cacheBackground) auto& obj = bObjs[i]; auto cacheObj = cache.get(obj.key, curSeq); ASSERT_TRUE(cacheObj); - auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { - return o.key == obj.key; - }); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { return o.key == obj.key; }); if (newObj == objs1.end()) { ASSERT_EQ(*cacheObj, obj.blob); @@ -1496,8 +1266,7 @@ TEST_F(BackendTest, cacheBackground) { auto objs = bObjs; objs.clear(); - std::copy( - bObjs.begin() + 10, bObjs.begin() + 20, std::back_inserter(objs)); + std::copy(bObjs.begin() + 10, bObjs.begin() + 20, std::back_inserter(objs)); cache.update(objs, startSeq, true); } { @@ -1513,9 +1282,7 @@ TEST_F(BackendTest, cacheBackground) auto& obj = bObjs[i]; auto cacheObj = cache.get(obj.key, curSeq); ASSERT_TRUE(cacheObj); - auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { - return o.key == obj.key; - }); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { return o.key == obj.key; }); if (newObj == objs1.end()) { ASSERT_EQ(*cacheObj, obj.blob); @@ -1566,9 +1333,7 @@ TEST_F(BackendTest, cacheBackground) auto& obj = bObjs[i]; auto cacheObj = cache.get(obj.key, curSeq); ASSERT_TRUE(cacheObj); - auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { - return o.key == obj.key; - }); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { return o.key == obj.key; }); if (newObj == objs1.end()) { ASSERT_EQ(*cacheObj, obj.blob); @@ -1587,8 +1352,7 @@ TEST_F(BackendTest, cacheBackground) { auto objs = bObjs; objs.clear(); - std::copy( - bObjs.begin() + 20, bObjs.begin() + 30, std::back_inserter(objs)); + std::copy(bObjs.begin() + 20, bObjs.begin() + 30, std::back_inserter(objs)); cache.update(objs, startSeq, true); } { @@ -1611,9 +1375,7 @@ TEST_F(BackendTest, cacheBackground) auto& obj = bObjs[i]; auto cacheObj = cache.get(obj.key, curSeq); ASSERT_TRUE(cacheObj); - auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { - return o.key == obj.key; - }); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { return o.key == obj.key; }); if (newObj == objs1.end()) { ASSERT_EQ(*cacheObj, obj.blob); @@ -1647,9 +1409,7 @@ TEST_F(BackendTest, cacheBackground) for (auto& obj : objs1) { auto cacheObj = cache.get(obj.key, curSeq); - if (std::find_if(objs3.begin(), objs3.end(), [&](auto o) { - return o.key == obj.key; - }) == objs3.end()) + if (std::find_if(objs3.begin(), objs3.end(), [&](auto o) { return o.key == obj.key; }) == objs3.end()) { ASSERT_TRUE(cacheObj); ASSERT_EQ(*cacheObj, obj.blob); @@ 
-1677,12 +1437,8 @@ TEST_F(BackendTest, cacheBackground) { auto& obj = bObjs[i]; auto cacheObj = cache.get(obj.key, curSeq); - auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { - return o.key == obj.key; - }); - auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { - return o.key == obj.key; - }); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { return o.key == obj.key; }); + auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { return o.key == obj.key; }); if (delObj != objs3.end()) { ASSERT_FALSE(cacheObj); @@ -1713,9 +1469,7 @@ TEST_F(BackendTest, cacheBackground) for (auto& obj : objs1) { auto cacheObj = cache.get(obj.key, curSeq); - if (std::find_if(objs3.begin(), objs3.end(), [&](auto o) { - return o.key == obj.key; - }) == objs3.end()) + if (std::find_if(objs3.begin(), objs3.end(), [&](auto o) { return o.key == obj.key; }) == objs3.end()) { ASSERT_TRUE(cacheObj); ASSERT_EQ(*cacheObj, obj.blob); @@ -1743,12 +1497,8 @@ TEST_F(BackendTest, cacheBackground) { auto& obj = bObjs[i]; auto cacheObj = cache.get(obj.key, curSeq); - auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { - return o.key == obj.key; - }); - auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { - return o.key == obj.key; - }); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { return o.key == obj.key; }); + auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { return o.key == obj.key; }); if (delObj != objs3.end()) { ASSERT_FALSE(cacheObj); @@ -1776,12 +1526,8 @@ TEST_F(BackendTest, cacheBackground) { auto& obj = bObjs[i]; auto cacheObj = cache.get(obj.key, curSeq); - auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { - return o.key == obj.key; - }); - auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { - return o.key == obj.key; - }); + auto newObj = std::find_if(objs1.begin(), objs1.end(), [&](auto o) { return o.key == obj.key; }); + auto delObj = std::find_if(objs3.begin(), objs3.end(), [&](auto o) { return o.key == obj.key; }); if (delObj != objs3.end()) { ASSERT_FALSE(cacheObj); @@ -1807,9 +1553,7 @@ TEST_F(BackendTest, cacheBackground) { allObjs.push_back(obj); } - std::sort(allObjs.begin(), allObjs.end(), [](auto a, auto b) { - return a.key < b.key; - }); + std::sort(allObjs.begin(), allObjs.end(), [](auto a, auto b) { return a.key < b.key; }); std::optional succ = {{firstKey, {}}}; size_t idx = 0; while ((succ = cache.getSuccessor(succ->key, curSeq))) @@ -1826,620 +1570,479 @@ TEST_F(BackendTest, cacheIntegration) work.emplace(ioc); std::atomic_bool done = false; - boost::asio::spawn( - ioc, [&ioc, &done, &work](boost::asio::yield_context yield) { - boost::log::core::get()->set_filter( - clio::log_severity >= clio::Severity::WRN); - std::string keyspace = "clio_test_" + - std::to_string(std::chrono::system_clock::now() - .time_since_epoch() - .count()); - boost::json::object cassandraConfig{ - {"database", - {{"type", "cassandra"}, - {"cassandra", - {{"contact_points", "127.0.0.1"}, - {"port", 9042}, - {"keyspace", keyspace.c_str()}, - {"replication_factor", 1}, - {"table_prefix", ""}, - {"max_requests_outstanding", 1000}, - {"indexer_key_shift", 2}, - {"threads", 8}}}}}}; - std::vector configs = {cassandraConfig}; - for (auto& config : configs) + boost::asio::spawn(ioc, [&ioc, &done, &work](boost::asio::yield_context yield) { + boost::log::core::get()->set_filter(clio::log_severity >= clio::Severity::WRN); + std::string keyspace = + 
"clio_test_" + std::to_string(std::chrono::system_clock::now().time_since_epoch().count()); + boost::json::object cassandraConfig{ + {"database", + {{"type", "cassandra"}, + {"cassandra", + {{"contact_points", "127.0.0.1"}, + {"port", 9042}, + {"keyspace", keyspace.c_str()}, + {"replication_factor", 1}, + {"table_prefix", ""}, + {"max_requests_outstanding", 1000}, + {"indexer_key_shift", 2}, + {"threads", 8}}}}}}; + std::vector configs = {cassandraConfig}; + for (auto& config : configs) + { + auto backend = Backend::make_Backend(ioc, clio::Config{config}); + backend->cache().setFull(); + + std::string rawHeader = + "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335" + "BC54351E" + "DD73" + "3898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" + "315A6DB6" + "FE30" + "CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED3CF5" + "3E2232B3" + "3EF5" + "7CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE282656A58CE5A" + "A29652EF" + "FD80" + "AC59CD91416E4E13DBBE"; + // this account is not related to the above transaction and + // metadata + std::string accountHex = + "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD" + "018EFFBE" + "17C5" + "C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" + "07811422" + "52F3" + "28CF91263417762570D67220CCB33B1370"; + std::string accountIndexHex = + "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C" + "004F06"; + + auto hexStringToBinaryString = [](auto const& hex) { + auto blob = ripple::strUnHex(hex); + std::string strBlob; + for (auto c : *blob) + { + strBlob += c; + } + return strBlob; + }; + auto binaryStringToUint256 = [](auto const& bin) -> ripple::uint256 { + ripple::uint256 uint; + return uint.fromVoid((void const*)bin.data()); + }; + auto ledgerInfoToBinaryString = [](auto const& info) { + auto blob = RPC::ledgerInfoToBlob(info, true); + std::string strBlob; + for (auto c : blob) + { + strBlob += c; + } + return strBlob; + }; + + std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); + std::string accountBlob = hexStringToBinaryString(accountHex); + std::string accountIndexBlob = hexStringToBinaryString(accountIndexHex); + ripple::LedgerInfo lgrInfo = deserializeHeader(ripple::makeSlice(rawHeaderBlob)); + + backend->startWrites(); + backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); + backend->writeSuccessor(uint256ToString(Backend::firstKey), lgrInfo.seq, uint256ToString(Backend::lastKey)); + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); { - auto backend = Backend::make_Backend(ioc, clio::Config{config}); - backend->cache().setFull(); - - std::string rawHeader = - "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335" - "BC54351E" - "DD73" - "3898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" - "315A6DB6" - "FE30" - "CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED3CF5" - "3E2232B3" - "3EF5" - "7CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE282656A58CE5A" - "A29652EF" - "FD80" - "AC59CD91416E4E13DBBE"; - // this account is not related to the above transaction and - // metadata - std::string accountHex = - "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD" - "018EFFBE" - "17C5" - "C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" - "07811422" - "52F3" - "28CF91263417762570D67220CCB33B1370"; - std::string accountIndexHex = - "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C" - "004F06"; - - auto hexStringToBinaryString = [](auto const& hex) { - auto blob = ripple::strUnHex(hex); - std::string strBlob; - for (auto c : *blob) - { - strBlob += 
c; - } - return strBlob; - }; - auto binaryStringToUint256 = - [](auto const& bin) -> ripple::uint256 { - ripple::uint256 uint; - return uint.fromVoid((void const*)bin.data()); - }; - auto ledgerInfoToBinaryString = [](auto const& info) { - auto blob = RPC::ledgerInfoToBlob(info, true); - std::string strBlob; - for (auto c : blob) - { - strBlob += c; - } - return strBlob; - }; - - std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); - std::string accountBlob = hexStringToBinaryString(accountHex); - std::string accountIndexBlob = - hexStringToBinaryString(accountIndexHex); - ripple::LedgerInfo lgrInfo = - deserializeHeader(ripple::makeSlice(rawHeaderBlob)); - - backend->startWrites(); - backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfo.seq, - uint256ToString(Backend::lastKey)); - ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng.has_value()); - EXPECT_EQ(rng->minSequence, rng->maxSequence); - EXPECT_EQ(rng->maxSequence, lgrInfo.seq); - } - { - auto seq = backend->fetchLatestLedgerSequence(yield); - EXPECT_TRUE(seq.has_value()); - EXPECT_EQ(*seq, lgrInfo.seq); - } - - { - auto retLgr = - backend->fetchLedgerBySequence(lgrInfo.seq, yield); - ASSERT_TRUE(retLgr.has_value()); - EXPECT_EQ(retLgr->seq, lgrInfo.seq); - EXPECT_EQ( - RPC::ledgerInfoToBlob(lgrInfo), - RPC::ledgerInfoToBlob(*retLgr)); - } - - EXPECT_FALSE( - backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield) - .has_value()); - auto lgrInfoOld = lgrInfo; - - auto lgrInfoNext = lgrInfo; - lgrInfoNext.seq = lgrInfo.seq + 1; - lgrInfoNext.parentHash = lgrInfo.hash; - lgrInfoNext.hash++; - lgrInfoNext.accountHash = ~lgrInfo.accountHash; - { - std::string rawHeaderBlob = - ledgerInfoToBinaryString(lgrInfoNext); - - backend->startWrites(); - backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng.has_value()); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - } - { - auto seq = backend->fetchLatestLedgerSequence(yield); - EXPECT_EQ(seq, lgrInfoNext.seq); - } - { - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr.has_value()); - EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - EXPECT_NE( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoOld)); - retLgr = backend->fetchLedgerBySequence( - lgrInfoNext.seq - 1, yield); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoOld)); - - EXPECT_NE( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - retLgr = backend->fetchLedgerBySequence( - lgrInfoNext.seq - 2, yield); - EXPECT_FALSE( - backend - ->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield) - .has_value()); - - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(txns.size(), 0); - auto hashes = backend->fetchAllTransactionHashesInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(hashes.size(), 0); - } - - { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.txHash = ~lgrInfo.txHash; - lgrInfoNext.accountHash = - lgrInfoNext.accountHash ^ lgrInfoNext.txHash; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - - backend->writeLedger( - lgrInfoNext, 
ledgerInfoToBinaryString(lgrInfoNext)); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{accountBlob}); - auto key = - ripple::uint256::fromVoidChecked(accountIndexBlob); - backend->cache().update( - {{*key, {accountBlob.begin(), accountBlob.end()}}}, - lgrInfoNext.seq); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfoNext.seq, - std::string{accountIndexBlob}); - backend->writeSuccessor( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - uint256ToString(Backend::lastKey)); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); - } - // obtain a time-based seed: - unsigned seed = - std::chrono::system_clock::now().time_since_epoch().count(); - std::string accountBlobOld = accountBlob; - { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - lgrInfoNext.txHash = - lgrInfoNext.txHash ^ lgrInfoNext.accountHash; - lgrInfoNext.accountHash = - ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - std::shuffle( - accountBlob.begin(), - accountBlob.end(), - std::default_random_engine(seed)); - auto key = - ripple::uint256::fromVoidChecked(accountIndexBlob); - backend->cache().update( - {{*key, {accountBlob.begin(), accountBlob.end()}}}, - lgrInfoNext.seq); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{accountBlob}); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq - 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlobOld.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); - } - { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - 
lgrInfoNext.txHash = - lgrInfoNext.txHash ^ lgrInfoNext.accountHash; - lgrInfoNext.accountHash = - ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - auto key = - ripple::uint256::fromVoidChecked(accountIndexBlob); - backend->cache().update({{*key, {}}}, lgrInfoNext.seq); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{}); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfoNext.seq, - uint256ToString(Backend::lastKey)); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq, yield); - EXPECT_FALSE(obj); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_FALSE(obj); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq - 2, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlobOld.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); - } - - auto generateObjects = [](size_t numObjects, - uint32_t ledgerSequence) { - std::vector> res{ - numObjects}; - ripple::uint256 key; - key = ledgerSequence * 100000; - - for (auto& blob : res) - { - ++key; - std::string keyStr{(const char*)key.data(), key.size()}; - blob.first = keyStr; - blob.second = std::to_string(ledgerSequence) + keyStr; - } - return res; - }; - auto updateObjects = [](uint32_t ledgerSequence, auto objs) { - for (auto& [key, obj] : objs) - { - obj = std::to_string(ledgerSequence) + obj; - } - return objs; - }; - - auto generateNextLedger = [seed](auto lgrInfo) { - ++lgrInfo.seq; - lgrInfo.parentHash = lgrInfo.hash; - static auto randomEngine = std::default_random_engine(seed); - std::shuffle( - lgrInfo.txHash.begin(), - lgrInfo.txHash.end(), - randomEngine); - std::shuffle( - lgrInfo.accountHash.begin(), - lgrInfo.accountHash.end(), - randomEngine); - std::shuffle( - lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); - return lgrInfo; - }; - auto writeLedger = [&](auto lgrInfo, auto objs, auto state) { - backend->startWrites(); - - backend->writeLedger( - lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo))); - std::vector cacheUpdates; - for (auto [key, obj] : objs) - { - backend->writeLedgerObject( - std::string{key}, lgrInfo.seq, std::string{obj}); - auto key256 = ripple::uint256::fromVoidChecked(key); - cacheUpdates.push_back( - {*key256, {obj.begin(), obj.end()}}); - } - backend->cache().update(cacheUpdates, lgrInfo.seq); - if (state.count(lgrInfo.seq - 1) == 0 || - std::find_if( - state[lgrInfo.seq - 1].begin(), - state[lgrInfo.seq - 1].end(), - [&](auto obj) { - return obj.first == objs[0].first; - }) == state[lgrInfo.seq - 1].end()) - { - for (size_t i = 0; i < objs.size(); ++i) - { - if (i + 1 < objs.size()) - backend->writeSuccessor( - std::string{objs[i].first}, - lgrInfo.seq, - std::string{objs[i + 1].first}); - else - backend->writeSuccessor( - std::string{objs[i].first}, - lgrInfo.seq, - uint256ToString(Backend::lastKey)); - } - if (state.count(lgrInfo.seq - 1)) - backend->writeSuccessor( - std::string{ - state[lgrInfo.seq - 
1].back().first}, - lgrInfo.seq, - std::string{objs[0].first}); - else - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfo.seq, - std::string{objs[0].first}); - } - - ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); - }; - - auto checkLedger = [&](auto lgrInfo, auto objs) { - auto rng = backend->fetchLedgerRange(); - auto seq = lgrInfo.seq; - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_GE(rng->maxSequence, seq); - auto retLgr = backend->fetchLedgerBySequence(seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfo)); - retLgr = backend->fetchLedgerByHash(lgrInfo.hash, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfo)) - << "retLgr seq:" << retLgr->seq - << "; lgrInfo seq:" << lgrInfo.seq - << "; retLgr hash:" << retLgr->hash - << "; lgrInfo hash:" << lgrInfo.hash - << "; retLgr parentHash:" << retLgr->parentHash - << "; lgr Info parentHash:" << lgrInfo.parentHash; - - std::vector keys; - for (auto [key, obj] : objs) - { - auto retObj = backend->fetchLedgerObject( - binaryStringToUint256(key), seq, yield); - if (obj.size()) - { - ASSERT_TRUE(retObj.has_value()); - EXPECT_STREQ( - (const char*)obj.data(), - (const char*)retObj->data()); - } - else - { - ASSERT_FALSE(retObj.has_value()); - } - keys.push_back(binaryStringToUint256(key)); - } - - { - auto retObjs = - backend->fetchLedgerObjects(keys, seq, yield); - ASSERT_EQ(retObjs.size(), objs.size()); - - for (size_t i = 0; i < keys.size(); ++i) - { - auto [key, obj] = objs[i]; - auto retObj = retObjs[i]; - if (obj.size()) - { - ASSERT_TRUE(retObj.size()); - EXPECT_STREQ( - (const char*)obj.data(), - (const char*)retObj.data()); - } - else - { - ASSERT_FALSE(retObj.size()); - } - } - } - Backend::LedgerPage page; - std::vector retObjs; - do - { - uint32_t limit = 10; - page = backend->fetchLedgerPage( - page.cursor, seq, limit, false, yield); - // if (page.cursor) - // EXPECT_EQ(page.objects.size(), limit); - retObjs.insert( - retObjs.end(), - page.objects.begin(), - page.objects.end()); - } while (page.cursor); - for (auto obj : objs) - { - bool found = false; - for (auto retObj : retObjs) - { - if (ripple::strHex(obj.first) == - ripple::strHex(retObj.key)) - { - found = true; - ASSERT_EQ( - ripple::strHex(obj.second), - ripple::strHex(retObj.blob)); - } - } - if (found != (obj.second.size() != 0)) - ASSERT_EQ(found, obj.second.size() != 0); - } - }; - - std::map< - uint32_t, - std::vector>> - state; - std::map lgrInfos; - for (size_t i = 0; i < 10; ++i) - { - lgrInfoNext = generateNextLedger(lgrInfoNext); - auto objs = generateObjects(25, lgrInfoNext.seq); - EXPECT_EQ(objs.size(), 25); - EXPECT_NE(objs[0], objs[1]); - std::sort(objs.begin(), objs.end()); - state[lgrInfoNext.seq] = objs; - writeLedger(lgrInfoNext, objs, state); - lgrInfos[lgrInfoNext.seq] = lgrInfoNext; - } - - std::vector> objs; - for (size_t i = 0; i < 10; ++i) - { - lgrInfoNext = generateNextLedger(lgrInfoNext); - if (!objs.size()) - objs = generateObjects(25, lgrInfoNext.seq); - else - objs = updateObjects(lgrInfoNext.seq, objs); - EXPECT_EQ(objs.size(), 25); - EXPECT_NE(objs[0], objs[1]); - std::sort(objs.begin(), objs.end()); - state[lgrInfoNext.seq] = objs; - writeLedger(lgrInfoNext, objs, state); - lgrInfos[lgrInfoNext.seq] = lgrInfoNext; - } - - auto flatten = [&](uint32_t max) { - std::vector> flat; - std::map objs; - for (auto [seq, diff] : state) - { - for (auto [k, v] : diff) - { - if (seq > 
max) - { - if (objs.count(k) == 0) - objs[k] = ""; - } - else - { - objs[k] = v; - } - } - } - for (auto [key, value] : objs) - { - flat.push_back(std::make_pair(key, value)); - } - return flat; - }; - - for (auto [seq, diff] : state) - { - auto flat = flatten(seq); - checkLedger(lgrInfos[seq], flat); - } + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, rng->maxSequence); + EXPECT_EQ(rng->maxSequence, lgrInfo.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_TRUE(seq.has_value()); + EXPECT_EQ(*seq, lgrInfo.seq); } - done = true; - work.reset(); - }); + { + auto retLgr = backend->fetchLedgerBySequence(lgrInfo.seq, yield); + ASSERT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfo.seq); + EXPECT_EQ(RPC::ledgerInfoToBlob(lgrInfo), RPC::ledgerInfoToBlob(*retLgr)); + } + + EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield).has_value()); + auto lgrInfoOld = lgrInfo; + + auto lgrInfoNext = lgrInfo; + lgrInfoNext.seq = lgrInfo.seq + 1; + lgrInfoNext.parentHash = lgrInfo.hash; + lgrInfoNext.hash++; + lgrInfoNext.accountHash = ~lgrInfo.accountHash; + { + std::string rawHeaderBlob = ledgerInfoToBinaryString(lgrInfoNext); + + backend->startWrites(); + backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_EQ(seq, lgrInfoNext.seq); + } + { + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + EXPECT_NE(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoOld)); + retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 1, yield); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoOld)); + + EXPECT_NE(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield); + EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield).has_value()); + + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + auto hashes = backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(hashes.size(), 0); + } + + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.txHash = ~lgrInfo.txHash; + lgrInfoNext.accountHash = lgrInfoNext.accountHash ^ lgrInfoNext.txHash; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{accountBlob}); + auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update({{*key, {accountBlob.begin(), accountBlob.end()}}}, lgrInfoNext.seq); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfoNext.seq, std::string{accountIndexBlob}); + backend->writeSuccessor( + std::string{accountIndexBlob}, lgrInfoNext.seq, uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + 
EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + // obtain a time-based seed: + unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); + std::string accountBlobOld = accountBlob; + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + std::shuffle(accountBlob.begin(), accountBlob.end(), std::default_random_engine(seed)); + auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update({{*key, {accountBlob.begin(), accountBlob.end()}}}, lgrInfoNext.seq); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{accountBlob}); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update({{*key, {}}}, lgrInfoNext.seq); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{}); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfoNext.seq, uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, 
lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 2, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + + auto generateObjects = [](size_t numObjects, uint32_t ledgerSequence) { + std::vector> res{numObjects}; + ripple::uint256 key; + key = ledgerSequence * 100000; + + for (auto& blob : res) + { + ++key; + std::string keyStr{(const char*)key.data(), key.size()}; + blob.first = keyStr; + blob.second = std::to_string(ledgerSequence) + keyStr; + } + return res; + }; + auto updateObjects = [](uint32_t ledgerSequence, auto objs) { + for (auto& [key, obj] : objs) + { + obj = std::to_string(ledgerSequence) + obj; + } + return objs; + }; + + auto generateNextLedger = [seed](auto lgrInfo) { + ++lgrInfo.seq; + lgrInfo.parentHash = lgrInfo.hash; + static auto randomEngine = std::default_random_engine(seed); + std::shuffle(lgrInfo.txHash.begin(), lgrInfo.txHash.end(), randomEngine); + std::shuffle(lgrInfo.accountHash.begin(), lgrInfo.accountHash.end(), randomEngine); + std::shuffle(lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); + return lgrInfo; + }; + auto writeLedger = [&](auto lgrInfo, auto objs, auto state) { + backend->startWrites(); + + backend->writeLedger(lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo))); + std::vector cacheUpdates; + for (auto [key, obj] : objs) + { + backend->writeLedgerObject(std::string{key}, lgrInfo.seq, std::string{obj}); + auto key256 = ripple::uint256::fromVoidChecked(key); + cacheUpdates.push_back({*key256, {obj.begin(), obj.end()}}); + } + backend->cache().update(cacheUpdates, lgrInfo.seq); + if (state.count(lgrInfo.seq - 1) == 0 || + std::find_if(state[lgrInfo.seq - 1].begin(), state[lgrInfo.seq - 1].end(), [&](auto obj) { + return obj.first == objs[0].first; + }) == state[lgrInfo.seq - 1].end()) + { + for (size_t i = 0; i < objs.size(); ++i) + { + if (i + 1 < objs.size()) + backend->writeSuccessor( + std::string{objs[i].first}, lgrInfo.seq, std::string{objs[i + 1].first}); + else + backend->writeSuccessor( + std::string{objs[i].first}, lgrInfo.seq, uint256ToString(Backend::lastKey)); + } + if (state.count(lgrInfo.seq - 1)) + backend->writeSuccessor( + std::string{state[lgrInfo.seq - 1].back().first}, lgrInfo.seq, std::string{objs[0].first}); + else + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfo.seq, std::string{objs[0].first}); + } + + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + }; + + auto checkLedger = [&](auto lgrInfo, auto objs) { + auto rng = backend->fetchLedgerRange(); + auto seq = lgrInfo.seq; + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_GE(rng->maxSequence, seq); + auto retLgr = backend->fetchLedgerBySequence(seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfo)); + retLgr = backend->fetchLedgerByHash(lgrInfo.hash, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfo)) + << "retLgr seq:" << retLgr->seq << "; lgrInfo seq:" << lgrInfo.seq + << 
"; retLgr hash:" << retLgr->hash << "; lgrInfo hash:" << lgrInfo.hash + << "; retLgr parentHash:" << retLgr->parentHash << "; lgr Info parentHash:" << lgrInfo.parentHash; + + std::vector keys; + for (auto [key, obj] : objs) + { + auto retObj = backend->fetchLedgerObject(binaryStringToUint256(key), seq, yield); + if (obj.size()) + { + ASSERT_TRUE(retObj.has_value()); + EXPECT_STREQ((const char*)obj.data(), (const char*)retObj->data()); + } + else + { + ASSERT_FALSE(retObj.has_value()); + } + keys.push_back(binaryStringToUint256(key)); + } + + { + auto retObjs = backend->fetchLedgerObjects(keys, seq, yield); + ASSERT_EQ(retObjs.size(), objs.size()); + + for (size_t i = 0; i < keys.size(); ++i) + { + auto [key, obj] = objs[i]; + auto retObj = retObjs[i]; + if (obj.size()) + { + ASSERT_TRUE(retObj.size()); + EXPECT_STREQ((const char*)obj.data(), (const char*)retObj.data()); + } + else + { + ASSERT_FALSE(retObj.size()); + } + } + } + Backend::LedgerPage page; + std::vector retObjs; + do + { + uint32_t limit = 10; + page = backend->fetchLedgerPage(page.cursor, seq, limit, false, yield); + // if (page.cursor) + // EXPECT_EQ(page.objects.size(), limit); + retObjs.insert(retObjs.end(), page.objects.begin(), page.objects.end()); + } while (page.cursor); + for (auto obj : objs) + { + bool found = false; + for (auto retObj : retObjs) + { + if (ripple::strHex(obj.first) == ripple::strHex(retObj.key)) + { + found = true; + ASSERT_EQ(ripple::strHex(obj.second), ripple::strHex(retObj.blob)); + } + } + if (found != (obj.second.size() != 0)) + ASSERT_EQ(found, obj.second.size() != 0); + } + }; + + std::map>> state; + std::map lgrInfos; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + auto objs = generateObjects(25, lgrInfoNext.seq); + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, objs, state); + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + } + + std::vector> objs; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + if (!objs.size()) + objs = generateObjects(25, lgrInfoNext.seq); + else + objs = updateObjects(lgrInfoNext.seq, objs); + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, objs, state); + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + } + + auto flatten = [&](uint32_t max) { + std::vector> flat; + std::map objs; + for (auto [seq, diff] : state) + { + for (auto [k, v] : diff) + { + if (seq > max) + { + if (objs.count(k) == 0) + objs[k] = ""; + } + else + { + objs[k] = v; + } + } + } + for (auto [key, value] : objs) + { + flat.push_back(std::make_pair(key, value)); + } + return flat; + }; + + for (auto [seq, diff] : state) + { + auto flat = flatten(seq); + checkLedger(lgrInfos[seq], flat); + } + } + + done = true; + work.reset(); + }); ioc.run(); } diff --git a/unittests/Config.cpp b/unittests/Config.cpp index 0c5804f2..677fa1db 100644 --- a/unittests/Config.cpp +++ b/unittests/Config.cpp @@ -94,18 +94,14 @@ TEST_F(ConfigTest, Access) ASSERT_EQ(cfg.value("section.test.int"), 9042); ASSERT_EQ(cfg.value("section.test.bool"), true); - ASSERT_ANY_THROW((void)cfg.value( - "section.test.bool")); // wrong type requested + ASSERT_ANY_THROW((void)cfg.value("section.test.bool")); // wrong type requested ASSERT_ANY_THROW((void)cfg.value("section.doesnotexist")); ASSERT_EQ(cfg.valueOr("section.test.str", "fallback"), 
"hello"); - ASSERT_EQ( - cfg.valueOr("section.test.nonexistent", "fallback"), - "fallback"); + ASSERT_EQ(cfg.valueOr("section.test.nonexistent", "fallback"), "fallback"); ASSERT_EQ(cfg.valueOr("section.test.bool", false), true); - ASSERT_ANY_THROW( - (void)cfg.valueOr("section.test.bool", 1234)); // wrong type requested + ASSERT_ANY_THROW((void)cfg.valueOr("section.test.bool", 1234)); // wrong type requested } TEST_F(ConfigTest, ErrorHandling) @@ -126,7 +122,7 @@ TEST_F(ConfigTest, ErrorHandling) try { (void)arr[3].array()[1].valueOrThrow("msg"); // wrong type - ASSERT_FALSE(true); // should not get here + ASSERT_FALSE(true); // should not get here } catch (std::runtime_error const& e) { @@ -195,10 +191,7 @@ struct Custom { assert(value.is_object()); auto const& obj = value.as_object(); - return { - obj.at("str").as_string().c_str(), - obj.at("int").as_int64(), - obj.at("bool").as_bool()}; + return {obj.at("str").as_string().c_str(), obj.at("int").as_int64(), obj.at("bool").as_bool()}; } }; @@ -217,8 +210,7 @@ TEST_F(ConfigTest, Extend) class TmpFile { public: - TmpFile(std::string const& data) - : tmpPath_{boost::filesystem::unique_path().string()} + TmpFile(std::string const& data) : tmpPath_{boost::filesystem::unique_path().string()} { std::ofstream of; of.open(tmpPath_); diff --git a/unittests/ProfilerTest.cpp b/unittests/ProfilerTest.cpp index b9cefd30..bc556bd0 100644 --- a/unittests/ProfilerTest.cpp +++ b/unittests/ProfilerTest.cpp @@ -35,8 +35,7 @@ TEST(TimedTest, HasReturnValue) TEST(TimedTest, ReturnVoid) { - auto time = timed( - []() { std::this_thread::sleep_for(std::chrono::milliseconds(5)); }); + auto time = timed([]() { std::this_thread::sleep_for(std::chrono::milliseconds(5)); }); ASSERT_NE(time, 0); } @@ -85,9 +84,7 @@ TEST(TimedTest, NestedLambda) double timeNested; auto f = [&]() { std::this_thread::sleep_for(std::chrono::milliseconds(5)); - timeNested = timed([]() { - std::this_thread::sleep_for(std::chrono::milliseconds(5)); - }); + timeNested = timed([]() { std::this_thread::sleep_for(std::chrono::milliseconds(5)); }); return 8; }; auto [ret, time] = timed(std::move(f)); diff --git a/unittests/SubscriptionManagerTest.cpp b/unittests/SubscriptionManagerTest.cpp index b65c24a1..b1807332 100644 --- a/unittests/SubscriptionManagerTest.cpp +++ b/unittests/SubscriptionManagerTest.cpp @@ -39,12 +39,9 @@ constexpr static auto CURRENCY = "0158415500000000C1F76FF6ECB0BAC600000000"; constexpr static auto ISSUER = "rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD"; constexpr static auto ACCOUNT1 = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto LEDGERHASH2 = - "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; -constexpr static auto TXNID = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto LEDGERHASH2 = "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto TXNID = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; /* * test subscription factory method and report function @@ -64,29 +61,23 @@ TEST(SubscriptionManagerTest, InitAndReport) })"; clio::Config cfg; auto backend = std::make_shared(cfg); - auto subManager = - SubscriptionManager::make_SubscriptionManager(cfg, backend); + auto subManager 
= SubscriptionManager::make_SubscriptionManager(cfg, backend); EXPECT_EQ(subManager->report(), json::parse(ReportReturn)); } void -CheckSubscriberMessage( - std::string out, - std::shared_ptr session, - int retry = 10) +CheckSubscriberMessage(std::string out, std::shared_ptr session, int retry = 10) { auto sessionPtr = static_cast(session.get()); while (retry-- != 0) { std::this_thread::sleep_for(20ms); - if ((!sessionPtr->message.empty()) && - json::parse(sessionPtr->message) == json::parse(out)) + if ((!sessionPtr->message.empty()) && json::parse(sessionPtr->message) == json::parse(out)) { return; } } - EXPECT_TRUE(false) << "Could not wait the subscriber message, expect:" - << out << " Get:" << sessionPtr->message; + EXPECT_TRUE(false) << "Could not wait the subscriber message, expect:" << out << " Get:" << sessionPtr->message; } // Fixture contains test target and mock backend @@ -101,8 +92,7 @@ protected: SetUp() override { MockBackendTest::SetUp(); - subManagerPtr = - SubscriptionManager::make_SubscriptionManager(cfg, mockBackendPtr); + subManagerPtr = SubscriptionManager::make_SubscriptionManager(cfg, mockBackendPtr); session = std::make_shared(tagDecoratorFactory); } void @@ -129,10 +119,8 @@ TEST_F(SubscriptionManagerSimpleBackendTest, ReportCurrentSubscriber) "books":2, "book_changes":2 })"; - std::shared_ptr session1 = - std::make_shared(tagDecoratorFactory); - std::shared_ptr session2 = - std::make_shared(tagDecoratorFactory); + std::shared_ptr session1 = std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = std::make_shared(tagDecoratorFactory); subManagerPtr->subBookChanges(session1); subManagerPtr->subBookChanges(session2); subManagerPtr->subManifest(session1); @@ -182,23 +170,19 @@ TEST_F(SubscriptionManagerSimpleBackendTest, ReportCurrentSubscriber) TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerLedgerUnSub) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max boost::asio::io_context ctx; auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); // mock fetchLedgerBySequence return this ledger - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // mock doFetchLedgerObject return fee setting ledger object auto feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0); ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(feeBlob)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); - boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { - subManagerPtr->subLedger(yield, session); - }); + boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) { subManagerPtr->subLedger(yield, session); }); ctx.run(); std::this_thread::sleep_for(20ms); auto report = subManagerPtr->report(); @@ -239,9 +223,7 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerValidation) * We don't need the valid transaction in this test, subscription manager just * forward the message to subscriber */ -TEST_F( - SubscriptionManagerSimpleBackendTest, - SubscriptionManagerProposedTransaction) +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerProposedTransaction) { subManagerPtr->subProposedTransactions(session); constexpr static auto dummyTransaction = R"({ @@ -250,8 +232,7 @@ TEST_F( 
"Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn" } })"; - subManagerPtr->forwardProposedTransaction( - json::parse(dummyTransaction).get_object()); + subManagerPtr->forwardProposedTransaction(json::parse(dummyTransaction).get_object()); CheckSubscriberMessage(dummyTransaction, session); } @@ -262,15 +243,12 @@ TEST_F( * but only forward a transaction with one of them * check the correct session is called */ -TEST_F( - SubscriptionManagerSimpleBackendTest, - SubscriptionManagerAccountProposedTransaction) +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerAccountProposedTransaction) { auto account = GetAccountIDWithString(ACCOUNT1); subManagerPtr->subProposedAccount(account, session); - std::shared_ptr sessionIdle = - std::make_shared(tagDecoratorFactory); + std::shared_ptr sessionIdle = std::make_shared(tagDecoratorFactory); auto accountIdle = GetAccountIDWithString(ACCOUNT2); subManagerPtr->subProposedAccount(accountIdle, sessionIdle); @@ -280,8 +258,7 @@ TEST_F( "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn" } })"; - subManagerPtr->forwardProposedTransaction( - json::parse(dummyTransaction).get_object()); + subManagerPtr->forwardProposedTransaction(json::parse(dummyTransaction).get_object()); CheckSubscriberMessage(dummyTransaction, session); auto rawIdle = (MockSession*)(sessionIdle.get()); EXPECT_EQ("", rawIdle->message); @@ -294,15 +271,13 @@ TEST_F( */ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerLedger) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max boost::asio::io_context ctx; auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); // mock fetchLedgerBySequence return this ledger - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // mock doFetchLedgerObject return fee setting ledger object auto feeBlob = CreateFeeSettingBlob(1, 2, 3, 4, 0); @@ -361,12 +336,10 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerBookChange) auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 32); auto transactions = std::vector{}; auto trans1 = TransactionAndMetadata(); - ripple::STObject obj = - CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); + ripple::STObject obj = CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = 32; - ripple::STObject metaObj = - CreateMetaDataForBookChange(CURRENCY, ISSUER, 22, 1, 3, 3, 1); + ripple::STObject metaObj = CreateMetaDataForBookChange(CURRENCY, ISSUER, 22, 1, 3, 3, 1); trans1.metadata = metaObj.getSerializer().peekData(); transactions.push_back(trans1); subManagerPtr->pubBookChanges(ledgerinfo, transactions); @@ -401,8 +374,7 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerTransaction) auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); auto trans1 = TransactionAndMetadata(); - ripple::STObject obj = - CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); + ripple::STObject obj = CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = 32; // create an empty meta object @@ -448,16 +420,13 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerTransaction) * check 
owner_funds * mock backend return a trustline */ -TEST_F( - SubscriptionManagerSimpleBackendTest, - SubscriptionManagerTransactionOfferCreation) +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerTransactionOfferCreation) { subManagerPtr->subTransactions(session); auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); auto trans1 = TransactionAndMetadata(); - ripple::STObject obj = CreateCreateOfferTransactionObject( - ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); + ripple::STObject obj = CreateCreateOfferTransactionObject(ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = 32; ripple::STArray metaArray{0}; @@ -476,11 +445,9 @@ TEST_F( line.setFieldU32(ripple::sfFlags, 0); auto issue2 = GetIssue(CURRENCY, ISSUER); line.setFieldAmount(ripple::sfBalance, ripple::STAmount(issue2, 100)); - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(line.getSerializer().peekData())); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(line.getSerializer().peekData())); subManagerPtr->pubTransaction(trans1, ledgerinfo); constexpr static auto TransactionForOwnerFund = R"({ "transaction":{ @@ -553,16 +520,13 @@ constexpr static auto TransactionForOwnerFundFrozen = R"({ * check owner_funds when line is frozen * mock backend return a trustline */ -TEST_F( - SubscriptionManagerSimpleBackendTest, - SubscriptionManagerTransactionOfferCreationFrozenLine) +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerTransactionOfferCreationFrozenLine) { subManagerPtr->subTransactions(session); auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); auto trans1 = TransactionAndMetadata(); - ripple::STObject obj = CreateCreateOfferTransactionObject( - ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); + ripple::STObject obj = CreateCreateOfferTransactionObject(ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = 32; ripple::STArray metaArray{0}; @@ -579,13 +543,10 @@ TEST_F( line.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{TXNID}); line.setFieldU32(ripple::sfPreviousTxnLgrSeq, 3); line.setFieldU32(ripple::sfFlags, ripple::lsfHighFreeze); - line.setFieldAmount( - ripple::sfBalance, ripple::STAmount(GetIssue(CURRENCY, ISSUER), 100)); - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + line.setFieldAmount(ripple::sfBalance, ripple::STAmount(GetIssue(CURRENCY, ISSUER), 100)); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(line.getSerializer().peekData())); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(line.getSerializer().peekData())); subManagerPtr->pubTransaction(trans1, ledgerinfo); CheckSubscriberMessage(TransactionForOwnerFundFrozen, session); } @@ -595,16 +556,13 @@ TEST_F( * check owner_funds when issue global frozen * mock backend return a frozen account setting */ -TEST_F( - SubscriptionManagerSimpleBackendTest, - SubscriptionManagerTransactionOfferCreationGlobalFrozen) +TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerTransactionOfferCreationGlobalFrozen) { subManagerPtr->subTransactions(session); auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 
33); auto trans1 = TransactionAndMetadata(); - ripple::STObject obj = CreateCreateOfferTransactionObject( - ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); + ripple::STObject obj = CreateCreateOfferTransactionObject(ACCOUNT1, 1, 32, CURRENCY, ISSUER, 1, 3); trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = 32; ripple::STArray metaArray{0}; @@ -622,17 +580,13 @@ TEST_F( line.setFieldU32(ripple::sfPreviousTxnLgrSeq, 3); line.setFieldU32(ripple::sfFlags, ripple::lsfHighFreeze); auto issueAccount = GetAccountIDWithString(ISSUER); - line.setFieldAmount( - ripple::sfBalance, ripple::STAmount(GetIssue(CURRENCY, ISSUER), 100)); - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + line.setFieldAmount(ripple::sfBalance, ripple::STAmount(GetIssue(CURRENCY, ISSUER), 100)); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); auto kk = ripple::keylet::account(issueAccount).key; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(testing::_, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(testing::_, testing::_, testing::_)) .WillByDefault(Return(line.getSerializer().peekData())); - ripple::STObject accountRoot = CreateAccountRootObject( - ISSUER, ripple::lsfGlobalFreeze, 1, 10, 2, TXNID, 3); + ripple::STObject accountRoot = CreateAccountRootObject(ISSUER, ripple::lsfGlobalFreeze, 1, 10, 2, TXNID, 3); ON_CALL(*rawBackendPtr, doFetchLedgerObject(kk, testing::_, testing::_)) .WillByDefault(Return(accountRoot.getSerializer().peekData())); subManagerPtr->pubTransaction(trans1, ledgerinfo); @@ -648,8 +602,7 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerAccount) subManagerPtr->subAccount(account, session); auto ledgerinfo = CreateLedgerInfo(LEDGERHASH2, 33); - ripple::STObject obj = - CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); + ripple::STObject obj = CreatePaymentTransactionObject(ACCOUNT1, ACCOUNT2, 1, 1, 32); auto trans1 = TransactionAndMetadata(); trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = 32; @@ -723,8 +676,7 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerOrderBook) trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = 32; - auto metaObj = - CreateMetaDataForBookChange(CURRENCY, ISSUER, 22, 3, 1, 1, 3); + auto metaObj = CreateMetaDataForBookChange(CURRENCY, ISSUER, 22, 3, 1, 1, 3); trans1.metadata = metaObj.getSerializer().peekData(); subManagerPtr->pubTransaction(trans1, ledgerinfo); @@ -780,8 +732,7 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerOrderBook) CheckSubscriberMessage(OrderbookPublish, session); // trigger by offer cancel meta data - std::shared_ptr session1 = - std::make_shared(tagDecoratorFactory); + std::shared_ptr session1 = std::make_shared(tagDecoratorFactory); subManagerPtr->subBook(book, session1); metaObj = CreateMetaDataForCancelOffer(CURRENCY, ISSUER, 22, 3, 1); trans1.metadata = metaObj.getSerializer().peekData(); @@ -870,8 +821,7 @@ TEST_F(SubscriptionManagerSimpleBackendTest, SubscriptionManagerOrderBook) "engine_result":"tesSUCCESS", "engine_result_message":"The transaction was applied. Only final in a validated ledger." 
})"; - std::shared_ptr session2 = - std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = std::make_shared(tagDecoratorFactory); subManagerPtr->subBook(book, session2); metaObj = CreateMetaDataForCreateOffer(CURRENCY, ISSUER, 22, 3, 1); trans1.metadata = metaObj.getSerializer().peekData(); diff --git a/unittests/SubscriptionTest.cpp b/unittests/SubscriptionTest.cpp index 5f3f3a1f..b96df835 100644 --- a/unittests/SubscriptionTest.cpp +++ b/unittests/SubscriptionTest.cpp @@ -49,10 +49,8 @@ class SubscriptionMapTest : public SubscriptionTest TEST_F(SubscriptionTest, SubscriptionCount) { Subscription sub(ctx); - std::shared_ptr session1 = - std::make_shared(tagDecoratorFactory); - std::shared_ptr session2 = - std::make_shared(tagDecoratorFactory); + std::shared_ptr session1 = std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = std::make_shared(tagDecoratorFactory); sub.subscribe(session1); sub.subscribe(session2); ctx.run(); @@ -81,10 +79,8 @@ TEST_F(SubscriptionTest, SubscriptionCount) TEST_F(SubscriptionTest, SubscriptionPublish) { Subscription sub(ctx); - std::shared_ptr session1 = - std::make_shared(tagDecoratorFactory); - std::shared_ptr session2 = - std::make_shared(tagDecoratorFactory); + std::shared_ptr session1 = std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = std::make_shared(tagDecoratorFactory); sub.subscribe(session1); sub.subscribe(session2); ctx.run(); @@ -127,12 +123,9 @@ TEST_F(SubscriptionTest, SubscriptionDeadRemoveSubscriber) TEST_F(SubscriptionMapTest, SubscriptionMapCount) { - std::shared_ptr session1 = - std::make_shared(tagDecoratorFactory); - std::shared_ptr session2 = - std::make_shared(tagDecoratorFactory); - std::shared_ptr session3 = - std::make_shared(tagDecoratorFactory); + std::shared_ptr session1 = std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = std::make_shared(tagDecoratorFactory); + std::shared_ptr session3 = std::make_shared(tagDecoratorFactory); SubscriptionMap subMap(ctx); subMap.subscribe(session1, "topic1"); subMap.subscribe(session2, "topic1"); @@ -162,10 +155,8 @@ TEST_F(SubscriptionMapTest, SubscriptionMapCount) TEST_F(SubscriptionMapTest, SubscriptionMapPublish) { - std::shared_ptr session1 = - std::make_shared(tagDecoratorFactory); - std::shared_ptr session2 = - std::make_shared(tagDecoratorFactory); + std::shared_ptr session1 = std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = std::make_shared(tagDecoratorFactory); SubscriptionMap subMap(ctx); const std::string topic1 = "topic1"; const std::string topic2 = "topic2"; @@ -176,9 +167,8 @@ TEST_F(SubscriptionMapTest, SubscriptionMapPublish) ctx.run(); EXPECT_EQ(subMap.count(), 2); auto message1 = std::make_shared(topic1Message.data()); - subMap.publish(message1, topic1); // lvalue - subMap.publish( - std::make_shared(topic2Message.data()), topic2); // rvalue + subMap.publish(message1, topic1); // lvalue + subMap.publish(std::make_shared(topic2Message.data()), topic2); // rvalue ctx.restart(); ctx.run(); MockSession* p1 = (MockSession*)(session1.get()); @@ -190,8 +180,7 @@ TEST_F(SubscriptionMapTest, SubscriptionMapPublish) TEST_F(SubscriptionMapTest, SubscriptionMapDeadRemoveSubscriber) { std::shared_ptr session1(new MockDeadSession(tagDecoratorFactory)); - std::shared_ptr session2 = - std::make_shared(tagDecoratorFactory); + std::shared_ptr session2 = std::make_shared(tagDecoratorFactory); SubscriptionMap subMap(ctx); const std::string topic1 = "topic1"; const std::string topic2 = "topic2"; @@ -202,9 +191,8 @@ 
TEST_F(SubscriptionMapTest, SubscriptionMapDeadRemoveSubscriber) ctx.run(); EXPECT_EQ(subMap.count(), 2); auto message1 = std::make_shared(topic1Message.data()); - subMap.publish(message1, topic1); // lvalue - subMap.publish( - std::make_shared(topic2Message.data()), topic2); // rvalue + subMap.publish(message1, topic1); // lvalue + subMap.publish(std::make_shared(topic2Message.data()), topic2); // rvalue ctx.restart(); ctx.run(); MockDeadSession* p1 = (MockDeadSession*)(session1.get()); diff --git a/unittests/backend/cassandra/AsyncExecutorTests.cpp b/unittests/backend/cassandra/AsyncExecutorTests.cpp index 2191318f..f363fb76 100644 --- a/unittests/backend/cassandra/AsyncExecutorTests.cpp +++ b/unittests/backend/cassandra/AsyncExecutorTests.cpp @@ -38,83 +38,60 @@ TEST_F(BackendCassandraAsyncExecutorTest, CompletionCalledOnSuccess) auto statement = FakeStatement{}; auto handle = MockHandle{}; - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([this](auto const&, auto&& cb) { ctx.post([cb = std::move(cb)]() { cb({}); }); return FakeFutureWithCallback{}; }); - EXPECT_CALL( - handle, - asyncExecute( - An(), - An&&>())) + EXPECT_CALL(handle, asyncExecute(An(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - AsyncExecutor::run( - ctx, handle, statement, [&called, &work](auto&&) { - called = true; - work.reset(); - }); + AsyncExecutor::run(ctx, handle, statement, [&called, &work](auto&&) { + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); } -TEST_F( - BackendCassandraAsyncExecutorTest, - ExecutedMultipleTimesByRetryPolicyOnMainThread) +TEST_F(BackendCassandraAsyncExecutorTest, ExecutedMultipleTimesByRetryPolicyOnMainThread) { auto callCount = std::atomic_int{0}; auto statement = FakeStatement{}; auto handle = MockHandle{}; // emulate successfull execution after some attempts - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([&callCount](auto const&, auto&& cb) { ++callCount; if (callCount >= 3) cb({}); else - cb({CassandraError{ - "timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}}); + cb({CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}}); return FakeFutureWithCallback{}; }); - EXPECT_CALL( - handle, - asyncExecute( - An(), - An&&>())) + EXPECT_CALL(handle, asyncExecute(An(), An&&>())) .Times(3); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - AsyncExecutor::run( - ctx, handle, statement, [&called, &work](auto&&) { - called = true; - work.reset(); - }); + AsyncExecutor::run(ctx, handle, statement, [&called, &work](auto&&) { + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(callCount >= 3); ASSERT_TRUE(called); } -TEST_F( - BackendCassandraAsyncExecutorTest, - ExecutedMultipleTimesByRetryPolicyOnOtherThread) +TEST_F(BackendCassandraAsyncExecutorTest, ExecutedMultipleTimesByRetryPolicyOnOtherThread) { auto callCount = std::atomic_int{0}; auto statement = FakeStatement{}; @@ -125,37 +102,27 @@ TEST_F( auto thread = std::thread{[&threadedCtx] { threadedCtx.run(); }}; // emulate successfull execution after some attempts - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([&callCount](auto const&, auto&& cb) { ++callCount; if (callCount >= 3) cb({}); else - cb({CassandraError{ - "timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}}); + cb({CassandraError{"timeout", 
CASS_ERROR_LIB_REQUEST_TIMED_OUT}}); return FakeFutureWithCallback{}; }); - EXPECT_CALL( - handle, - asyncExecute( - An(), - An&&>())) + EXPECT_CALL(handle, asyncExecute(An(), An&&>())) .Times(3); auto called = std::atomic_bool{false}; auto work2 = std::optional{ctx}; - AsyncExecutor::run( - threadedCtx, handle, statement, [&called, &work, &work2](auto&&) { - called = true; - work.reset(); - work2.reset(); - }); + AsyncExecutor::run(threadedCtx, handle, statement, [&called, &work, &work2](auto&&) { + called = true; + work.reset(); + work2.reset(); + }); ctx.run(); ASSERT_TRUE(callCount >= 3); @@ -164,30 +131,19 @@ TEST_F( thread.join(); } -TEST_F( - BackendCassandraAsyncExecutorTest, - CompletionCalledOnFailureAfterRetryCountExceeded) +TEST_F(BackendCassandraAsyncExecutorTest, CompletionCalledOnFailureAfterRetryCountExceeded) { auto statement = FakeStatement{}; auto handle = MockHandle{}; // FakeRetryPolicy returns false for shouldRetry in which case we should // still call onComplete giving it whatever error we have raised internally. - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([](auto const&, auto&& cb) { - cb({CassandraError{ - "not a timeout", CASS_ERROR_LIB_INTERNAL_ERROR}}); + cb({CassandraError{"not a timeout", CASS_ERROR_LIB_INTERNAL_ERROR}}); return FakeFutureWithCallback{}; }); - EXPECT_CALL( - handle, - asyncExecute( - An(), - An&&>())) + EXPECT_CALL(handle, asyncExecute(An(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; diff --git a/unittests/backend/cassandra/BackendTests.cpp b/unittests/backend/cassandra/BackendTests.cpp index 8e1225e6..c775a3a7 100644 --- a/unittests/backend/cassandra/BackendTests.cpp +++ b/unittests/backend/cassandra/BackendTests.cpp @@ -80,988 +80,813 @@ TEST_F(BackendCassandraTest, Basic) std::optional work; work.emplace(ctx); - boost::asio::spawn( - ctx, [this, &done, &work](boost::asio::yield_context yield) { - std::string rawHeader = - "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335BC54" - "351E" - "DD733898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" - "315A" - "6DB6FE30CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED" - "3CF5" - "3E2232B33EF57CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE28265" - "6A58" - "CE5AA29652EFFD80AC59CD91416E4E13DBBE"; + boost::asio::spawn(ctx, [this, &done, &work](boost::asio::yield_context yield) { + std::string rawHeader = + "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335BC54" + "351E" + "DD733898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" + "315A" + "6DB6FE30CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED" + "3CF5" + "3E2232B33EF57CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE28265" + "6A58" + "CE5AA29652EFFD80AC59CD91416E4E13DBBE"; - auto hexStringToBinaryString = [](auto const& hex) { - auto blob = ripple::strUnHex(hex); - std::string strBlob; - for (auto c : *blob) - { - strBlob += c; - } - return strBlob; - }; - [[maybe_unused]] auto binaryStringToUint256 = - [](auto const& bin) -> ripple::uint256 { - ripple::uint256 uint; - return uint.fromVoid((void const*)bin.data()); - }; - [[maybe_unused]] auto ledgerInfoToBinaryString = - [](auto const& info) { - auto blob = RPC::ledgerInfoToBlob(info, true); - std::string strBlob; - for (auto c : blob) - { - strBlob += c; - } - return strBlob; - }; - - std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); - ripple::LedgerInfo lgrInfo = - deserializeHeader(ripple::makeSlice(rawHeaderBlob)); - - backend->writeLedger(lgrInfo, 
std::move(rawHeaderBlob)); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfo.seq, - uint256ToString(Backend::lastKey)); - ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + auto hexStringToBinaryString = [](auto const& hex) { + auto blob = ripple::strUnHex(hex); + std::string strBlob; + for (auto c : *blob) { - auto rng = backend->fetchLedgerRange(); - ASSERT_TRUE(rng.has_value()); - EXPECT_EQ(rng->minSequence, rng->maxSequence); - EXPECT_EQ(rng->maxSequence, lgrInfo.seq); + strBlob += c; } + return strBlob; + }; + [[maybe_unused]] auto binaryStringToUint256 = [](auto const& bin) -> ripple::uint256 { + ripple::uint256 uint; + return uint.fromVoid((void const*)bin.data()); + }; + [[maybe_unused]] auto ledgerInfoToBinaryString = [](auto const& info) { + auto blob = RPC::ledgerInfoToBlob(info, true); + std::string strBlob; + for (auto c : blob) { - auto seq = backend->fetchLatestLedgerSequence(yield); - ASSERT_TRUE(seq.has_value()); - EXPECT_EQ(*seq, lgrInfo.seq); - } - { - auto retLgr = - backend->fetchLedgerBySequence(lgrInfo.seq, yield); - ASSERT_TRUE(retLgr.has_value()); - EXPECT_EQ(retLgr->seq, lgrInfo.seq); - EXPECT_EQ( - RPC::ledgerInfoToBlob(lgrInfo), - RPC::ledgerInfoToBlob(*retLgr)); + strBlob += c; } + return strBlob; + }; - EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield) - .has_value()); - auto lgrInfoOld = lgrInfo; + std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); + ripple::LedgerInfo lgrInfo = deserializeHeader(ripple::makeSlice(rawHeaderBlob)); - auto lgrInfoNext = lgrInfo; - lgrInfoNext.seq = lgrInfo.seq + 1; - lgrInfoNext.parentHash = lgrInfo.hash; + backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); + backend->writeSuccessor(uint256ToString(Backend::firstKey), lgrInfo.seq, uint256ToString(Backend::lastKey)); + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + { + auto rng = backend->fetchLedgerRange(); + ASSERT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, rng->maxSequence); + EXPECT_EQ(rng->maxSequence, lgrInfo.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + ASSERT_TRUE(seq.has_value()); + EXPECT_EQ(*seq, lgrInfo.seq); + } + { + auto retLgr = backend->fetchLedgerBySequence(lgrInfo.seq, yield); + ASSERT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfo.seq); + EXPECT_EQ(RPC::ledgerInfoToBlob(lgrInfo), RPC::ledgerInfoToBlob(*retLgr)); + } + + EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield).has_value()); + auto lgrInfoOld = lgrInfo; + + auto lgrInfoNext = lgrInfo; + lgrInfoNext.seq = lgrInfo.seq + 1; + lgrInfoNext.parentHash = lgrInfo.hash; + lgrInfoNext.hash++; + lgrInfoNext.accountHash = ~lgrInfo.accountHash; + { + std::string rawHeaderBlob = ledgerInfoToBinaryString(lgrInfoNext); + + backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_EQ(seq, lgrInfoNext.seq); + } + { + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + EXPECT_NE(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoOld)); + retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 1, 
yield); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoOld)); + EXPECT_NE(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield); + EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield).has_value()); + + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + + auto hashes = backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(hashes.size(), 0); + } + + // the below dummy data is not expected to be consistent. The + // metadata string does represent valid metadata. Don't assume + // though that the transaction or its hash correspond to the + // metadata, or anything like that. These tests are purely + // binary tests to make sure the same data that goes in, comes + // back out + std::string metaHex = + "201C0000001AF8E411006F560A3E08122A05AC91DEFA87052B0554E4A29B46" + "3A27642EBB060B6052196592EEE72200000000240480FDB52503CE1A863300" + "000000000000003400000000000000005529983CBAED30F547471452921C3C" + "6B9F9685F292F6291000EED0A44413AF18C250101AC09600F4B502C8F7F830" + "F80B616DCB6F3970CB79AB70975A05ED5B66860B9564400000001FE217CB65" + "D54B640B31521B05000000000000000000000000434E5900000000000360E3" + "E0751BD9A566CD03FA6CAFC78118B82BA081142252F328CF91263417762570" + "D67220CCB33B1370E1E1E3110064561AC09600F4B502C8F7F830F80B616DCB" + "6F3970CB79AB70975A05ED33DF783681E8365A05ED33DF783681581AC09600" + "F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED33DF783681031100" + "0000000000000000000000434E59000000000004110360E3E0751BD9A566CD" + "03FA6CAFC78118B82BA0E1E1E4110064561AC09600F4B502C8F7F830F80B61" + "6DCB6F3970CB79AB70975A05ED5B66860B95E72200000000365A05ED5B6686" + "0B95581AC09600F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED5B" + "66860B95011100000000000000000000000000000000000000000211000000" + "00000000000000000000000000000000000311000000000000000000000000" + "434E59000000000004110360E3E0751BD9A566CD03FA6CAFC78118B82BA0E1" + "E1E311006F5647B05E66DE9F3DF2689E8F4CE6126D3136B6C5E79587F9D24B" + "D71A952B0852BAE8240480FDB950101AC09600F4B502C8F7F830F80B616DCB" + "6F3970CB79AB70975A05ED33DF78368164400000033C83A95F65D59D9A6291" + "9C2D18000000000000000000000000434E5900000000000360E3E0751BD9A5" + "66CD03FA6CAFC78118B82BA081142252F328CF91263417762570D67220CCB3" + "3B1370E1E1E511006456AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588" + "C2D55C84718FAFBBD2604AE722000000003100000000000000003200000000" + "0000000058AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588C2D55C8471" + "8FAFBBD2604A82142252F328CF91263417762570D67220CCB33B1370E1E1E5" + "1100612503CE1A8755CE935137F8C6C8DEF26B5CD93BE18105CA83F65E1E90" + "CEC546F562D25957DC0856E0311EB450B6177F969B94DBDDA83E99B7A0576A" + "CD9079573876F16C0C004F06E6240480FDB9624000000005FF0E2BE1E72200" + "000000240480FDBA2D00000005624000000005FF0E1F81142252F328CF9126" + "3417762570D67220CCB33B1370E1E1F1031000"; + std::string txnHex = + "1200072200000000240480FDB920190480FDB5201B03CE1A8964400000033C" + "83A95F65D59D9A62919C2D18000000000000000000000000434E5900000000" + "000360E3E0751BD9A566CD03FA6CAFC78118B82BA068400000000000000C73" + "21022D40673B44C82DEE1DDB8B9BB53DCCE4F97B27404DB850F068DD91D685" + "E337EA7446304402202EA6B702B48B39F2197112382838F92D4C02948E9911" + "FE6B2DEBCF9183A426BC022005DAC06CD4517E86C2548A80996019F3AC60A0" + "9EED153BF60C992930D68F09F981142252F328CF91263417762570D67220CC" + "B33B1370"; + std::string hashHex = + 
"0A81FB3D6324C2DCF73131505C6E4DC67981D7FC39F5E9574CEC4B1F22D28B" + "F7"; + + // this account is not related to the above transaction and + // metadata + std::string accountHex = + "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD018E" + "FFBE" + "17C5C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" + "0781" + "142252F328CF91263417762570D67220CCB33B1370"; + std::string accountIndexHex = + "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C004F" + "06"; + + // An NFTokenMint tx + std::string nftTxnHex = + "1200192200000008240011CC9B201B001F71D6202A0000000168400000" + "000000000C7321ED475D1452031E8F9641AF1631519A58F7B8681E172E" + "4838AA0E59408ADA1727DD74406960041F34F10E0CBB39444B4D4E577F" + "C0B7E8D843D091C2917E96E7EE0E08B30C91413EC551A2B8A1D405E8BA" + "34FE185D8B10C53B40928611F2DE3B746F0303751868747470733A2F2F" + "677265677765697362726F642E636F6D81146203F49C21D5D6E022CB16" + "DE3538F248662FC73C"; + + std::string nftTxnMeta = + "201C00000001F8E511005025001F71B3556ED9C9459001E4F4A9121F4E" + "07AB6D14898A5BBEF13D85C25D743540DB59F3CF566203F49C21D5D6E0" + "22CB16DE3538F248662FC73CFFFFFFFFFFFFFFFFFFFFFFFFE6FAEC5A00" + "0800006203F49C21D5D6E022CB16DE3538F248662FC73C8962EFA00000" + "0006751868747470733A2F2F677265677765697362726F642E636F6DE1" + "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C93E8B1" + "C200000028751868747470733A2F2F677265677765697362726F642E63" + "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" + "9808B6B90000001D751868747470733A2F2F677265677765697362726F" + "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" + "2FC73C9C28BBAC00000012751868747470733A2F2F6772656777656973" + "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" + "F248662FC73CA048C0A300000007751868747470733A2F2F6772656777" + "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" + "DE3538F248662FC73CAACE82C500000029751868747470733A2F2F6772" + "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" + "22CB16DE3538F248662FC73CAEEE87B80000001E751868747470733A2F" + "2F677265677765697362726F642E636F6DE1EC5A000800006203F49C21" + "D5D6E022CB16DE3538F248662FC73CB30E8CAF00000013751868747470" + "733A2F2F677265677765697362726F642E636F6DE1EC5A000800006203" + "F49C21D5D6E022CB16DE3538F248662FC73CB72E91A200000008751868" + "747470733A2F2F677265677765697362726F642E636F6DE1EC5A000800" + "006203F49C21D5D6E022CB16DE3538F248662FC73CC1B453C40000002A" + "751868747470733A2F2F677265677765697362726F642E636F6DE1EC5A" + "000800006203F49C21D5D6E022CB16DE3538F248662FC73CC5D458BB00" + "00001F751868747470733A2F2F677265677765697362726F642E636F6D" + "E1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CC9F4" + "5DAE00000014751868747470733A2F2F677265677765697362726F642E" + "636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC7" + "3CCE1462A500000009751868747470733A2F2F67726567776569736272" + "6F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248" + "662FC73CD89A24C70000002B751868747470733A2F2F67726567776569" + "7362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE35" + "38F248662FC73CDCBA29BA00000020751868747470733A2F2F67726567" + "7765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB" + "16DE3538F248662FC73CE0DA2EB100000015751868747470733A2F2F67" + "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" + "E022CB16DE3538F248662FC73CE4FA33A40000000A751868747470733A" + "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" + "21D5D6E022CB16DE3538F248662FC73CF39FFABD000000217518687474" + "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" + 
"03F49C21D5D6E022CB16DE3538F248662FC73CF7BFFFB0000000167518" + "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" + "00006203F49C21D5D6E022CB16DE3538F248662FC73CFBE004A7000000" + "0B751868747470733A2F2F677265677765697362726F642E636F6DE1F1" + "E1E72200000000501A6203F49C21D5D6E022CB16DE3538F248662FC73C" + "662FC73C8962EFA000000006FAEC5A000800006203F49C21D5D6E022CB" + "16DE3538F248662FC73C8962EFA000000006751868747470733A2F2F67" + "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" + "E022CB16DE3538F248662FC73C93E8B1C200000028751868747470733A" + "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" + "21D5D6E022CB16DE3538F248662FC73C9808B6B90000001D7518687474" + "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" + "03F49C21D5D6E022CB16DE3538F248662FC73C9C28BBAC000000127518" + "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" + "00006203F49C21D5D6E022CB16DE3538F248662FC73CA048C0A3000000" + "07751868747470733A2F2F677265677765697362726F642E636F6DE1EC" + "5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAACE82C5" + "00000029751868747470733A2F2F677265677765697362726F642E636F" + "6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAE" + "EE87B80000001E751868747470733A2F2F677265677765697362726F64" + "2E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662F" + "C73CB30E8CAF00000013751868747470733A2F2F677265677765697362" + "726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F2" + "48662FC73CB72E91A200000008751868747470733A2F2F677265677765" + "697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE" + "3538F248662FC73CC1B453C40000002A751868747470733A2F2F677265" + "677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022" + "CB16DE3538F248662FC73CC5D458BB0000001F751868747470733A2F2F" + "677265677765697362726F642E636F6DE1EC5A000800006203F49C21D5" + "D6E022CB16DE3538F248662FC73CC9F45DAE0000001475186874747073" + "3A2F2F677265677765697362726F642E636F6DE1EC5A000800006203F4" + "9C21D5D6E022CB16DE3538F248662FC73CCE1462A50000000975186874" + "7470733A2F2F677265677765697362726F642E636F6DE1EC5A00080000" + "6203F49C21D5D6E022CB16DE3538F248662FC73CD89A24C70000002B75" + "1868747470733A2F2F677265677765697362726F642E636F6DE1EC5A00" + "0800006203F49C21D5D6E022CB16DE3538F248662FC73CDCBA29BA0000" + "0020751868747470733A2F2F677265677765697362726F642E636F6DE1" + "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CE0DA2E" + "B100000015751868747470733A2F2F677265677765697362726F642E63" + "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" + "E4FA33A40000000A751868747470733A2F2F677265677765697362726F" + "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" + "2FC73CEF7FF5C60000002C751868747470733A2F2F6772656777656973" + "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" + "F248662FC73CF39FFABD00000021751868747470733A2F2F6772656777" + "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" + "DE3538F248662FC73CF7BFFFB000000016751868747470733A2F2F6772" + "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" + "22CB16DE3538F248662FC73CFBE004A70000000B751868747470733A2F" + "2F677265677765697362726F642E636F6DE1F1E1E1E511006125001F71" + "B3556ED9C9459001E4F4A9121F4E07AB6D14898A5BBEF13D85C25D7435" + "40DB59F3CF56BE121B82D5812149D633F605EB07265A80B762A365CE94" + "883089FEEE4B955701E6240011CC9B202B0000002C6240000002540BE3" + "ECE1E72200000000240011CC9C2D0000000A202B0000002D202C000000" + "066240000002540BE3E081146203F49C21D5D6E022CB16DE3538F24866" + "2FC73CE1E1F1031000"; + std::string nftTxnHashHex = + 
"6C7F69A6D25A13AC4A2E9145999F45D4674F939900017A96885FDC2757" + "E9284E"; + ripple::uint256 nftID; + EXPECT_TRUE( + nftID.parseHex("000800006203F49C21D5D6E022CB16DE3538F248662" + "FC73CEF7FF5C60000002C")); + + std::string metaBlob = hexStringToBinaryString(metaHex); + std::string txnBlob = hexStringToBinaryString(txnHex); + std::string hashBlob = hexStringToBinaryString(hashHex); + std::string accountBlob = hexStringToBinaryString(accountHex); + std::string accountIndexBlob = hexStringToBinaryString(accountIndexHex); + std::vector affectedAccounts; + + std::string nftTxnBlob = hexStringToBinaryString(nftTxnHex); + std::string nftTxnMetaBlob = hexStringToBinaryString(nftTxnMeta); + + { + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.txHash = ~lgrInfo.txHash; + lgrInfoNext.accountHash = lgrInfoNext.accountHash ^ lgrInfoNext.txHash; + lgrInfoNext.parentHash = lgrInfoNext.hash; lgrInfoNext.hash++; - lgrInfoNext.accountHash = ~lgrInfo.accountHash; - { - std::string rawHeaderBlob = - ledgerInfoToBinaryString(lgrInfoNext); - backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + ripple::uint256 hash256; + EXPECT_TRUE(hash256.parseHex(hashHex)); + ripple::TxMeta txMeta{hash256, lgrInfoNext.seq, metaBlob}; + auto journal = ripple::debugLog(); + auto accountsSet = txMeta.getAffectedAccounts(); + for (auto& a : accountsSet) + { + affectedAccounts.push_back(a); } + std::vector accountTxData; + accountTxData.emplace_back(txMeta, hash256, journal); + + ripple::uint256 nftHash256; + EXPECT_TRUE(nftHash256.parseHex(nftTxnHashHex)); + ripple::TxMeta nftTxMeta{nftHash256, lgrInfoNext.seq, nftTxnMetaBlob}; + ripple::SerialIter it{nftTxnBlob.data(), nftTxnBlob.size()}; + ripple::STTx sttx{it}; + auto const [parsedNFTTxsRef, parsedNFT] = getNFTDataFromTx(nftTxMeta, sttx); + // need to copy the nft txns so we can std::move later + std::vector parsedNFTTxs; + parsedNFTTxs.insert(parsedNFTTxs.end(), parsedNFTTxsRef.begin(), parsedNFTTxsRef.end()); + EXPECT_EQ(parsedNFTTxs.size(), 1); + EXPECT_TRUE(parsedNFT.has_value()); + EXPECT_EQ(parsedNFT->tokenID, nftID); + std::vector nftData; + nftData.push_back(*parsedNFT); + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeTransaction( + std::string{hashBlob}, + lgrInfoNext.seq, + lgrInfoNext.closeTime.time_since_epoch().count(), + std::string{txnBlob}, + std::string{metaBlob}); + backend->writeAccountTransactions(std::move(accountTxData)); + backend->writeNFTs(std::move(nftData)); + backend->writeNFTTransactions(std::move(parsedNFTTxs)); + + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{accountBlob}); + backend->writeSuccessor(uint256ToString(Backend::firstKey), lgrInfoNext.seq, std::string{accountIndexBlob}); + backend->writeSuccessor(std::string{accountIndexBlob}, lgrInfoNext.seq, uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + ASSERT_EQ(txns.size(), 1); + EXPECT_STREQ((const char*)txns[0].transaction.data(), (const char*)txnBlob.data()); + EXPECT_STREQ((const 
char*)txns[0].metadata.data(), (const char*)metaBlob.data()); + auto hashes = backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(hashes.size(), 1); + EXPECT_EQ(ripple::strHex(hashes[0]), hashHex); + for (auto& a : affectedAccounts) { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng.has_value()); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - } - { - auto seq = backend->fetchLatestLedgerSequence(yield); - EXPECT_EQ(seq, lgrInfoNext.seq); - } - { - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr.has_value()); - EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - EXPECT_NE( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoOld)); - retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq - 1, yield); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoOld)); - EXPECT_NE( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield); - EXPECT_FALSE( - backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield) - .has_value()); - - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(txns.size(), 0); - - auto hashes = backend->fetchAllTransactionHashesInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(hashes.size(), 0); - } - - // the below dummy data is not expected to be consistent. The - // metadata string does represent valid metadata. Don't assume - // though that the transaction or its hash correspond to the - // metadata, or anything like that. These tests are purely - // binary tests to make sure the same data that goes in, comes - // back out - std::string metaHex = - "201C0000001AF8E411006F560A3E08122A05AC91DEFA87052B0554E4A29B46" - "3A27642EBB060B6052196592EEE72200000000240480FDB52503CE1A863300" - "000000000000003400000000000000005529983CBAED30F547471452921C3C" - "6B9F9685F292F6291000EED0A44413AF18C250101AC09600F4B502C8F7F830" - "F80B616DCB6F3970CB79AB70975A05ED5B66860B9564400000001FE217CB65" - "D54B640B31521B05000000000000000000000000434E5900000000000360E3" - "E0751BD9A566CD03FA6CAFC78118B82BA081142252F328CF91263417762570" - "D67220CCB33B1370E1E1E3110064561AC09600F4B502C8F7F830F80B616DCB" - "6F3970CB79AB70975A05ED33DF783681E8365A05ED33DF783681581AC09600" - "F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED33DF783681031100" - "0000000000000000000000434E59000000000004110360E3E0751BD9A566CD" - "03FA6CAFC78118B82BA0E1E1E4110064561AC09600F4B502C8F7F830F80B61" - "6DCB6F3970CB79AB70975A05ED5B66860B95E72200000000365A05ED5B6686" - "0B95581AC09600F4B502C8F7F830F80B616DCB6F3970CB79AB70975A05ED5B" - "66860B95011100000000000000000000000000000000000000000211000000" - "00000000000000000000000000000000000311000000000000000000000000" - "434E59000000000004110360E3E0751BD9A566CD03FA6CAFC78118B82BA0E1" - "E1E311006F5647B05E66DE9F3DF2689E8F4CE6126D3136B6C5E79587F9D24B" - "D71A952B0852BAE8240480FDB950101AC09600F4B502C8F7F830F80B616DCB" - "6F3970CB79AB70975A05ED33DF78368164400000033C83A95F65D59D9A6291" - "9C2D18000000000000000000000000434E5900000000000360E3E0751BD9A5" - "66CD03FA6CAFC78118B82BA081142252F328CF91263417762570D67220CCB3" - "3B1370E1E1E511006456AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588" - "C2D55C84718FAFBBD2604AE722000000003100000000000000003200000000" - "0000000058AEA3074F10FE15DAC592F8A0405C61FB7D4C98F588C2D55C8471" - 
"8FAFBBD2604A82142252F328CF91263417762570D67220CCB33B1370E1E1E5" - "1100612503CE1A8755CE935137F8C6C8DEF26B5CD93BE18105CA83F65E1E90" - "CEC546F562D25957DC0856E0311EB450B6177F969B94DBDDA83E99B7A0576A" - "CD9079573876F16C0C004F06E6240480FDB9624000000005FF0E2BE1E72200" - "000000240480FDBA2D00000005624000000005FF0E1F81142252F328CF9126" - "3417762570D67220CCB33B1370E1E1F1031000"; - std::string txnHex = - "1200072200000000240480FDB920190480FDB5201B03CE1A8964400000033C" - "83A95F65D59D9A62919C2D18000000000000000000000000434E5900000000" - "000360E3E0751BD9A566CD03FA6CAFC78118B82BA068400000000000000C73" - "21022D40673B44C82DEE1DDB8B9BB53DCCE4F97B27404DB850F068DD91D685" - "E337EA7446304402202EA6B702B48B39F2197112382838F92D4C02948E9911" - "FE6B2DEBCF9183A426BC022005DAC06CD4517E86C2548A80996019F3AC60A0" - "9EED153BF60C992930D68F09F981142252F328CF91263417762570D67220CC" - "B33B1370"; - std::string hashHex = - "0A81FB3D6324C2DCF73131505C6E4DC67981D7FC39F5E9574CEC4B1F22D28B" - "F7"; - - // this account is not related to the above transaction and - // metadata - std::string accountHex = - "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD018E" - "FFBE" - "17C5C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" - "0781" - "142252F328CF91263417762570D67220CCB33B1370"; - std::string accountIndexHex = - "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C004F" - "06"; - - // An NFTokenMint tx - std::string nftTxnHex = - "1200192200000008240011CC9B201B001F71D6202A0000000168400000" - "000000000C7321ED475D1452031E8F9641AF1631519A58F7B8681E172E" - "4838AA0E59408ADA1727DD74406960041F34F10E0CBB39444B4D4E577F" - "C0B7E8D843D091C2917E96E7EE0E08B30C91413EC551A2B8A1D405E8BA" - "34FE185D8B10C53B40928611F2DE3B746F0303751868747470733A2F2F" - "677265677765697362726F642E636F6D81146203F49C21D5D6E022CB16" - "DE3538F248662FC73C"; - - std::string nftTxnMeta = - "201C00000001F8E511005025001F71B3556ED9C9459001E4F4A9121F4E" - "07AB6D14898A5BBEF13D85C25D743540DB59F3CF566203F49C21D5D6E0" - "22CB16DE3538F248662FC73CFFFFFFFFFFFFFFFFFFFFFFFFE6FAEC5A00" - "0800006203F49C21D5D6E022CB16DE3538F248662FC73C8962EFA00000" - "0006751868747470733A2F2F677265677765697362726F642E636F6DE1" - "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C93E8B1" - "C200000028751868747470733A2F2F677265677765697362726F642E63" - "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" - "9808B6B90000001D751868747470733A2F2F677265677765697362726F" - "642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" - "2FC73C9C28BBAC00000012751868747470733A2F2F6772656777656973" - "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" - "F248662FC73CA048C0A300000007751868747470733A2F2F6772656777" - "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" - "DE3538F248662FC73CAACE82C500000029751868747470733A2F2F6772" - "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" - "22CB16DE3538F248662FC73CAEEE87B80000001E751868747470733A2F" - "2F677265677765697362726F642E636F6DE1EC5A000800006203F49C21" - "D5D6E022CB16DE3538F248662FC73CB30E8CAF00000013751868747470" - "733A2F2F677265677765697362726F642E636F6DE1EC5A000800006203" - "F49C21D5D6E022CB16DE3538F248662FC73CB72E91A200000008751868" - "747470733A2F2F677265677765697362726F642E636F6DE1EC5A000800" - "006203F49C21D5D6E022CB16DE3538F248662FC73CC1B453C40000002A" - "751868747470733A2F2F677265677765697362726F642E636F6DE1EC5A" - "000800006203F49C21D5D6E022CB16DE3538F248662FC73CC5D458BB00" - "00001F751868747470733A2F2F677265677765697362726F642E636F6D" - 
"E1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CC9F4" - "5DAE00000014751868747470733A2F2F677265677765697362726F642E" - "636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC7" - "3CCE1462A500000009751868747470733A2F2F67726567776569736272" - "6F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248" - "662FC73CD89A24C70000002B751868747470733A2F2F67726567776569" - "7362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE35" - "38F248662FC73CDCBA29BA00000020751868747470733A2F2F67726567" - "7765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB" - "16DE3538F248662FC73CE0DA2EB100000015751868747470733A2F2F67" - "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" - "E022CB16DE3538F248662FC73CE4FA33A40000000A751868747470733A" - "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" - "21D5D6E022CB16DE3538F248662FC73CF39FFABD000000217518687474" - "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" - "03F49C21D5D6E022CB16DE3538F248662FC73CF7BFFFB0000000167518" - "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" - "00006203F49C21D5D6E022CB16DE3538F248662FC73CFBE004A7000000" - "0B751868747470733A2F2F677265677765697362726F642E636F6DE1F1" - "E1E72200000000501A6203F49C21D5D6E022CB16DE3538F248662FC73C" - "662FC73C8962EFA000000006FAEC5A000800006203F49C21D5D6E022CB" - "16DE3538F248662FC73C8962EFA000000006751868747470733A2F2F67" - "7265677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6" - "E022CB16DE3538F248662FC73C93E8B1C200000028751868747470733A" - "2F2F677265677765697362726F642E636F6DE1EC5A000800006203F49C" - "21D5D6E022CB16DE3538F248662FC73C9808B6B90000001D7518687474" - "70733A2F2F677265677765697362726F642E636F6DE1EC5A0008000062" - "03F49C21D5D6E022CB16DE3538F248662FC73C9C28BBAC000000127518" - "68747470733A2F2F677265677765697362726F642E636F6DE1EC5A0008" - "00006203F49C21D5D6E022CB16DE3538F248662FC73CA048C0A3000000" - "07751868747470733A2F2F677265677765697362726F642E636F6DE1EC" - "5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAACE82C5" - "00000029751868747470733A2F2F677265677765697362726F642E636F" - "6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CAE" - "EE87B80000001E751868747470733A2F2F677265677765697362726F64" - "2E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662F" - "C73CB30E8CAF00000013751868747470733A2F2F677265677765697362" - "726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F2" - "48662FC73CB72E91A200000008751868747470733A2F2F677265677765" - "697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE" - "3538F248662FC73CC1B453C40000002A751868747470733A2F2F677265" - "677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022" - "CB16DE3538F248662FC73CC5D458BB0000001F751868747470733A2F2F" - "677265677765697362726F642E636F6DE1EC5A000800006203F49C21D5" - "D6E022CB16DE3538F248662FC73CC9F45DAE0000001475186874747073" - "3A2F2F677265677765697362726F642E636F6DE1EC5A000800006203F4" - "9C21D5D6E022CB16DE3538F248662FC73CCE1462A50000000975186874" - "7470733A2F2F677265677765697362726F642E636F6DE1EC5A00080000" - "6203F49C21D5D6E022CB16DE3538F248662FC73CD89A24C70000002B75" - "1868747470733A2F2F677265677765697362726F642E636F6DE1EC5A00" - "0800006203F49C21D5D6E022CB16DE3538F248662FC73CDCBA29BA0000" - "0020751868747470733A2F2F677265677765697362726F642E636F6DE1" - "EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73CE0DA2E" - "B100000015751868747470733A2F2F677265677765697362726F642E63" - "6F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F248662FC73C" - "E4FA33A40000000A751868747470733A2F2F677265677765697362726F" - 
"642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538F24866" - "2FC73CEF7FF5C60000002C751868747470733A2F2F6772656777656973" - "62726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16DE3538" - "F248662FC73CF39FFABD00000021751868747470733A2F2F6772656777" - "65697362726F642E636F6DE1EC5A000800006203F49C21D5D6E022CB16" - "DE3538F248662FC73CF7BFFFB000000016751868747470733A2F2F6772" - "65677765697362726F642E636F6DE1EC5A000800006203F49C21D5D6E0" - "22CB16DE3538F248662FC73CFBE004A70000000B751868747470733A2F" - "2F677265677765697362726F642E636F6DE1F1E1E1E511006125001F71" - "B3556ED9C9459001E4F4A9121F4E07AB6D14898A5BBEF13D85C25D7435" - "40DB59F3CF56BE121B82D5812149D633F605EB07265A80B762A365CE94" - "883089FEEE4B955701E6240011CC9B202B0000002C6240000002540BE3" - "ECE1E72200000000240011CC9C2D0000000A202B0000002D202C000000" - "066240000002540BE3E081146203F49C21D5D6E022CB16DE3538F24866" - "2FC73CE1E1F1031000"; - std::string nftTxnHashHex = - "6C7F69A6D25A13AC4A2E9145999F45D4674F939900017A96885FDC2757" - "E9284E"; - ripple::uint256 nftID; - EXPECT_TRUE( - nftID.parseHex("000800006203F49C21D5D6E022CB16DE3538F248662" - "FC73CEF7FF5C60000002C")); - - std::string metaBlob = hexStringToBinaryString(metaHex); - std::string txnBlob = hexStringToBinaryString(txnHex); - std::string hashBlob = hexStringToBinaryString(hashHex); - std::string accountBlob = hexStringToBinaryString(accountHex); - std::string accountIndexBlob = - hexStringToBinaryString(accountIndexHex); - std::vector affectedAccounts; - - std::string nftTxnBlob = hexStringToBinaryString(nftTxnHex); - std::string nftTxnMetaBlob = hexStringToBinaryString(nftTxnMeta); - - { - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.txHash = ~lgrInfo.txHash; - lgrInfoNext.accountHash = - lgrInfoNext.accountHash ^ lgrInfoNext.txHash; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - - ripple::uint256 hash256; - EXPECT_TRUE(hash256.parseHex(hashHex)); - ripple::TxMeta txMeta{hash256, lgrInfoNext.seq, metaBlob}; - auto journal = ripple::debugLog(); - auto accountsSet = txMeta.getAffectedAccounts(); - for (auto& a : accountsSet) - { - affectedAccounts.push_back(a); - } - std::vector accountTxData; - accountTxData.emplace_back(txMeta, hash256, journal); - - ripple::uint256 nftHash256; - EXPECT_TRUE(nftHash256.parseHex(nftTxnHashHex)); - ripple::TxMeta nftTxMeta{ - nftHash256, lgrInfoNext.seq, nftTxnMetaBlob}; - ripple::SerialIter it{nftTxnBlob.data(), nftTxnBlob.size()}; - ripple::STTx sttx{it}; - auto const [parsedNFTTxsRef, parsedNFT] = - getNFTDataFromTx(nftTxMeta, sttx); - // need to copy the nft txns so we can std::move later - std::vector parsedNFTTxs; - parsedNFTTxs.insert( - parsedNFTTxs.end(), - parsedNFTTxsRef.begin(), - parsedNFTTxsRef.end()); - EXPECT_EQ(parsedNFTTxs.size(), 1); - EXPECT_TRUE(parsedNFT.has_value()); - EXPECT_EQ(parsedNFT->tokenID, nftID); - std::vector nftData; - nftData.push_back(*parsedNFT); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - backend->writeTransaction( - std::string{hashBlob}, - lgrInfoNext.seq, - lgrInfoNext.closeTime.time_since_epoch().count(), - std::string{txnBlob}, - std::string{metaBlob}); - backend->writeAccountTransactions(std::move(accountTxData)); - backend->writeNFTs(std::move(nftData)); - backend->writeNFTTransactions(std::move(parsedNFTTxs)); - - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{accountBlob}); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfoNext.seq, - 
std::string{accountIndexBlob}); - backend->writeSuccessor( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - uint256ToString(Backend::lastKey)); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - ASSERT_EQ(txns.size(), 1); - EXPECT_STREQ( - (const char*)txns[0].transaction.data(), - (const char*)txnBlob.data()); - EXPECT_STREQ( - (const char*)txns[0].metadata.data(), - (const char*)metaBlob.data()); - auto hashes = backend->fetchAllTransactionHashesInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(hashes.size(), 1); - EXPECT_EQ(ripple::strHex(hashes[0]), hashHex); - for (auto& a : affectedAccounts) - { - auto [txns, cursor] = backend->fetchAccountTransactions( - a, 100, true, {}, yield); - EXPECT_EQ(txns.size(), 1); - EXPECT_EQ(txns[0], txns[0]); - EXPECT_FALSE(cursor); - } - auto nft = backend->fetchNFT(nftID, lgrInfoNext.seq, yield); - EXPECT_TRUE(nft.has_value()); - auto [nftTxns, cursor] = - backend->fetchNFTTransactions(nftID, 100, true, {}, yield); - EXPECT_EQ(nftTxns.size(), 1); - EXPECT_EQ(nftTxns[0], nftTxns[0]); + auto [txns, cursor] = backend->fetchAccountTransactions(a, 100, true, {}, yield); + EXPECT_EQ(txns.size(), 1); + EXPECT_EQ(txns[0], txns[0]); EXPECT_FALSE(cursor); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = - backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); } + auto nft = backend->fetchNFT(nftID, lgrInfoNext.seq, yield); + EXPECT_TRUE(nft.has_value()); + auto [nftTxns, cursor] = backend->fetchNFTTransactions(nftID, 100, true, {}, yield); + EXPECT_EQ(nftTxns.size(), 1); + EXPECT_EQ(nftTxns[0], nftTxns[0]); + EXPECT_FALSE(cursor); - // obtain a time-based seed: - unsigned seed = - std::chrono::system_clock::now().time_since_epoch().count(); - std::string accountBlobOld = accountBlob; + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + + // obtain a time-based seed: + unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); + std::string accountBlobOld = accountBlob; + { + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger(lgrInfoNext, 
ledgerInfoToBinaryString(lgrInfoNext)); + std::shuffle(accountBlob.begin(), accountBlob.end(), std::default_random_engine(seed)); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{accountBlob}); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + { + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{}); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfoNext.seq, uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 2, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + + auto generateObjects = [](size_t numObjects, uint32_t ledgerSequence) { + std::vector> res{numObjects}; + ripple::uint256 key; + key = ledgerSequence * 100000; + + for (auto& blob : res) { - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - lgrInfoNext.txHash = - lgrInfoNext.txHash ^ lgrInfoNext.accountHash; - lgrInfoNext.accountHash = - ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - std::shuffle( - 
accountBlob.begin(), - accountBlob.end(), - std::default_random_engine(seed)); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{accountBlob}); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + ++key; + std::string keyStr{(const char*)key.data(), key.size()}; + blob.first = keyStr; + blob.second = std::to_string(ledgerSequence) + keyStr; } + return res; + }; + auto updateObjects = [](uint32_t ledgerSequence, auto objs) { + for (auto& [key, obj] : objs) { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(txns.size(), 0); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = - backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq - 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlobOld.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); + obj = std::to_string(ledgerSequence) + obj; } + return objs; + }; + auto generateTxns = [](size_t numTxns, uint32_t ledgerSequence) { + std::vector> res{numTxns}; + ripple::uint256 base; + base = ledgerSequence * 100000; + for (auto& blob : res) { - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - lgrInfoNext.txHash = - lgrInfoNext.txHash ^ lgrInfoNext.accountHash; - lgrInfoNext.accountHash = - ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{}); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfoNext.seq, - uint256ToString(Backend::lastKey)); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + ++base; + std::string hashStr{(const char*)base.data(), base.size()}; + std::string txnStr = "tx" + std::to_string(ledgerSequence) + hashStr; + std::string metaStr = "meta" + std::to_string(ledgerSequence) + hashStr; + blob = std::make_tuple(hashStr, txnStr, metaStr); } + return res; + }; + auto generateAccounts = [](uint32_t ledgerSequence, uint32_t numAccounts) { + std::vector accounts; + ripple::AccountID base; + base = ledgerSequence * 998765; + for (size_t i = 0; i < numAccounts; ++i) { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(txns.size(), 0); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - 
auto obj = - backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); - EXPECT_FALSE(obj); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_FALSE(obj); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq - 2, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlobOld.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); + ++base; + accounts.push_back(base); } + return accounts; + }; + auto generateAccountTx = [&](uint32_t ledgerSequence, auto txns) { + std::vector ret; + auto accounts = generateAccounts(ledgerSequence, 10); + std::srand(std::time(nullptr)); + uint32_t idx = 0; + for (auto& [hash, txn, meta] : txns) + { + AccountTransactionsData data; + data.ledgerSequence = ledgerSequence; + data.transactionIndex = idx; + data.txHash = hash; + for (size_t i = 0; i < 3; ++i) + { + data.accounts.insert(accounts[std::rand() % accounts.size()]); + } + ++idx; + ret.push_back(data); + } + return ret; + }; + auto generateNextLedger = [seed](auto lgrInfo) { + ++lgrInfo.seq; + lgrInfo.parentHash = lgrInfo.hash; + static auto randomEngine = std::default_random_engine(seed); + std::shuffle(lgrInfo.txHash.begin(), lgrInfo.txHash.end(), randomEngine); + std::shuffle(lgrInfo.accountHash.begin(), lgrInfo.accountHash.end(), randomEngine); + std::shuffle(lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); + return lgrInfo; + }; + auto writeLedger = [&](auto lgrInfo, auto txns, auto objs, auto accountTx, auto state) { + backend->startWrites(); - auto generateObjects = [](size_t numObjects, - uint32_t ledgerSequence) { - std::vector> res{ - numObjects}; - ripple::uint256 key; - key = ledgerSequence * 100000; - - for (auto& blob : res) + backend->writeLedger(lgrInfo, ledgerInfoToBinaryString(lgrInfo)); + for (auto [hash, txn, meta] : txns) + { + backend->writeTransaction( + std::move(hash), + lgrInfo.seq, + lgrInfo.closeTime.time_since_epoch().count(), + std::move(txn), + std::move(meta)); + } + for (auto [key, obj] : objs) + { + backend->writeLedgerObject(std::string{key}, lgrInfo.seq, std::string{obj}); + } + if (state.count(lgrInfo.seq - 1) == 0 || + std::find_if(state[lgrInfo.seq - 1].begin(), state[lgrInfo.seq - 1].end(), [&](auto obj) { + return obj.first == objs[0].first; + }) == state[lgrInfo.seq - 1].end()) + { + for (size_t i = 0; i < objs.size(); ++i) { - ++key; - std::string keyStr{(const char*)key.data(), key.size()}; - blob.first = keyStr; - blob.second = std::to_string(ledgerSequence) + keyStr; - } - return res; - }; - auto updateObjects = [](uint32_t ledgerSequence, auto objs) { - for (auto& [key, obj] : objs) - { - obj = std::to_string(ledgerSequence) + obj; - } - return objs; - }; - auto generateTxns = [](size_t numTxns, uint32_t ledgerSequence) { - std::vector> - res{numTxns}; - ripple::uint256 base; - base = ledgerSequence * 100000; - for (auto& blob : res) - { - ++base; - std::string hashStr{(const char*)base.data(), base.size()}; - std::string txnStr = - "tx" + std::to_string(ledgerSequence) + hashStr; - std::string metaStr = - "meta" + std::to_string(ledgerSequence) + hashStr; - blob = std::make_tuple(hashStr, txnStr, metaStr); - } - return res; - }; - auto generateAccounts = [](uint32_t ledgerSequence, - uint32_t numAccounts) { - std::vector accounts; - ripple::AccountID base; - base = ledgerSequence * 998765; - for (size_t i = 0; i < numAccounts; ++i) - { - ++base; - accounts.push_back(base); - } - return accounts; - }; - auto 
generateAccountTx = [&](uint32_t ledgerSequence, auto txns) { - std::vector ret; - auto accounts = generateAccounts(ledgerSequence, 10); - std::srand(std::time(nullptr)); - uint32_t idx = 0; - for (auto& [hash, txn, meta] : txns) - { - AccountTransactionsData data; - data.ledgerSequence = ledgerSequence; - data.transactionIndex = idx; - data.txHash = hash; - for (size_t i = 0; i < 3; ++i) - { - data.accounts.insert( - accounts[std::rand() % accounts.size()]); - } - ++idx; - ret.push_back(data); - } - return ret; - }; - auto generateNextLedger = [seed](auto lgrInfo) { - ++lgrInfo.seq; - lgrInfo.parentHash = lgrInfo.hash; - static auto randomEngine = std::default_random_engine(seed); - std::shuffle( - lgrInfo.txHash.begin(), lgrInfo.txHash.end(), randomEngine); - std::shuffle( - lgrInfo.accountHash.begin(), - lgrInfo.accountHash.end(), - randomEngine); - std::shuffle( - lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); - return lgrInfo; - }; - auto writeLedger = [&](auto lgrInfo, - auto txns, - auto objs, - auto accountTx, - auto state) { - backend->startWrites(); - - backend->writeLedger( - lgrInfo, ledgerInfoToBinaryString(lgrInfo)); - for (auto [hash, txn, meta] : txns) - { - backend->writeTransaction( - std::move(hash), - lgrInfo.seq, - lgrInfo.closeTime.time_since_epoch().count(), - std::move(txn), - std::move(meta)); - } - for (auto [key, obj] : objs) - { - backend->writeLedgerObject( - std::string{key}, lgrInfo.seq, std::string{obj}); - } - if (state.count(lgrInfo.seq - 1) == 0 || - std::find_if( - state[lgrInfo.seq - 1].begin(), - state[lgrInfo.seq - 1].end(), - [&](auto obj) { return obj.first == objs[0].first; }) == - state[lgrInfo.seq - 1].end()) - { - for (size_t i = 0; i < objs.size(); ++i) - { - if (i + 1 < objs.size()) - backend->writeSuccessor( - std::string{objs[i].first}, - lgrInfo.seq, - std::string{objs[i + 1].first}); - else - backend->writeSuccessor( - std::string{objs[i].first}, - lgrInfo.seq, - uint256ToString(Backend::lastKey)); - } - if (state.count(lgrInfo.seq - 1)) + if (i + 1 < objs.size()) backend->writeSuccessor( - std::string{state[lgrInfo.seq - 1].back().first}, - lgrInfo.seq, - std::string{objs[0].first}); + std::string{objs[i].first}, lgrInfo.seq, std::string{objs[i + 1].first}); else backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfo.seq, - std::string{objs[0].first}); + std::string{objs[i].first}, lgrInfo.seq, uint256ToString(Backend::lastKey)); } - - backend->writeAccountTransactions(std::move(accountTx)); - ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); - }; - - auto checkLedger = - [&](auto lgrInfo, auto txns, auto objs, auto accountTx) { - auto rng = backend->fetchLedgerRange(); - auto seq = lgrInfo.seq; - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_GE(rng->maxSequence, seq); - auto retLgr = backend->fetchLedgerBySequence(seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfo)); - auto retTxns = - backend->fetchAllTransactionsInLedger(seq, yield); - for (auto [hash, txn, meta] : txns) - { - bool found = false; - for (auto [retTxn, retMeta, retSeq, retDate] : retTxns) - { - if (std::strncmp( - (const char*)retTxn.data(), - (const char*)txn.data(), - txn.size()) == 0 && - std::strncmp( - (const char*)retMeta.data(), - (const char*)meta.data(), - meta.size()) == 0) - found = true; - } - ASSERT_TRUE(found); - } - for (auto [account, data] : accountTx) - { - std::vector retData; - std::optional cursor; - do - { - uint32_t limit = 10; - 
auto [txns, retCursor] = - backend->fetchAccountTransactions( - account, limit, false, cursor, yield); - if (retCursor) - EXPECT_EQ(txns.size(), limit); - retData.insert( - retData.end(), txns.begin(), txns.end()); - cursor = retCursor; - } while (cursor); - EXPECT_EQ(retData.size(), data.size()); - for (size_t i = 0; i < retData.size(); ++i) - { - auto [txn, meta, seq, date] = retData[i]; - auto [hash, expTxn, expMeta] = data[i]; - EXPECT_STREQ( - (const char*)txn.data(), - (const char*)expTxn.data()); - EXPECT_STREQ( - (const char*)meta.data(), - (const char*)expMeta.data()); - } - } - std::vector keys; - for (auto [key, obj] : objs) - { - auto retObj = backend->fetchLedgerObject( - binaryStringToUint256(key), seq, yield); - if (obj.size()) - { - ASSERT_TRUE(retObj.has_value()); - EXPECT_STREQ( - (const char*)obj.data(), - (const char*)retObj->data()); - } - else - { - ASSERT_FALSE(retObj.has_value()); - } - keys.push_back(binaryStringToUint256(key)); - } - - { - auto retObjs = - backend->fetchLedgerObjects(keys, seq, yield); - ASSERT_EQ(retObjs.size(), objs.size()); - - for (size_t i = 0; i < keys.size(); ++i) - { - auto [key, obj] = objs[i]; - auto retObj = retObjs[i]; - if (obj.size()) - { - ASSERT_TRUE(retObj.size()); - EXPECT_STREQ( - (const char*)obj.data(), - (const char*)retObj.data()); - } - else - { - ASSERT_FALSE(retObj.size()); - } - } - } - - Backend::LedgerPage page; - std::vector retObjs; - do - { - uint32_t limit = 10; - page = backend->fetchLedgerPage( - page.cursor, seq, limit, false, yield); - retObjs.insert( - retObjs.end(), - page.objects.begin(), - page.objects.end()); - } while (page.cursor); - - for (auto obj : objs) - { - bool found = false; - for (auto retObj : retObjs) - { - if (ripple::strHex(obj.first) == - ripple::strHex(retObj.key)) - { - found = true; - ASSERT_EQ( - ripple::strHex(obj.second), - ripple::strHex(retObj.blob)); - } - } - if (found != (obj.second.size() != 0)) - ASSERT_EQ(found, obj.second.size() != 0); - } - }; - - std::map>> - state; - std::map< - uint32_t, - std::vector>> - allTxns; - std::unordered_map> - allTxnsMap; - std::map< - uint32_t, - std::map>> - allAccountTx; - std::map lgrInfos; - for (size_t i = 0; i < 10; ++i) - { - lgrInfoNext = generateNextLedger(lgrInfoNext); - auto objs = generateObjects(25, lgrInfoNext.seq); - auto txns = generateTxns(10, lgrInfoNext.seq); - auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); - for (auto rec : accountTx) - { - for (auto account : rec.accounts) - { - allAccountTx[lgrInfoNext.seq][account].push_back( - std::string{ - (const char*)rec.txHash.data(), - rec.txHash.size()}); - } - } - EXPECT_EQ(objs.size(), 25); - EXPECT_NE(objs[0], objs[1]); - EXPECT_EQ(txns.size(), 10); - EXPECT_NE(txns[0], txns[1]); - std::sort(objs.begin(), objs.end()); - state[lgrInfoNext.seq] = objs; - writeLedger(lgrInfoNext, txns, objs, accountTx, state); - allTxns[lgrInfoNext.seq] = txns; - lgrInfos[lgrInfoNext.seq] = lgrInfoNext; - for (auto& [hash, txn, meta] : txns) - { - allTxnsMap[hash] = std::make_pair(txn, meta); - } - } - - std::vector> objs; - for (size_t i = 0; i < 10; ++i) - { - lgrInfoNext = generateNextLedger(lgrInfoNext); - if (!objs.size()) - objs = generateObjects(25, lgrInfoNext.seq); + if (state.count(lgrInfo.seq - 1)) + backend->writeSuccessor( + std::string{state[lgrInfo.seq - 1].back().first}, lgrInfo.seq, std::string{objs[0].first}); else - objs = updateObjects(lgrInfoNext.seq, objs); - auto txns = generateTxns(10, lgrInfoNext.seq); - auto accountTx = generateAccountTx(lgrInfoNext.seq, 
txns); - for (auto rec : accountTx) + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfo.seq, std::string{objs[0].first}); + } + + backend->writeAccountTransactions(std::move(accountTx)); + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + }; + + auto checkLedger = [&](auto lgrInfo, auto txns, auto objs, auto accountTx) { + auto rng = backend->fetchLedgerRange(); + auto seq = lgrInfo.seq; + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_GE(rng->maxSequence, seq); + auto retLgr = backend->fetchLedgerBySequence(seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfo)); + auto retTxns = backend->fetchAllTransactionsInLedger(seq, yield); + for (auto [hash, txn, meta] : txns) + { + bool found = false; + for (auto [retTxn, retMeta, retSeq, retDate] : retTxns) { - for (auto account : rec.accounts) - { - allAccountTx[lgrInfoNext.seq][account].push_back( - std::string{ - (const char*)rec.txHash.data(), - rec.txHash.size()}); - } + if (std::strncmp((const char*)retTxn.data(), (const char*)txn.data(), txn.size()) == 0 && + std::strncmp((const char*)retMeta.data(), (const char*)meta.data(), meta.size()) == 0) + found = true; } - EXPECT_EQ(objs.size(), 25); - EXPECT_NE(objs[0], objs[1]); - EXPECT_EQ(txns.size(), 10); - EXPECT_NE(txns[0], txns[1]); - std::sort(objs.begin(), objs.end()); - state[lgrInfoNext.seq] = objs; - writeLedger(lgrInfoNext, txns, objs, accountTx, state); - allTxns[lgrInfoNext.seq] = txns; - lgrInfos[lgrInfoNext.seq] = lgrInfoNext; - for (auto& [hash, txn, meta] : txns) + ASSERT_TRUE(found); + } + for (auto [account, data] : accountTx) + { + std::vector retData; + std::optional cursor; + do { - allTxnsMap[hash] = std::make_pair(txn, meta); + uint32_t limit = 10; + auto [txns, retCursor] = backend->fetchAccountTransactions(account, limit, false, cursor, yield); + if (retCursor) + EXPECT_EQ(txns.size(), limit); + retData.insert(retData.end(), txns.begin(), txns.end()); + cursor = retCursor; + } while (cursor); + EXPECT_EQ(retData.size(), data.size()); + for (size_t i = 0; i < retData.size(); ++i) + { + auto [txn, meta, seq, date] = retData[i]; + auto [hash, expTxn, expMeta] = data[i]; + EXPECT_STREQ((const char*)txn.data(), (const char*)expTxn.data()); + EXPECT_STREQ((const char*)meta.data(), (const char*)expMeta.data()); + } + } + std::vector keys; + for (auto [key, obj] : objs) + { + auto retObj = backend->fetchLedgerObject(binaryStringToUint256(key), seq, yield); + if (obj.size()) + { + ASSERT_TRUE(retObj.has_value()); + EXPECT_STREQ((const char*)obj.data(), (const char*)retObj->data()); + } + else + { + ASSERT_FALSE(retObj.has_value()); + } + keys.push_back(binaryStringToUint256(key)); + } + + { + auto retObjs = backend->fetchLedgerObjects(keys, seq, yield); + ASSERT_EQ(retObjs.size(), objs.size()); + + for (size_t i = 0; i < keys.size(); ++i) + { + auto [key, obj] = objs[i]; + auto retObj = retObjs[i]; + if (obj.size()) + { + ASSERT_TRUE(retObj.size()); + EXPECT_STREQ((const char*)obj.data(), (const char*)retObj.data()); + } + else + { + ASSERT_FALSE(retObj.size()); + } } } - auto flatten = [&](uint32_t max) { - std::vector> flat; - std::map objs; - for (auto [seq, diff] : state) + Backend::LedgerPage page; + std::vector retObjs; + do + { + uint32_t limit = 10; + page = backend->fetchLedgerPage(page.cursor, seq, limit, false, yield); + retObjs.insert(retObjs.end(), page.objects.begin(), page.objects.end()); + } while (page.cursor); + + for (auto obj : objs) + { + bool found = 
false; + for (auto retObj : retObjs) { - for (auto [k, v] : diff) + if (ripple::strHex(obj.first) == ripple::strHex(retObj.key)) { - if (seq > max) - { - if (objs.count(k) == 0) - objs[k] = ""; - } - else - { - objs[k] = v; - } + found = true; + ASSERT_EQ(ripple::strHex(obj.second), ripple::strHex(retObj.blob)); } } - for (auto [key, value] : objs) - { - flat.push_back(std::make_pair(key, value)); - } - return flat; - }; + if (found != (obj.second.size() != 0)) + ASSERT_EQ(found, obj.second.size() != 0); + } + }; - auto flattenAccountTx = [&](uint32_t max) { - std::unordered_map< - ripple::AccountID, - std::vector< - std::tuple>> - accountTx; - for (auto [seq, map] : allAccountTx) + std::map>> state; + std::map>> allTxns; + std::unordered_map> allTxnsMap; + std::map>> allAccountTx; + std::map lgrInfos; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + auto objs = generateObjects(25, lgrInfoNext.seq); + auto txns = generateTxns(10, lgrInfoNext.seq); + auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); + for (auto rec : accountTx) + { + for (auto account : rec.accounts) { - if (seq > max) - break; - for (auto& [account, hashes] : map) - { - for (auto& hash : hashes) - { - auto& [txn, meta] = allTxnsMap[hash]; - accountTx[account].push_back( - std::make_tuple(hash, txn, meta)); - } - } + allAccountTx[lgrInfoNext.seq][account].push_back( + std::string{(const char*)rec.txHash.data(), rec.txHash.size()}); } - for (auto& [account, data] : accountTx) - std::reverse(data.begin(), data.end()); - return accountTx; - }; + } + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + EXPECT_EQ(txns.size(), 10); + EXPECT_NE(txns[0], txns[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, txns, objs, accountTx, state); + allTxns[lgrInfoNext.seq] = txns; + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + for (auto& [hash, txn, meta] : txns) + { + allTxnsMap[hash] = std::make_pair(txn, meta); + } + } + std::vector> objs; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + if (!objs.size()) + objs = generateObjects(25, lgrInfoNext.seq); + else + objs = updateObjects(lgrInfoNext.seq, objs); + auto txns = generateTxns(10, lgrInfoNext.seq); + auto accountTx = generateAccountTx(lgrInfoNext.seq, txns); + for (auto rec : accountTx) + { + for (auto account : rec.accounts) + { + allAccountTx[lgrInfoNext.seq][account].push_back( + std::string{(const char*)rec.txHash.data(), rec.txHash.size()}); + } + } + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + EXPECT_EQ(txns.size(), 10); + EXPECT_NE(txns[0], txns[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, txns, objs, accountTx, state); + allTxns[lgrInfoNext.seq] = txns; + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + for (auto& [hash, txn, meta] : txns) + { + allTxnsMap[hash] = std::make_pair(txn, meta); + } + } + + auto flatten = [&](uint32_t max) { + std::vector> flat; + std::map objs; for (auto [seq, diff] : state) { - auto flat = flatten(seq); - checkLedger( - lgrInfos[seq], allTxns[seq], flat, flattenAccountTx(seq)); + for (auto [k, v] : diff) + { + if (seq > max) + { + if (objs.count(k) == 0) + objs[k] = ""; + } + else + { + objs[k] = v; + } + } } + for (auto [key, value] : objs) + { + flat.push_back(std::make_pair(key, value)); + } + return flat; + }; - done = true; - work.reset(); - }); + auto flattenAccountTx = [&](uint32_t max) { + std::unordered_map>> + 
accountTx; + for (auto [seq, map] : allAccountTx) + { + if (seq > max) + break; + for (auto& [account, hashes] : map) + { + for (auto& hash : hashes) + { + auto& [txn, meta] = allTxnsMap[hash]; + accountTx[account].push_back(std::make_tuple(hash, txn, meta)); + } + } + } + for (auto& [account, data] : accountTx) + std::reverse(data.begin(), data.end()); + return accountTx; + }; + + for (auto [seq, diff] : state) + { + auto flat = flatten(seq); + checkLedger(lgrInfos[seq], allTxns[seq], flat, flattenAccountTx(seq)); + } + + done = true; + work.reset(); + }); ctx.run(); ASSERT_EQ(done, true); @@ -1073,569 +898,445 @@ TEST_F(BackendCassandraTest, CacheIntegration) std::optional work; work.emplace(ctx); - boost::asio::spawn( - ctx, [this, &done, &work](boost::asio::yield_context yield) { - backend->cache().setFull(); + boost::asio::spawn(ctx, [this, &done, &work](boost::asio::yield_context yield) { + backend->cache().setFull(); - std::string rawHeader = - "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335BC54" - "351E" - "DD733898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" - "315A" - "6DB6FE30CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED" - "3CF5" - "3E2232B33EF57CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE28265" - "6A58" - "CE5AA29652EFFD80AC59CD91416E4E13DBBE"; - // this account is not related to the above transaction and - // metadata - std::string accountHex = - "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD018E" - "FFBE" - "17C5C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" - "0781" - "142252F328CF91263417762570D67220CCB33B1370"; - std::string accountIndexHex = - "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C004F" - "06"; + std::string rawHeader = + "03C3141A01633CD656F91B4EBB5EB89B791BD34DBC8A04BB6F407C5335BC54" + "351E" + "DD733898497E809E04074D14D271E4832D7888754F9230800761563A292FA2" + "315A" + "6DB6FE30CC5909B285080FCD6773CC883F9FE0EE4D439340AC592AADB973ED" + "3CF5" + "3E2232B33EF57CECAC2816E3122816E31A0A00F8377CD95DFA484CFAE28265" + "6A58" + "CE5AA29652EFFD80AC59CD91416E4E13DBBE"; + // this account is not related to the above transaction and + // metadata + std::string accountHex = + "1100612200000000240480FDBC2503CE1A872D0000000555516931B2AD018E" + "FFBE" + "17C5C9DCCF872F36837C2C6136ACF80F2A24079CF81FD0624000000005FF0E" + "0781" + "142252F328CF91263417762570D67220CCB33B1370"; + std::string accountIndexHex = + "E0311EB450B6177F969B94DBDDA83E99B7A0576ACD9079573876F16C0C004F" + "06"; - auto hexStringToBinaryString = [](auto const& hex) { - auto blob = ripple::strUnHex(hex); - std::string strBlob; - for (auto c : *blob) - { - strBlob += c; - } - return strBlob; - }; - auto binaryStringToUint256 = - [](auto const& bin) -> ripple::uint256 { - ripple::uint256 uint; - return uint.fromVoid((void const*)bin.data()); - }; - auto ledgerInfoToBinaryString = [](auto const& info) { - auto blob = RPC::ledgerInfoToBlob(info, true); - std::string strBlob; - for (auto c : blob) - { - strBlob += c; - } - return strBlob; - }; + auto hexStringToBinaryString = [](auto const& hex) { + auto blob = ripple::strUnHex(hex); + std::string strBlob; + for (auto c : *blob) + { + strBlob += c; + } + return strBlob; + }; + auto binaryStringToUint256 = [](auto const& bin) -> ripple::uint256 { + ripple::uint256 uint; + return uint.fromVoid((void const*)bin.data()); + }; + auto ledgerInfoToBinaryString = [](auto const& info) { + auto blob = RPC::ledgerInfoToBlob(info, true); + std::string strBlob; + for (auto c : blob) + { + strBlob += c; + } + 
return strBlob; + }; - std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); - std::string accountBlob = hexStringToBinaryString(accountHex); - std::string accountIndexBlob = - hexStringToBinaryString(accountIndexHex); - ripple::LedgerInfo lgrInfo = - deserializeHeader(ripple::makeSlice(rawHeaderBlob)); + std::string rawHeaderBlob = hexStringToBinaryString(rawHeader); + std::string accountBlob = hexStringToBinaryString(accountHex); + std::string accountIndexBlob = hexStringToBinaryString(accountIndexHex); + ripple::LedgerInfo lgrInfo = deserializeHeader(ripple::makeSlice(rawHeaderBlob)); + + backend->startWrites(); + backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); + backend->writeSuccessor(uint256ToString(Backend::firstKey), lgrInfo.seq, uint256ToString(Backend::lastKey)); + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, rng->maxSequence); + EXPECT_EQ(rng->maxSequence, lgrInfo.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_TRUE(seq.has_value()); + EXPECT_EQ(*seq, lgrInfo.seq); + } + { + auto retLgr = backend->fetchLedgerBySequence(lgrInfo.seq, yield); + ASSERT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfo.seq); + EXPECT_EQ(RPC::ledgerInfoToBlob(lgrInfo), RPC::ledgerInfoToBlob(*retLgr)); + } + EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield).has_value()); + auto lgrInfoOld = lgrInfo; + + auto lgrInfoNext = lgrInfo; + lgrInfoNext.seq = lgrInfo.seq + 1; + lgrInfoNext.parentHash = lgrInfo.hash; + lgrInfoNext.hash++; + lgrInfoNext.accountHash = ~lgrInfo.accountHash; + { + std::string rawHeaderBlob = ledgerInfoToBinaryString(lgrInfoNext); backend->startWrites(); - backend->writeLedger(lgrInfo, std::move(rawHeaderBlob)); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfo.seq, - uint256ToString(Backend::lastKey)); - ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng.has_value()); - EXPECT_EQ(rng->minSequence, rng->maxSequence); - EXPECT_EQ(rng->maxSequence, lgrInfo.seq); - } - { - auto seq = backend->fetchLatestLedgerSequence(yield); - EXPECT_TRUE(seq.has_value()); - EXPECT_EQ(*seq, lgrInfo.seq); - } - { - auto retLgr = - backend->fetchLedgerBySequence(lgrInfo.seq, yield); - ASSERT_TRUE(retLgr.has_value()); - EXPECT_EQ(retLgr->seq, lgrInfo.seq); - EXPECT_EQ( - RPC::ledgerInfoToBlob(lgrInfo), - RPC::ledgerInfoToBlob(*retLgr)); - } - EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfo.seq + 1, yield) - .has_value()); - auto lgrInfoOld = lgrInfo; + backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng.has_value()); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + } + { + auto seq = backend->fetchLatestLedgerSequence(yield); + EXPECT_EQ(seq, lgrInfoNext.seq); + } + { + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr.has_value()); + EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + EXPECT_NE(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoOld)); + retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 1, yield); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoOld)); - auto lgrInfoNext = lgrInfo; - 
lgrInfoNext.seq = lgrInfo.seq + 1; - lgrInfoNext.parentHash = lgrInfo.hash; + EXPECT_NE(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield); + EXPECT_FALSE(backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield).has_value()); + + auto txns = backend->fetchAllTransactionsInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(txns.size(), 0); + auto hashes = backend->fetchAllTransactionHashesInLedger(lgrInfoNext.seq, yield); + EXPECT_EQ(hashes.size(), 0); + } + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.txHash = ~lgrInfo.txHash; + lgrInfoNext.accountHash = lgrInfoNext.accountHash ^ lgrInfoNext.txHash; + lgrInfoNext.parentHash = lgrInfoNext.hash; lgrInfoNext.hash++; - lgrInfoNext.accountHash = ~lgrInfo.accountHash; + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{accountBlob}); + auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update({{*key, {accountBlob.begin(), accountBlob.end()}}}, lgrInfoNext.seq); + backend->writeSuccessor(uint256ToString(Backend::firstKey), lgrInfoNext.seq, std::string{accountIndexBlob}); + backend->writeSuccessor(std::string{accountIndexBlob}, lgrInfoNext.seq, uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfoNext)); + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + // obtain a time-based seed: + unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); + std::string accountBlobOld = accountBlob; + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + std::shuffle(accountBlob.begin(), accountBlob.end(), std::default_random_engine(seed)); + auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update({{*key, {accountBlob.begin(), accountBlob.end()}}}, lgrInfoNext.seq); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{accountBlob}); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + + ripple::uint256 key256; + 
EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlob.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 1, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + { + backend->startWrites(); + lgrInfoNext.seq = lgrInfoNext.seq + 1; + lgrInfoNext.parentHash = lgrInfoNext.hash; + lgrInfoNext.hash++; + lgrInfoNext.txHash = lgrInfoNext.txHash ^ lgrInfoNext.accountHash; + lgrInfoNext.accountHash = ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); + + backend->writeLedger(lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); + auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); + backend->cache().update({{*key, {}}}, lgrInfoNext.seq); + backend->writeLedgerObject(std::string{accountIndexBlob}, lgrInfoNext.seq, std::string{}); + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfoNext.seq, uint256ToString(Backend::lastKey)); + + ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + } + { + auto rng = backend->fetchLedgerRange(); + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + auto retLgr = backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); + EXPECT_TRUE(retLgr); + + ripple::uint256 key256; + EXPECT_TRUE(key256.parseHex(accountIndexHex)); + auto obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq + 1, yield); + EXPECT_FALSE(obj); + obj = backend->fetchLedgerObject(key256, lgrInfoNext.seq - 2, yield); + EXPECT_TRUE(obj); + EXPECT_STREQ((const char*)obj->data(), (const char*)accountBlobOld.data()); + obj = backend->fetchLedgerObject(key256, lgrInfoOld.seq - 1, yield); + EXPECT_FALSE(obj); + } + + auto generateObjects = [](size_t numObjects, uint32_t ledgerSequence) { + std::vector> res{numObjects}; + ripple::uint256 key; + key = ledgerSequence * 100000; + + for (auto& blob : res) { - std::string rawHeaderBlob = - ledgerInfoToBinaryString(lgrInfoNext); - - backend->startWrites(); - backend->writeLedger(lgrInfoNext, std::move(rawHeaderBlob)); - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); + ++key; + std::string keyStr{(const char*)key.data(), key.size()}; + blob.first = keyStr; + blob.second = std::to_string(ledgerSequence) + keyStr; } + return res; + }; + auto updateObjects = [](uint32_t ledgerSequence, auto objs) { + for (auto& [key, obj] : objs) { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng.has_value()); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); + obj = std::to_string(ledgerSequence) + obj; } + return objs; + }; + + auto generateNextLedger = [seed](auto lgrInfo) { + ++lgrInfo.seq; + lgrInfo.parentHash = lgrInfo.hash; + static auto randomEngine = std::default_random_engine(seed); + std::shuffle(lgrInfo.txHash.begin(), lgrInfo.txHash.end(), randomEngine); + std::shuffle(lgrInfo.accountHash.begin(), lgrInfo.accountHash.end(), randomEngine); + std::shuffle(lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); + return lgrInfo; + }; + auto writeLedger = 
[&](auto lgrInfo, auto objs, auto state) { + backend->startWrites(); + + backend->writeLedger(lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo))); + std::vector cacheUpdates; + for (auto [key, obj] : objs) { - auto seq = backend->fetchLatestLedgerSequence(yield); - EXPECT_EQ(seq, lgrInfoNext.seq); + backend->writeLedgerObject(std::string{key}, lgrInfo.seq, std::string{obj}); + auto key256 = ripple::uint256::fromVoidChecked(key); + cacheUpdates.push_back({*key256, {obj.begin(), obj.end()}}); } + backend->cache().update(cacheUpdates, lgrInfo.seq); + if (state.count(lgrInfo.seq - 1) == 0 || + std::find_if(state[lgrInfo.seq - 1].begin(), state[lgrInfo.seq - 1].end(), [&](auto obj) { + return obj.first == objs[0].first; + }) == state[lgrInfo.seq - 1].end()) { - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr.has_value()); - EXPECT_EQ(retLgr->seq, lgrInfoNext.seq); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - EXPECT_NE( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoOld)); - retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq - 1, yield); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoOld)); - - EXPECT_NE( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield); - EXPECT_FALSE( - backend->fetchLedgerBySequence(lgrInfoNext.seq - 2, yield) - .has_value()); - - auto txns = backend->fetchAllTransactionsInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(txns.size(), 0); - auto hashes = backend->fetchAllTransactionHashesInLedger( - lgrInfoNext.seq, yield); - EXPECT_EQ(hashes.size(), 0); - } - { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.txHash = ~lgrInfo.txHash; - lgrInfoNext.accountHash = - lgrInfoNext.accountHash ^ lgrInfoNext.txHash; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{accountBlob}); - auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); - backend->cache().update( - {{*key, {accountBlob.begin(), accountBlob.end()}}}, - lgrInfoNext.seq); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfoNext.seq, - std::string{accountIndexBlob}); - backend->writeSuccessor( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - uint256ToString(Backend::lastKey)); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfoNext)); - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = - backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); - } - // obtain a time-based seed: - unsigned 
seed = - std::chrono::system_clock::now().time_since_epoch().count(); - std::string accountBlobOld = accountBlob; - { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - lgrInfoNext.txHash = - lgrInfoNext.txHash ^ lgrInfoNext.accountHash; - lgrInfoNext.accountHash = - ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - std::shuffle( - accountBlob.begin(), - accountBlob.end(), - std::default_random_engine(seed)); - auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); - backend->cache().update( - {{*key, {accountBlob.begin(), accountBlob.end()}}}, - lgrInfoNext.seq); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{accountBlob}); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = - backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), (const char*)accountBlob.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq - 1, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlobOld.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); - } - { - backend->startWrites(); - lgrInfoNext.seq = lgrInfoNext.seq + 1; - lgrInfoNext.parentHash = lgrInfoNext.hash; - lgrInfoNext.hash++; - lgrInfoNext.txHash = - lgrInfoNext.txHash ^ lgrInfoNext.accountHash; - lgrInfoNext.accountHash = - ~(lgrInfoNext.accountHash ^ lgrInfoNext.txHash); - - backend->writeLedger( - lgrInfoNext, ledgerInfoToBinaryString(lgrInfoNext)); - auto key = ripple::uint256::fromVoidChecked(accountIndexBlob); - backend->cache().update({{*key, {}}}, lgrInfoNext.seq); - backend->writeLedgerObject( - std::string{accountIndexBlob}, - lgrInfoNext.seq, - std::string{}); - backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfoNext.seq, - uint256ToString(Backend::lastKey)); - - ASSERT_TRUE(backend->finishWrites(lgrInfoNext.seq)); - } - { - auto rng = backend->fetchLedgerRange(); - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_EQ(rng->maxSequence, lgrInfoNext.seq); - auto retLgr = - backend->fetchLedgerBySequence(lgrInfoNext.seq, yield); - EXPECT_TRUE(retLgr); - - ripple::uint256 key256; - EXPECT_TRUE(key256.parseHex(accountIndexHex)); - auto obj = - backend->fetchLedgerObject(key256, lgrInfoNext.seq, yield); - EXPECT_FALSE(obj); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq + 1, yield); - EXPECT_FALSE(obj); - obj = backend->fetchLedgerObject( - key256, lgrInfoNext.seq - 2, yield); - EXPECT_TRUE(obj); - EXPECT_STREQ( - (const char*)obj->data(), - (const char*)accountBlobOld.data()); - obj = backend->fetchLedgerObject( - key256, lgrInfoOld.seq - 1, yield); - EXPECT_FALSE(obj); - } - - auto generateObjects = [](size_t numObjects, - uint32_t ledgerSequence) { - std::vector> res{ - 
numObjects}; - ripple::uint256 key; - key = ledgerSequence * 100000; - - for (auto& blob : res) + for (size_t i = 0; i < objs.size(); ++i) { - ++key; - std::string keyStr{(const char*)key.data(), key.size()}; - blob.first = keyStr; - blob.second = std::to_string(ledgerSequence) + keyStr; - } - return res; - }; - auto updateObjects = [](uint32_t ledgerSequence, auto objs) { - for (auto& [key, obj] : objs) - { - obj = std::to_string(ledgerSequence) + obj; - } - return objs; - }; - - auto generateNextLedger = [seed](auto lgrInfo) { - ++lgrInfo.seq; - lgrInfo.parentHash = lgrInfo.hash; - static auto randomEngine = std::default_random_engine(seed); - std::shuffle( - lgrInfo.txHash.begin(), lgrInfo.txHash.end(), randomEngine); - std::shuffle( - lgrInfo.accountHash.begin(), - lgrInfo.accountHash.end(), - randomEngine); - std::shuffle( - lgrInfo.hash.begin(), lgrInfo.hash.end(), randomEngine); - return lgrInfo; - }; - auto writeLedger = [&](auto lgrInfo, auto objs, auto state) { - backend->startWrites(); - - backend->writeLedger( - lgrInfo, std::move(ledgerInfoToBinaryString(lgrInfo))); - std::vector cacheUpdates; - for (auto [key, obj] : objs) - { - backend->writeLedgerObject( - std::string{key}, lgrInfo.seq, std::string{obj}); - auto key256 = ripple::uint256::fromVoidChecked(key); - cacheUpdates.push_back({*key256, {obj.begin(), obj.end()}}); - } - backend->cache().update(cacheUpdates, lgrInfo.seq); - if (state.count(lgrInfo.seq - 1) == 0 || - std::find_if( - state[lgrInfo.seq - 1].begin(), - state[lgrInfo.seq - 1].end(), - [&](auto obj) { return obj.first == objs[0].first; }) == - state[lgrInfo.seq - 1].end()) - { - for (size_t i = 0; i < objs.size(); ++i) - { - if (i + 1 < objs.size()) - backend->writeSuccessor( - std::string{objs[i].first}, - lgrInfo.seq, - std::string{objs[i + 1].first}); - else - backend->writeSuccessor( - std::string{objs[i].first}, - lgrInfo.seq, - uint256ToString(Backend::lastKey)); - } - if (state.count(lgrInfo.seq - 1)) + if (i + 1 < objs.size()) backend->writeSuccessor( - std::string{state[lgrInfo.seq - 1].back().first}, - lgrInfo.seq, - std::string{objs[0].first}); + std::string{objs[i].first}, lgrInfo.seq, std::string{objs[i + 1].first}); else backend->writeSuccessor( - uint256ToString(Backend::firstKey), - lgrInfo.seq, - std::string{objs[0].first}); + std::string{objs[i].first}, lgrInfo.seq, uint256ToString(Backend::lastKey)); } + if (state.count(lgrInfo.seq - 1)) + backend->writeSuccessor( + std::string{state[lgrInfo.seq - 1].back().first}, lgrInfo.seq, std::string{objs[0].first}); + else + backend->writeSuccessor( + uint256ToString(Backend::firstKey), lgrInfo.seq, std::string{objs[0].first}); + } - ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); - }; + ASSERT_TRUE(backend->finishWrites(lgrInfo.seq)); + }; - auto checkLedger = [&](auto lgrInfo, auto objs) { - auto rng = backend->fetchLedgerRange(); - auto seq = lgrInfo.seq; - EXPECT_TRUE(rng); - EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); - EXPECT_GE(rng->maxSequence, seq); - auto retLgr = backend->fetchLedgerBySequence(seq, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfo)); - retLgr = backend->fetchLedgerByHash(lgrInfo.hash, yield); - EXPECT_TRUE(retLgr); - EXPECT_EQ( - RPC::ledgerInfoToBlob(*retLgr), - RPC::ledgerInfoToBlob(lgrInfo)) - << "retLgr seq:" << retLgr->seq - << "; lgrInfo seq:" << lgrInfo.seq - << "; retLgr hash:" << retLgr->hash - << "; lgrInfo hash:" << lgrInfo.hash - << "; retLgr parentHash:" << retLgr->parentHash - << "; lgr Info 
parentHash:" << lgrInfo.parentHash; + auto checkLedger = [&](auto lgrInfo, auto objs) { + auto rng = backend->fetchLedgerRange(); + auto seq = lgrInfo.seq; + EXPECT_TRUE(rng); + EXPECT_EQ(rng->minSequence, lgrInfoOld.seq); + EXPECT_GE(rng->maxSequence, seq); + auto retLgr = backend->fetchLedgerBySequence(seq, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfo)); + retLgr = backend->fetchLedgerByHash(lgrInfo.hash, yield); + EXPECT_TRUE(retLgr); + EXPECT_EQ(RPC::ledgerInfoToBlob(*retLgr), RPC::ledgerInfoToBlob(lgrInfo)) + << "retLgr seq:" << retLgr->seq << "; lgrInfo seq:" << lgrInfo.seq << "; retLgr hash:" << retLgr->hash + << "; lgrInfo hash:" << lgrInfo.hash << "; retLgr parentHash:" << retLgr->parentHash + << "; lgr Info parentHash:" << lgrInfo.parentHash; - std::vector keys; - for (auto [key, obj] : objs) + std::vector keys; + for (auto [key, obj] : objs) + { + auto retObj = backend->fetchLedgerObject(binaryStringToUint256(key), seq, yield); + if (obj.size()) { - auto retObj = backend->fetchLedgerObject( - binaryStringToUint256(key), seq, yield); + ASSERT_TRUE(retObj.has_value()); + EXPECT_STREQ((const char*)obj.data(), (const char*)retObj->data()); + } + else + { + ASSERT_FALSE(retObj.has_value()); + } + keys.push_back(binaryStringToUint256(key)); + } + + { + auto retObjs = backend->fetchLedgerObjects(keys, seq, yield); + ASSERT_EQ(retObjs.size(), objs.size()); + + for (size_t i = 0; i < keys.size(); ++i) + { + auto [key, obj] = objs[i]; + auto retObj = retObjs[i]; if (obj.size()) { - ASSERT_TRUE(retObj.has_value()); - EXPECT_STREQ( - (const char*)obj.data(), - (const char*)retObj->data()); + ASSERT_TRUE(retObj.size()); + EXPECT_STREQ((const char*)obj.data(), (const char*)retObj.data()); } else { - ASSERT_FALSE(retObj.has_value()); - } - keys.push_back(binaryStringToUint256(key)); - } - - { - auto retObjs = - backend->fetchLedgerObjects(keys, seq, yield); - ASSERT_EQ(retObjs.size(), objs.size()); - - for (size_t i = 0; i < keys.size(); ++i) - { - auto [key, obj] = objs[i]; - auto retObj = retObjs[i]; - if (obj.size()) - { - ASSERT_TRUE(retObj.size()); - EXPECT_STREQ( - (const char*)obj.data(), - (const char*)retObj.data()); - } - else - { - ASSERT_FALSE(retObj.size()); - } + ASSERT_FALSE(retObj.size()); } } - Backend::LedgerPage page; - std::vector retObjs; - do - { - uint32_t limit = 10; - page = backend->fetchLedgerPage( - page.cursor, seq, limit, false, yield); - retObjs.insert( - retObjs.end(), - page.objects.begin(), - page.objects.end()); - } while (page.cursor); - for (auto obj : objs) - { - bool found = false; - for (auto retObj : retObjs) - { - if (ripple::strHex(obj.first) == - ripple::strHex(retObj.key)) - { - found = true; - ASSERT_EQ( - ripple::strHex(obj.second), - ripple::strHex(retObj.blob)); - } - } - if (found != (obj.second.size() != 0)) - ASSERT_EQ(found, obj.second.size() != 0); - } - }; - - std::map>> - state; - std::map lgrInfos; - for (size_t i = 0; i < 10; ++i) - { - lgrInfoNext = generateNextLedger(lgrInfoNext); - auto objs = generateObjects(25, lgrInfoNext.seq); - EXPECT_EQ(objs.size(), 25); - EXPECT_NE(objs[0], objs[1]); - std::sort(objs.begin(), objs.end()); - state[lgrInfoNext.seq] = objs; - writeLedger(lgrInfoNext, objs, state); - lgrInfos[lgrInfoNext.seq] = lgrInfoNext; } - - std::vector> objs; - for (size_t i = 0; i < 10; ++i) + Backend::LedgerPage page; + std::vector retObjs; + do { - lgrInfoNext = generateNextLedger(lgrInfoNext); - if (!objs.size()) - objs = generateObjects(25, lgrInfoNext.seq); - 
else - objs = updateObjects(lgrInfoNext.seq, objs); - EXPECT_EQ(objs.size(), 25); - EXPECT_NE(objs[0], objs[1]); - std::sort(objs.begin(), objs.end()); - state[lgrInfoNext.seq] = objs; - writeLedger(lgrInfoNext, objs, state); - lgrInfos[lgrInfoNext.seq] = lgrInfoNext; - } - - auto flatten = [&](uint32_t max) { - std::vector> flat; - std::map objs; - for (auto [seq, diff] : state) + uint32_t limit = 10; + page = backend->fetchLedgerPage(page.cursor, seq, limit, false, yield); + retObjs.insert(retObjs.end(), page.objects.begin(), page.objects.end()); + } while (page.cursor); + for (auto obj : objs) + { + bool found = false; + for (auto retObj : retObjs) { - for (auto [k, v] : diff) + if (ripple::strHex(obj.first) == ripple::strHex(retObj.key)) { - if (seq > max) - { - if (objs.count(k) == 0) - objs[k] = ""; - } - else - { - objs[k] = v; - } + found = true; + ASSERT_EQ(ripple::strHex(obj.second), ripple::strHex(retObj.blob)); } } - for (auto [key, value] : objs) - { - flat.push_back(std::make_pair(key, value)); - } - return flat; - }; + if (found != (obj.second.size() != 0)) + ASSERT_EQ(found, obj.second.size() != 0); + } + }; + std::map>> state; + std::map lgrInfos; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + auto objs = generateObjects(25, lgrInfoNext.seq); + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, objs, state); + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + } + + std::vector> objs; + for (size_t i = 0; i < 10; ++i) + { + lgrInfoNext = generateNextLedger(lgrInfoNext); + if (!objs.size()) + objs = generateObjects(25, lgrInfoNext.seq); + else + objs = updateObjects(lgrInfoNext.seq, objs); + EXPECT_EQ(objs.size(), 25); + EXPECT_NE(objs[0], objs[1]); + std::sort(objs.begin(), objs.end()); + state[lgrInfoNext.seq] = objs; + writeLedger(lgrInfoNext, objs, state); + lgrInfos[lgrInfoNext.seq] = lgrInfoNext; + } + + auto flatten = [&](uint32_t max) { + std::vector> flat; + std::map objs; for (auto [seq, diff] : state) { - auto flat = flatten(seq); - checkLedger(lgrInfos[seq], flat); + for (auto [k, v] : diff) + { + if (seq > max) + { + if (objs.count(k) == 0) + objs[k] = ""; + } + else + { + objs[k] = v; + } + } } + for (auto [key, value] : objs) + { + flat.push_back(std::make_pair(key, value)); + } + return flat; + }; - done = true; - work.reset(); - }); + for (auto [seq, diff] : state) + { + auto flat = flatten(seq); + checkLedger(lgrInfos[seq], flat); + } + + done = true; + work.reset(); + }); ctx.run(); ASSERT_EQ(done, true); diff --git a/unittests/backend/cassandra/BaseTests.cpp b/unittests/backend/cassandra/BaseTests.cpp index d4ef27d9..ec76194e 100644 --- a/unittests/backend/cassandra/BaseTests.cpp +++ b/unittests/backend/cassandra/BaseTests.cpp @@ -40,8 +40,7 @@ protected: { Handle handle{contactPoints}; EXPECT_TRUE(handle.connect()); - std::string query = "CREATE KEYSPACE IF NOT EXISTS " + - std::string{keyspace} + + std::string query = "CREATE KEYSPACE IF NOT EXISTS " + std::string{keyspace} + " WITH replication = {'class': " "'SimpleStrategy', 'replication_factor': '1'} AND " "durable_writes = " @@ -85,8 +84,7 @@ protected: int64_t idx = 1000; for (auto const& entry : entries) - statements.push_back( - insert.bind(entry, static_cast(idx++))); + statements.push_back(insert.bind(entry, static_cast(idx++))); EXPECT_EQ(statements.size(), entries.size()); EXPECT_TRUE(handle.execute(statements)); @@ -107,9 +105,7 @@ 
TEST_F(BackendCassandraBaseTest, ConnectionFailFormat) auto f = handle.asyncConnect(); auto res = f.await(); ASSERT_FALSE(res); - EXPECT_EQ( - res.error(), - "No hosts available: Unable to connect to any contact points"); + EXPECT_EQ(res.error(), "No hosts available: Unable to connect to any contact points"); EXPECT_EQ(res.error().code(), CASS_ERROR_LIB_NO_HOSTS_AVAILABLE); } @@ -124,8 +120,7 @@ TEST_F(BackendCassandraBaseTest, ConnectionFailTimeout) auto res = f.await(); ASSERT_FALSE(res); // scylla and cassandra produce different text - EXPECT_TRUE(res.error().message().starts_with( - "No hosts available: Underlying connection error:")); + EXPECT_TRUE(res.error().message().starts_with("No hosts available: Underlying connection error:")); EXPECT_EQ(res.error().code(), CASS_ERROR_LIB_NO_HOSTS_AVAILABLE); } @@ -134,9 +129,7 @@ TEST_F(BackendCassandraBaseTest, FutureCallback) Handle handle{"127.0.0.1"}; ASSERT_TRUE(handle.connect()); - auto const statement = - handle.prepare("SELECT keyspace_name FROM system_schema.keyspaces") - .bind(); + auto const statement = handle.prepare("SELECT keyspace_name FROM system_schema.keyspaces").bind(); bool complete = false; auto f = handle.asyncExecute(statement, [&complete](auto const res) { @@ -157,24 +150,21 @@ TEST_F(BackendCassandraBaseTest, FutureCallbackSurviveMove) Handle handle{"127.0.0.1"}; ASSERT_TRUE(handle.connect()); - auto const statement = - handle.prepare("SELECT keyspace_name FROM system_schema.keyspaces") - .bind(); + auto const statement = handle.prepare("SELECT keyspace_name FROM system_schema.keyspaces").bind(); bool complete = false; std::vector futures; std::binary_semaphore sem{0}; - futures.push_back( - handle.asyncExecute(statement, [&complete, &sem](auto const res) { - complete = true; - EXPECT_TRUE(res.value().hasRows()); + futures.push_back(handle.asyncExecute(statement, [&complete, &sem](auto const res) { + complete = true; + EXPECT_TRUE(res.value().hasRows()); - for (auto [ks] : extract(res.value())) - std::cout << "keyspace: " << ks << '\n'; + for (auto [ks] : extract(res.value())) + std::cout << "keyspace: " << ks << '\n'; - sem.release(); - })); + sem.release(); + })); sem.acquire(); for (auto const& f : futures) @@ -252,8 +242,7 @@ TEST_F(BackendCassandraBaseTest, CreateTableWithStrings) int64_t idx = 1000; for (auto const& entry : entries) - futures.push_back(handle.asyncExecute( - insert, entry, static_cast(idx++))); + futures.push_back(handle.asyncExecute(insert, entry, static_cast(idx++))); ASSERT_EQ(futures.size(), entries.size()); for (auto const& f : futures) @@ -324,8 +313,7 @@ TEST_F(BackendCassandraBaseTest, BatchInsert) int64_t idx = 1000; for (auto const& entry : entries) - statements.push_back( - insert.bind(entry, static_cast(idx++))); + statements.push_back(insert.bind(entry, static_cast(idx++))); ASSERT_EQ(statements.size(), entries.size()); auto rc = handle.execute(statements); @@ -385,8 +373,7 @@ TEST_F(BackendCassandraBaseTest, AlterTableMoveToNewTable) // now migrate data; tmp column will just get the sequence number + 1 stored std::vector migrationStatements; - auto const migrationInsert = handle.prepare( - "INSERT INTO strings_v2 (hash, sequence, tmp) VALUES (?, ?, ?)"); + auto const migrationInsert = handle.prepare("INSERT INTO strings_v2 (hash, sequence, tmp) VALUES (?, ?, ?)"); auto const res = handle.execute("SELECT hash, sequence FROM strings"); ASSERT_TRUE(res); @@ -396,8 +383,8 @@ TEST_F(BackendCassandraBaseTest, AlterTableMoveToNewTable) { static_assert(std::is_same_v); 
static_assert(std::is_same_v); - migrationStatements.push_back(migrationInsert.bind( - hash, static_cast(seq), static_cast(seq + 1u))); + migrationStatements.push_back( + migrationInsert.bind(hash, static_cast(seq), static_cast(seq + 1u))); } EXPECT_TRUE(handle.execute(migrationStatements)); diff --git a/unittests/backend/cassandra/ExecutionStrategyTests.cpp b/unittests/backend/cassandra/ExecutionStrategyTests.cpp index 9ed5689f..0868224e 100644 --- a/unittests/backend/cassandra/ExecutionStrategyTests.cpp +++ b/unittests/backend/cassandra/ExecutionStrategyTests.cpp @@ -37,113 +37,82 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineSuccessful) auto handle = MockHandle{}; auto strat = DefaultExecutionStrategy{Settings{}, handle}; - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([](auto const& statement, auto&& cb) { cb({}); // pretend we got data return FakeFutureWithCallback{}; }); - EXPECT_CALL( - handle, - asyncExecute( - An(), - An&&>())) + EXPECT_CALL(handle, asyncExecute(An(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - auto statement = FakeStatement{}; - strat.read(yield, statement); + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + auto statement = FakeStatement{}; + strat.read(yield, statement); - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); } -TEST_F( - BackendCassandraExecutionStrategyTest, - ReadOneInCoroutineThrowsOnTimeoutFailure) +TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineThrowsOnTimeoutFailure) { auto handle = MockHandle{}; auto strat = DefaultExecutionStrategy{Settings{}, handle}; - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([](auto const&, auto&& cb) { cb({}); // notify that item is ready - return FakeFutureWithCallback{FakeResultOrError{ - CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}}}; + return FakeFutureWithCallback{ + FakeResultOrError{CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}}}; }); - EXPECT_CALL( - handle, - asyncExecute( - An(), - An&&>())) + EXPECT_CALL(handle, asyncExecute(An(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - auto statement = FakeStatement{}; - EXPECT_THROW(strat.read(yield, statement), DatabaseTimeout); + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + auto statement = FakeStatement{}; + EXPECT_THROW(strat.read(yield, statement), DatabaseTimeout); - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); } -TEST_F( - BackendCassandraExecutionStrategyTest, - ReadOneInCoroutineThrowsOnInvalidQueryFailure) +TEST_F(BackendCassandraExecutionStrategyTest, ReadOneInCoroutineThrowsOnInvalidQueryFailure) { auto handle = MockHandle{}; auto strat = DefaultExecutionStrategy{Settings{}, handle}; - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([](auto const&, auto&& cb) { cb({}); // notify that item is ready - return FakeFutureWithCallback{FakeResultOrError{ - CassandraError{"invalid", 
CASS_ERROR_SERVER_INVALID_QUERY}}}; + return FakeFutureWithCallback{ + FakeResultOrError{CassandraError{"invalid", CASS_ERROR_SERVER_INVALID_QUERY}}}; }); - EXPECT_CALL( - handle, - asyncExecute( - An(), - An&&>())) + EXPECT_CALL(handle, asyncExecute(An(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - auto statement = FakeStatement{}; - EXPECT_THROW(strat.read(yield, statement), std::runtime_error); + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + auto statement = FakeStatement{}; + EXPECT_THROW(strat.read(yield, statement), std::runtime_error); - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); @@ -155,123 +124,96 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineSuccessful) auto strat = DefaultExecutionStrategy{Settings{}, handle}; ON_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .WillByDefault([](auto const& statements, auto&& cb) { EXPECT_EQ(statements.size(), 3); cb({}); // pretend we got data return FakeFutureWithCallback{}; }); EXPECT_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - auto statements = std::vector(3); - strat.read(yield, statements); + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + auto statements = std::vector(3); + strat.read(yield, statements); - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); } -TEST_F( - BackendCassandraExecutionStrategyTest, - ReadBatchInCoroutineThrowsOnTimeoutFailure) +TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineThrowsOnTimeoutFailure) { auto handle = MockHandle{}; auto strat = DefaultExecutionStrategy{Settings{}, handle}; ON_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .WillByDefault([](auto const& statements, auto&& cb) { EXPECT_EQ(statements.size(), 3); cb({}); // notify that item is ready - return FakeFutureWithCallback{FakeResultOrError{ - CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}}}; + return FakeFutureWithCallback{ + FakeResultOrError{CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT}}}; }); EXPECT_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - auto statements = std::vector(3); - EXPECT_THROW(strat.read(yield, statements), DatabaseTimeout); + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + auto statements = std::vector(3); + EXPECT_THROW(strat.read(yield, statements), DatabaseTimeout); - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); } -TEST_F( - BackendCassandraExecutionStrategyTest, - ReadBatchInCoroutineThrowsOnInvalidQueryFailure) +TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineThrowsOnInvalidQueryFailure) { auto handle = MockHandle{}; auto 
strat = DefaultExecutionStrategy{Settings{}, handle}; ON_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .WillByDefault([](auto const& statements, auto&& cb) { EXPECT_EQ(statements.size(), 3); cb({}); // notify that item is ready - return FakeFutureWithCallback{FakeResultOrError{ - CassandraError{"invalid", CASS_ERROR_SERVER_INVALID_QUERY}}}; + return FakeFutureWithCallback{ + FakeResultOrError{CassandraError{"invalid", CASS_ERROR_SERVER_INVALID_QUERY}}}; }); EXPECT_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - auto statements = std::vector(3); - EXPECT_THROW(strat.read(yield, statements), std::runtime_error); + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + auto statements = std::vector(3); + EXPECT_THROW(strat.read(yield, statements), std::runtime_error); - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); } -TEST_F( - BackendCassandraExecutionStrategyTest, - ReadBatchInCoroutineMarksBusyIfRequestsOutstandingExceeded) +TEST_F(BackendCassandraExecutionStrategyTest, ReadBatchInCoroutineMarksBusyIfRequestsOutstandingExceeded) { auto handle = MockHandle{}; auto settings = Settings{}; @@ -279,10 +221,7 @@ TEST_F( auto strat = DefaultExecutionStrategy{settings, handle}; ON_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .WillByDefault([&strat](auto const& statements, auto&& cb) { EXPECT_EQ(statements.size(), 3); EXPECT_TRUE(strat.isTooBusy()); // 2 was the limit, we sent 3 @@ -291,26 +230,21 @@ TEST_F( return FakeFutureWithCallback{}; }); EXPECT_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .Times(1); auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - EXPECT_FALSE(strat.isTooBusy()); // 2 was the limit, 0 atm - auto statements = std::vector(3); - strat.read(yield, statements); - EXPECT_FALSE( - strat.isTooBusy()); // after read completes it's 0 again + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + EXPECT_FALSE(strat.isTooBusy()); // 2 was the limit, 0 atm + auto statements = std::vector(3); + strat.read(yield, statements); + EXPECT_FALSE(strat.isTooBusy()); // after read completes it's 0 again - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); @@ -321,11 +255,7 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadEachInCoroutineSuccessful) auto handle = MockHandle{}; auto strat = DefaultExecutionStrategy{Settings{}, handle}; - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([](auto const&, auto&& cb) { cb({}); // pretend we got data return FakeFutureWithCallback{}; @@ -340,37 +270,29 @@ TEST_F(BackendCassandraExecutionStrategyTest, ReadEachInCoroutineSuccessful) auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - auto statements = std::vector(3); - auto res = strat.readEach(yield, statements); - 
EXPECT_EQ(res.size(), statements.size()); + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + auto statements = std::vector(3); + auto res = strat.readEach(yield, statements); + EXPECT_EQ(res.size(), statements.size()); - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); } -TEST_F( - BackendCassandraExecutionStrategyTest, - ReadEachInCoroutineThrowsOnFailure) +TEST_F(BackendCassandraExecutionStrategyTest, ReadEachInCoroutineThrowsOnFailure) { auto handle = MockHandle{}; auto strat = DefaultExecutionStrategy{Settings{}, handle}; auto callCount = std::atomic_int{0}; - ON_CALL( - handle, - asyncExecute( - An(), - An&&>())) + ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([&callCount](auto const&, auto&& cb) { if (callCount == 1) // error happens on one of the entries - cb({CassandraError{ - "invalid data", CASS_ERROR_LIB_INVALID_DATA}}); + cb({CassandraError{"invalid data", CASS_ERROR_LIB_INVALID_DATA}}); else cb({}); // pretend we got data ++callCount; @@ -386,14 +308,13 @@ TEST_F( auto called = std::atomic_bool{false}; auto work = std::optional{ctx}; - boost::asio::spawn( - ctx, [&work, &called, &strat](boost::asio::yield_context yield) { - auto statements = std::vector(3); - EXPECT_THROW(strat.readEach(yield, statements), DatabaseTimeout); + boost::asio::spawn(ctx, [&work, &called, &strat](boost::asio::yield_context yield) { + auto statements = std::vector(3); + EXPECT_THROW(strat.readEach(yield, statements), DatabaseTimeout); - called = true; - work.reset(); - }); + called = true; + work.reset(); + }); ctx.run(); ASSERT_TRUE(called); @@ -404,12 +325,9 @@ TEST_F(BackendCassandraExecutionStrategyTest, WriteSyncFirstTrySuccessful) auto handle = MockHandle{}; auto strat = DefaultExecutionStrategy{Settings{}, handle}; - ON_CALL(handle, execute(An())) - .WillByDefault([](auto const&) { return FakeResultOrError{}; }); - EXPECT_CALL( - handle, - execute(An())) - .Times(1); // first one will succeed + ON_CALL(handle, execute(An())).WillByDefault([](auto const&) { return FakeResultOrError{}; }); + EXPECT_CALL(handle, + execute(An())).Times(1); // first one will succeed EXPECT_TRUE(strat.writeSync({})); } @@ -420,17 +338,13 @@ TEST_F(BackendCassandraExecutionStrategyTest, WriteSyncRetrySuccessful) auto strat = DefaultExecutionStrategy{Settings{}, handle}; auto callCount = 0; - ON_CALL(handle, execute(An())) - .WillByDefault([&callCount](auto const&) { - if (callCount++ == 1) - return FakeResultOrError{}; - return FakeResultOrError{ - CassandraError{"invalid data", CASS_ERROR_LIB_INVALID_DATA}}; - }); - EXPECT_CALL( - handle, - execute(An())) - .Times(2); // first one will fail, second will succeed + ON_CALL(handle, execute(An())).WillByDefault([&callCount](auto const&) { + if (callCount++ == 1) + return FakeResultOrError{}; + return FakeResultOrError{CassandraError{"invalid data", CASS_ERROR_LIB_INVALID_DATA}}; + }); + EXPECT_CALL(handle, + execute(An())).Times(2); // first one will fail, second will succeed EXPECT_TRUE(strat.writeSync({})); } @@ -446,10 +360,7 @@ TEST_F(BackendCassandraExecutionStrategyTest, WriteMultipleAndCallSyncSucceeds) auto thread = std::thread{[this]() { ctx.run(); }}; ON_CALL( - handle, - asyncExecute( - An const&>(), - An&&>())) + handle, asyncExecute(An const&>(), An&&>())) .WillByDefault([this, &callCount](auto const&, auto&& cb) { // run on thread to emulate concurrency model of real asyncExecute boost::asio::post(ctx, [&callCount, cb = std::move(cb)] { @@ -469,7 
+380,7 @@ TEST_F(BackendCassandraExecutionStrategyTest, WriteMultipleAndCallSyncSucceeds) for (auto i = 0u; i < totalRequests; ++i) strat.write(statements); - strat.sync(); // make sure all above writes are finished + strat.sync(); // make sure all above writes are finished ASSERT_EQ(callCount, totalRequests); // all requests should finish work.reset(); diff --git a/unittests/backend/cassandra/RetryPolicyTests.cpp b/unittests/backend/cassandra/RetryPolicyTests.cpp index b700153b..d0772af3 100644 --- a/unittests/backend/cassandra/RetryPolicyTests.cpp +++ b/unittests/backend/cassandra/RetryPolicyTests.cpp @@ -35,12 +35,9 @@ class BackendCassandraRetryPolicyTest : public SyncAsioContextTest TEST_F(BackendCassandraRetryPolicyTest, ShouldRetryAlwaysTrue) { auto retryPolicy = ExponentialBackoffRetryPolicy{ctx}; - EXPECT_TRUE(retryPolicy.shouldRetry( - CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT})); - EXPECT_TRUE(retryPolicy.shouldRetry( - CassandraError{"invalid data", CASS_ERROR_LIB_INVALID_DATA})); - EXPECT_TRUE(retryPolicy.shouldRetry( - CassandraError{"invalid query", CASS_ERROR_SERVER_INVALID_QUERY})); + EXPECT_TRUE(retryPolicy.shouldRetry(CassandraError{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT})); + EXPECT_TRUE(retryPolicy.shouldRetry(CassandraError{"invalid data", CASS_ERROR_LIB_INVALID_DATA})); + EXPECT_TRUE(retryPolicy.shouldRetry(CassandraError{"invalid query", CASS_ERROR_SERVER_INVALID_QUERY})); // this policy actually always returns true auto const err = CassandraError{"ok", CASS_OK}; @@ -64,9 +61,8 @@ TEST_F(BackendCassandraRetryPolicyTest, CheckComputedBackoffDelayIsCorrect) EXPECT_EQ(retryPolicy.calculateDelay(8).count(), 256); EXPECT_EQ(retryPolicy.calculateDelay(9).count(), 512); EXPECT_EQ(retryPolicy.calculateDelay(10).count(), 1024); - EXPECT_EQ( - retryPolicy.calculateDelay(11).count(), - 1024); // 10 is max, same after that + EXPECT_EQ(retryPolicy.calculateDelay(11).count(), + 1024); // 10 is max, same after that } TEST_F(BackendCassandraRetryPolicyTest, RetryCorrectlyExecuted) diff --git a/unittests/backend/cassandra/SettingsProviderTests.cpp b/unittests/backend/cassandra/SettingsProviderTests.cpp index 71bf065f..2dfb6cd3 100644 --- a/unittests/backend/cassandra/SettingsProviderTests.cpp +++ b/unittests/backend/cassandra/SettingsProviderTests.cpp @@ -54,8 +54,7 @@ TEST_F(SettingsProviderTest, Defaults) EXPECT_EQ(settings.username, std::nullopt); EXPECT_EQ(settings.password, std::nullopt); - auto const* cp = - std::get_if(&settings.connectionInfo); + auto const* cp = std::get_if(&settings.connectionInfo); ASSERT_TRUE(cp != nullptr); EXPECT_EQ(cp->contactPoints, "127.0.0.1"); EXPECT_FALSE(cp->port); @@ -80,8 +79,7 @@ TEST_F(SettingsProviderTest, SimpleConfig) auto const settings = provider.getSettings(); EXPECT_EQ(settings.threads, 24); - auto const* cp = - std::get_if(&settings.connectionInfo); + auto const* cp = std::get_if(&settings.connectionInfo); ASSERT_TRUE(cp != nullptr); EXPECT_EQ(cp->contactPoints, "123.123.123.123"); EXPECT_EQ(cp->port, 1234); @@ -97,8 +95,7 @@ TEST_F(SettingsProviderTest, SecureBundleConfig) SettingsProvider provider{cfg}; auto const settings = provider.getSettings(); - auto const* sb = - std::get_if(&settings.connectionInfo); + auto const* sb = std::get_if(&settings.connectionInfo); ASSERT_TRUE(sb != nullptr); EXPECT_EQ(sb->bundle, "bundleData"); } diff --git a/unittests/backend/cassandra/impl/FakesAndMocks.h b/unittests/backend/cassandra/impl/FakesAndMocks.h index 3c19fdd7..9cd4ba24 100644 --- 
a/unittests/backend/cassandra/impl/FakesAndMocks.h +++ b/unittests/backend/cassandra/impl/FakesAndMocks.h @@ -99,8 +99,7 @@ struct MockHandle MOCK_METHOD( FutureWithCallbackType, asyncExecute, - (std::vector const&, - std::function&&), + (std::vector const&, std::function&&), (const)); MOCK_METHOD(ResultOrErrorType, execute, (StatementType const&), (const)); diff --git a/unittests/rpc/BaseTests.cpp b/unittests/rpc/BaseTests.cpp index b0470833..61aa0b55 100644 --- a/unittests/rpc/BaseTests.cpp +++ b/unittests/rpc/BaseTests.cpp @@ -221,8 +221,7 @@ TEST_F(RPCBaseTest, ArrayAtValidator) auto failingInput = json::parse(R"({ "arr": [{"limit": "not int"}] })"); ASSERT_FALSE(spec.validate(failingInput)); - failingInput = - json::parse(R"({ "arr": [{"limit": 42}] ,"arr2": "not array type" })"); + failingInput = json::parse(R"({ "arr": [{"limit": 42}] ,"arr2": "not array type" })"); ASSERT_FALSE(spec.validate(failingInput)); failingInput = json::parse(R"({ "arr": [] })"); @@ -251,12 +250,10 @@ TEST_F(RPCBaseTest, IfTypeValidator) // clang-format on // if json object pass - auto passingInput = - json::parse(R"({ "mix": {"limit": 42, "limit2": 22} })"); + auto passingInput = json::parse(R"({ "mix": {"limit": 42, "limit2": 22} })"); ASSERT_TRUE(spec.validate(passingInput)); // if string pass - passingInput = json::parse( - R"({ "mix": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC" })"); + passingInput = json::parse(R"({ "mix": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC" })"); ASSERT_TRUE(spec.validate(passingInput)); // if json object fail at first requirement @@ -274,8 +271,7 @@ TEST_F(RPCBaseTest, IfTypeValidator) failingInput = json::parse(R"({ "mix": 1213 })"); ASSERT_FALSE(spec.validate(failingInput)); - failingInput = - json::parse(R"({ "mix": {"limit": 42, "limit2": 22} , "mix2": 1213 })"); + failingInput = json::parse(R"({ "mix": {"limit": 42, "limit2": 22} , "mix2": 1213 })"); ASSERT_FALSE(spec.validate(failingInput)); } @@ -283,20 +279,15 @@ TEST_F(RPCBaseTest, WithCustomError) { auto const spec = RpcSpec{ {"transaction", - WithCustomError{ - Uint256HexStringValidator, - RPC::Status{ripple::rpcBAD_FEATURE, "MyCustomError"}}}, - {"other", - WithCustomError{ - Type{}, - RPC::Status{ripple::rpcALREADY_MULTISIG, "MyCustomError2"}}}}; + WithCustomError{Uint256HexStringValidator, RPC::Status{ripple::rpcBAD_FEATURE, "MyCustomError"}}}, + {"other", WithCustomError{Type{}, RPC::Status{ripple::rpcALREADY_MULTISIG, "MyCustomError2"}}}}; auto const passingInput = json::parse( R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC", "other": "1"})"); ASSERT_TRUE(spec.validate(passingInput)); - auto failingInput = json::parse( - R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515B"})"); + auto failingInput = + json::parse(R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515B"})"); auto err = spec.validate(failingInput); ASSERT_FALSE(err); ASSERT_EQ(err.error().message, "MyCustomError"); @@ -324,8 +315,7 @@ TEST_F(RPCBaseTest, CustomValidator) {"taker", customFormatCheck}, }; - auto passingInput = - json::parse(R"({ "taker": "r9cZA1mLK5R5Am25ArfXFmqgNwjZgnfk59" })"); + auto passingInput = json::parse(R"({ "taker": "r9cZA1mLK5R5Am25ArfXFmqgNwjZgnfk59" })"); ASSERT_TRUE(spec.validate(passingInput)); auto failingInput = json::parse(R"({ "taker": "wrongformat" })"); @@ -365,20 +355,17 @@ TEST_F(RPCBaseTest, AccountValidator) auto failingInput = json::parse(R"({ "account": 256 
})"); ASSERT_FALSE(spec.validate(failingInput)); - failingInput = - json::parse(R"({ "account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jp" })"); + failingInput = json::parse(R"({ "account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jp" })"); ASSERT_FALSE(spec.validate(failingInput)); - failingInput = json::parse( - R"({ "account": "02000000000000000000000000000000000000000000000000000000000000000" })"); + failingInput = json::parse(R"({ "account": "02000000000000000000000000000000000000000000000000000000000000000" })"); ASSERT_FALSE(spec.validate(failingInput)); - auto passingInput = - json::parse(R"({ "account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn" })"); + auto passingInput = json::parse(R"({ "account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn" })"); ASSERT_TRUE(spec.validate(passingInput)); - passingInput = json::parse( - R"({ "account": "020000000000000000000000000000000000000000000000000000000000000000" })"); + passingInput = + json::parse(R"({ "account": "020000000000000000000000000000000000000000000000000000000000000000" })"); ASSERT_TRUE(spec.validate(passingInput)); } @@ -403,8 +390,8 @@ TEST_F(RPCBaseTest, AccountMarkerValidator) TEST_F(RPCBaseTest, Uint256HexStringValidator) { auto const spec = RpcSpec{{"transaction", Uint256HexStringValidator}}; - auto const passingInput = json::parse( - R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"})"); + auto const passingInput = + json::parse(R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"})"); ASSERT_TRUE(spec.validate(passingInput)); auto failingInput = json::parse(R"({ "transaction": 256})"); @@ -412,8 +399,7 @@ TEST_F(RPCBaseTest, Uint256HexStringValidator) ASSERT_FALSE(err); ASSERT_EQ(err.error().message, "transactionNotString"); - failingInput = json::parse( - R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC"})"); + failingInput = json::parse(R"({ "transaction": "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC"})"); err = spec.validate(failingInput); ASSERT_FALSE(err); ASSERT_EQ(err.error().message, "transactionMalformed"); @@ -425,8 +411,7 @@ TEST_F(RPCBaseTest, CurrencyValidator) auto passingInput = json::parse(R"({ "currency": "GBP"})"); ASSERT_TRUE(spec.validate(passingInput)); - passingInput = json::parse( - R"({ "currency": "0158415500000000C1F76FF6ECB0BAC600000000"})"); + passingInput = json::parse(R"({ "currency": "0158415500000000C1F76FF6ECB0BAC600000000"})"); ASSERT_TRUE(spec.validate(passingInput)); auto failingInput = json::parse(R"({ "currency": 256})"); @@ -443,8 +428,7 @@ TEST_F(RPCBaseTest, CurrencyValidator) TEST_F(RPCBaseTest, IssuerValidator) { auto const spec = RpcSpec{{"issuer", IssuerValidator}}; - auto passingInput = - json::parse(R"({ "issuer": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"})"); + auto passingInput = json::parse(R"({ "issuer": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"})"); ASSERT_TRUE(spec.validate(passingInput)); auto failingInput = json::parse(R"({ "issuer": 256})"); @@ -452,8 +436,7 @@ TEST_F(RPCBaseTest, IssuerValidator) ASSERT_FALSE(err); ASSERT_EQ(err.error().message, "issuerNotString"); - failingInput = json::parse( - fmt::format(R"({{ "issuer": "{}"}})", toBase58(ripple::noAccount()))); + failingInput = json::parse(fmt::format(R"({{ "issuer": "{}"}})", toBase58(ripple::noAccount()))); err = spec.validate(failingInput); ASSERT_FALSE(err); } diff --git a/unittests/rpc/ErrorTests.cpp b/unittests/rpc/ErrorTests.cpp index d7b7ba81..86b0e46e 100644 --- a/unittests/rpc/ErrorTests.cpp +++ 
b/unittests/rpc/ErrorTests.cpp @@ -27,11 +27,7 @@ using namespace std; namespace { void -check( - boost::json::object const& j, - std::string_view error, - uint32_t errorCode, - std::string_view errorMessage) +check(boost::json::object const& j, std::string_view error, uint32_t errorCode, std::string_view errorMessage) { EXPECT_TRUE(j.contains("error")); EXPECT_TRUE(j.contains("error_code")); @@ -50,8 +46,7 @@ check( EXPECT_STREQ(j.at("error").as_string().c_str(), error.data()); EXPECT_EQ(j.at("error_code").as_uint64(), errorCode); - EXPECT_STREQ( - j.at("error_message").as_string().c_str(), errorMessage.data()); + EXPECT_STREQ(j.at("error_message").as_string().c_str(), errorMessage.data()); } } // namespace @@ -101,8 +96,7 @@ TEST(RPCErrorsTest, RippledErrorToJSONCustomMessage) TEST(RPCErrorsTest, RippledErrorToJSONCustomStrCodeAndMessage) { - auto const status = - Status{RippledError::rpcINVALID_PARAMS, "customCode", "customMessage"}; + auto const status = Status{RippledError::rpcINVALID_PARAMS, "customCode", "customMessage"}; check(makeError(status), "customCode", 31, "customMessage"); } @@ -120,8 +114,7 @@ TEST(RPCErrorsTest, ClioErrorToJSONCustomMessage) TEST(RPCErrorsTest, ClioErrorToJSONCustomStrCodeAndMessage) { - auto const status = - Status{ClioError::rpcMALFORMED_CURRENCY, "customCode", "customMessage"}; + auto const status = Status{ClioError::rpcMALFORMED_CURRENCY, "customCode", "customMessage"}; check(makeError(status), "customCode", 5000, "customMessage"); } @@ -139,11 +132,8 @@ TEST(RPCErrorsTest, WarningToJSON) EXPECT_TRUE(j.at("id").is_int64()); EXPECT_TRUE(j.at("message").is_string()); - EXPECT_EQ( - j.at("id").as_int64(), - static_cast(WarningCode::warnRPC_OUTDATED)); - EXPECT_STREQ( - j.at("message").as_string().c_str(), "This server may be out of date"); + EXPECT_EQ(j.at("id").as_int64(), static_cast(WarningCode::warnRPC_OUTDATED)); + EXPECT_STREQ(j.at("message").as_string().c_str(), "This server may be out of date"); } TEST(RPCErrorsTest, InvalidWarningToJSON) diff --git a/unittests/rpc/RPCHelpersTest.cpp b/unittests/rpc/RPCHelpersTest.cpp index f71e06e6..1efd7f22 100644 --- a/unittests/rpc/RPCHelpersTest.cpp +++ b/unittests/rpc/RPCHelpersTest.cpp @@ -30,12 +30,9 @@ using namespace testing; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; -constexpr static auto INDEX1 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; -constexpr static auto TXNID = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto INDEX1 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; +constexpr static auto TXNID = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; class RPCHelpersTest : public MockBackendTest, public SyncAsioContextTest { @@ -55,19 +52,16 @@ class RPCHelpersTest : public MockBackendTest, public SyncAsioContextTest TEST_F(RPCHelpersTest, TraverseOwnedNodesNotAccount) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // fetch account object return emtpy - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, 
     EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1);
 
     boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) {
         auto account = GetAccountIDWithString(ACCOUNT);
-        auto ret = traverseOwnedNodes(
-            *mockBackendPtr, account, 9, 10, "", yield, [](auto) {
+        auto ret = traverseOwnedNodes(*mockBackendPtr, account, 9, 10, "", yield, [](auto) {
 
-            });
+        });
         auto status = std::get_if(&ret);
         EXPECT_TRUE(status != nullptr);
         EXPECT_EQ(*status, RippledError::rpcACT_NOT_FOUND);
@@ -77,8 +71,7 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesNotAccount)
 TEST_F(RPCHelpersTest, TraverseOwnedNodesMarkerInvalidIndexNotHex)
 {
-    MockBackend* rawBackendPtr =
-        static_cast(mockBackendPtr.get());
+    MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get());
     // fetch account object return something
     auto fake = Blob{'f', 'a', 'k', 'e'};
     ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(fake));
@@ -86,10 +79,9 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesMarkerInvalidIndexNotHex)
     boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) {
         auto account = GetAccountIDWithString(ACCOUNT);
-        auto ret = traverseOwnedNodes(
-            *mockBackendPtr, account, 9, 10, "nothex,10", yield, [](auto) {
+        auto ret = traverseOwnedNodes(*mockBackendPtr, account, 9, 10, "nothex,10", yield, [](auto) {
 
-            });
+        });
         auto status = std::get_if(&ret);
         EXPECT_TRUE(status != nullptr);
         EXPECT_EQ(*status, ripple::rpcINVALID_PARAMS);
@@ -100,8 +92,7 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesMarkerInvalidPageNotInt)
 {
-    MockBackend* rawBackendPtr =
-        static_cast(mockBackendPtr.get());
+    MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get());
     // fetch account object return something
     auto fake = Blob{'f', 'a', 'k', 'e'};
     ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(fake));
@@ -109,10 +100,9 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesMarkerInvalidPageNotInt)
     boost::asio::spawn(ctx, [this](boost::asio::yield_context yield) {
         auto account = GetAccountIDWithString(ACCOUNT);
-        auto ret = traverseOwnedNodes(
-            *mockBackendPtr, account, 9, 10, "nothex,abc", yield, [](auto) {
+        auto ret = traverseOwnedNodes(*mockBackendPtr, account, 9, 10, "nothex,abc", yield, [](auto) {
 
-            });
+        });
         auto status = std::get_if(&ret);
         EXPECT_TRUE(status != nullptr);
         EXPECT_EQ(*status, ripple::rpcINVALID_PARAMS);
@@ -124,40 +114,33 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesMarkerInvalidPageNotInt)
 // limit = 10, return 2 objects
 TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarker)
 {
-    MockBackend* rawBackendPtr =
-        static_cast(mockBackendPtr.get());
+    MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get());
     auto account = GetAccountIDWithString(ACCOUNT);
     auto accountKk = ripple::keylet::account(account).key;
     auto owneDirKk = ripple::keylet::ownerDir(account).key;
     // fetch account object return something
     auto fake = Blob{'f', 'a', 'k', 'e'};
-    ON_CALL(
-        *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_))
-        .WillByDefault(Return(fake));
+    ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake));
     EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2);
     // return owner index
-    ripple::STObject ownerDir = CreateOwnerDirLedgerObject(
-        {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1);
-    ON_CALL(
-        *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_))
+    ripple::STObject ownerDir =
CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); // return two payment channel objects std::vector bbs; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); bbs.push_back(channel1.getSerializer().peekData()); bbs.push_back(channel1.getSerializer().peekData()); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); boost::asio::spawn(ctx, [this, &account](boost::asio::yield_context yield) { - auto ret = traverseOwnedNodes( - *mockBackendPtr, account, 9, 10, {}, yield, [](auto) { + auto ret = traverseOwnedNodes(*mockBackendPtr, account, 9, 10, {}, yield, [](auto) { - }); + }); auto cursor = std::get_if(&ret); EXPECT_TRUE(cursor != nullptr); EXPECT_EQ( @@ -171,24 +154,20 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarker) // limit = 10, return 10 objects and marker TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnSamePageMarker) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto account = GetAccountIDWithString(ACCOUNT); auto accountKk = ripple::keylet::account(account).key; auto owneDirKk = ripple::keylet::ownerDir(account).key; // fetch account object return something auto fake = Blob{'f', 'a', 'k', 'e'}; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); std::vector bbs; int objectsCount = 11; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); std::vector indexes; while (objectsCount != 0) { @@ -200,8 +179,7 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnSamePageMarker) ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); ownerDir.setFieldU64(ripple::sfIndexNext, 99); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -209,8 +187,7 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnSamePageMarker) boost::asio::spawn(ctx, [this, &account](boost::asio::yield_context yield) { auto count = 0; - auto ret = traverseOwnedNodes( - *mockBackendPtr, account, 9, 10, {}, yield, [&](auto) { count++; }); + auto ret = traverseOwnedNodes(*mockBackendPtr, account, 9, 10, {}, yield, [&](auto) { count++; }); auto cursor = std::get_if(&ret); EXPECT_TRUE(cursor != nullptr); EXPECT_EQ(count, 10); @@ -222,29 +199,24 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnSamePageMarker) // 10 objects per page, limit is 15, return the second page as marker TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnOtherPageMarker) { - MockBackend* rawBackendPtr = - 
static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto account = GetAccountIDWithString(ACCOUNT); auto accountKk = ripple::keylet::account(account).key; auto ownerDirKk = ripple::keylet::ownerDir(account).key; constexpr static auto nextPage = 99; constexpr static auto limit = 15; - auto ownerDir2Kk = - ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; + auto ownerDir2Kk = ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; // fetch account object return something auto fake = Blob{'f', 'a', 'k', 'e'}; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); std::vector bbs; int objectsCount = 10; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); std::vector indexes; while (objectsCount != 0) { @@ -262,15 +234,12 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnOtherPageMarker) ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); ownerDir.setFieldU64(ripple::sfIndexNext, nextPage); // first page 's next page is 99 - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); ripple::STObject ownerDir2 = CreateOwnerDirLedgerObject(indexes, INDEX1); // second page's next page is 0 ownerDir2.setFieldU64(ripple::sfIndexNext, 0); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) .WillByDefault(Return(ownerDir2.getSerializer().peekData())); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -278,10 +247,7 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnOtherPageMarker) boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { auto count = 0; - auto ret = traverseOwnedNodes( - *mockBackendPtr, account, 9, limit, {}, yield, [&](auto) { - count++; - }); + auto ret = traverseOwnedNodes(*mockBackendPtr, account, 9, limit, {}, yield, [&](auto) { count++; }); auto cursor = std::get_if(&ret); EXPECT_TRUE(cursor != nullptr); EXPECT_EQ(count, limit); @@ -293,27 +259,22 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesNoInputMarkerReturnOtherPageMarker) // Send a valid marker TEST_F(RPCHelpersTest, TraverseOwnedNodesWithMarkerReturnSamePageMarker) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto account = GetAccountIDWithString(ACCOUNT); auto accountKk = ripple::keylet::account(account).key; - auto ownerDir2Kk = - ripple::keylet::page(ripple::keylet::ownerDir(account), 99).key; + auto ownerDir2Kk = ripple::keylet::page(ripple::keylet::ownerDir(account), 99).key; constexpr static auto limit = 8; constexpr static auto pageNum = 99; // fetch account object return something auto fake = Blob{'f', 'a', 'k', 'e'}; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, 
doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); std::vector bbs; int objectsCount = 10; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); std::vector indexes; while (objectsCount != 0) { @@ -331,9 +292,7 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesWithMarkerReturnSamePageMarker) ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); ownerDir.setFieldU64(ripple::sfIndexNext, 0); // return ownerdir when search by marker - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -342,13 +301,7 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesWithMarkerReturnSamePageMarker) boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { auto count = 0; auto ret = traverseOwnedNodes( - *mockBackendPtr, - account, - 9, - limit, - fmt::format("{},{}", INDEX1, pageNum), - yield, - [&](auto) { count++; }); + *mockBackendPtr, account, 9, limit, fmt::format("{},{}", INDEX1, pageNum), yield, [&](auto) { count++; }); auto cursor = std::get_if(&ret); EXPECT_TRUE(cursor != nullptr); EXPECT_EQ(count, limit); @@ -361,25 +314,20 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesWithMarkerReturnSamePageMarker) // return empty TEST_F(RPCHelpersTest, TraverseOwnedNodesWithUnexistingIndexMarker) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto account = GetAccountIDWithString(ACCOUNT); auto accountKk = ripple::keylet::account(account).key; - auto ownerDir2Kk = - ripple::keylet::page(ripple::keylet::ownerDir(account), 99).key; + auto ownerDir2Kk = ripple::keylet::page(ripple::keylet::ownerDir(account), 99).key; constexpr static auto limit = 8; constexpr static auto pageNum = 99; // fetch account object return something auto fake = Blob{'f', 'a', 'k', 'e'}; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); int objectsCount = 10; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); std::vector indexes; while (objectsCount != 0) { @@ -390,21 +338,13 @@ TEST_F(RPCHelpersTest, TraverseOwnedNodesWithUnexistingIndexMarker) ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); ownerDir.setFieldU64(ripple::sfIndexNext, 0); // return ownerdir when search by marker - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); boost::asio::spawn(ctx, [&, this](boost::asio::yield_context yield) { auto count = 0; auto ret = traverseOwnedNodes( - *mockBackendPtr, - account, - 9, - limit, - fmt::format("{},{}", INDEX2, pageNum), - yield, - 
[&](auto) { count++; }); + *mockBackendPtr, account, 9, limit, fmt::format("{},{}", INDEX2, pageNum), yield, [&](auto) { count++; }); auto cursor = std::get_if(&ret); EXPECT_TRUE(cursor != nullptr); EXPECT_EQ(count, 0); diff --git a/unittests/rpc/handlers/AccountChannelsTest.cpp b/unittests/rpc/handlers/AccountChannelsTest.cpp index e8ddafb6..0b9d2f6c 100644 --- a/unittests/rpc/handlers/AccountChannelsTest.cpp +++ b/unittests/rpc/handlers/AccountChannelsTest.cpp @@ -28,17 +28,13 @@ using namespace RPCng; namespace json = boost::json; using namespace testing; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; constexpr static auto ACCOUNT3 = "rB9BMzh27F3Q6a5FtGPDayQoCCEdiRdqcK"; -constexpr static auto INDEX1 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; -constexpr static auto TXNID = - "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; +constexpr static auto INDEX1 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; +constexpr static auto TXNID = "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; class RPCAccountHandlerTest : public HandlerBaseTest { @@ -228,8 +224,7 @@ TEST_F(RPCAccountHandlerTest, AccountNotString) // error case ledger non exist via hash TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerHash) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // mock fetchLedgerByHash return empty ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) .WillByDefault(Return(std::optional{})); @@ -256,13 +251,11 @@ TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerHash) // error case ledger non exist via index TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerStringIndex) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -282,13 +275,11 @@ TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerStringIndex) TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerIntIndex) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ 
-310,14 +301,12 @@ TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerIntIndex) // idk why this case will happen in reality TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerHash2) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerByHash return ledger but seq is 31 > 30 auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 31); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -339,8 +328,7 @@ TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerHash2) // error case ledger > max seq via index TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerIndex2) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // no need to check from db,call fetchLedgerBySequence 0 time @@ -365,17 +353,14 @@ TEST_F(RPCAccountHandlerTest, NonExistLedgerViaLedgerIndex2) // error case account not exist TEST_F(RPCAccountHandlerTest, NonExistAccount) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return emtpy - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -426,13 +411,11 @@ TEST_F(RPCAccountHandlerTest, DefaultParameterTest) } ] })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -440,23 +423,18 @@ TEST_F(RPCAccountHandlerTest, DefaultParameterTest) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index containing 2 indexes - ripple::STObject ownerDir = CreateOwnerDirLedgerObject( - 
{ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + ripple::STObject ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); // return two payment channel objects std::vector bbs; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); bbs.push_back(channel1.getSerializer().peekData()); bbs.push_back(channel1.getSerializer().peekData()); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -478,13 +456,11 @@ TEST_F(RPCAccountHandlerTest, DefaultParameterTest) // normal case : limit is used TEST_F(RPCAccountHandlerTest, UseLimit) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -492,9 +468,7 @@ TEST_F(RPCAccountHandlerTest, UseLimit) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index std::vector indexes; @@ -504,15 +478,13 @@ TEST_F(RPCAccountHandlerTest, UseLimit) while (repetitions--) { indexes.push_back(ripple::uint256{INDEX1}); - ripple::STObject channel = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); bbs.push_back(channel.getSerializer().peekData()); } ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); // it should not appear in return marker,marker is the current page ownerDir.setFieldU64(ripple::sfIndexNext, 99); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); @@ -530,22 +502,18 @@ TEST_F(RPCAccountHandlerTest, UseLimit) ASSERT_TRUE(output); EXPECT_EQ((*output).as_object().at("channels").as_array().size(), 20); - EXPECT_THAT( - (*output).as_object().at("marker").as_string().c_str(), - EndsWith(",0")); + EXPECT_THAT((*output).as_object().at("marker").as_string().c_str(), EndsWith(",0")); }); } // normal case : destination is used TEST_F(RPCAccountHandlerTest, UseDestination) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); 
mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -553,9 +521,7 @@ TEST_F(RPCAccountHandlerTest, UseDestination) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index std::vector indexes; @@ -566,8 +532,7 @@ TEST_F(RPCAccountHandlerTest, UseDestination) while (repetitions--) { indexes.push_back(ripple::uint256{INDEX1}); - ripple::STObject channel = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); bbs.push_back(channel.getSerializer().peekData()); } @@ -576,14 +541,12 @@ TEST_F(RPCAccountHandlerTest, UseDestination) while (repetitions--) { indexes.push_back(ripple::uint256{INDEX1}); - ripple::STObject channel = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT3, 100, 10, 32, TXNID, 28); + ripple::STObject channel = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT3, 100, 10, 32, TXNID, 28); bbs.push_back(channel.getSerializer().peekData()); } ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); @@ -609,13 +572,11 @@ TEST_F(RPCAccountHandlerTest, UseDestination) // normal case : but the lines is emtpy TEST_F(RPCAccountHandlerTest, EmptyChannel) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -623,15 +584,12 @@ TEST_F(RPCAccountHandlerTest, EmptyChannel) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index ripple::STObject ownerDir = CreateOwnerDirLedgerObject({}, INDEX1); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) 
.WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); auto const input = json::parse(fmt::format( @@ -687,13 +645,11 @@ TEST_F(RPCAccountHandlerTest, OptionalResponseField) } ] })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -701,23 +657,18 @@ TEST_F(RPCAccountHandlerTest, OptionalResponseField) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index - ripple::STObject ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + ripple::STObject ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); // return two payment channel objects std::vector bbs; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); channel1.setFieldU32(ripple::sfExpiration, 100); channel1.setFieldU32(ripple::sfCancelAfter, 200); channel1.setFieldU32(ripple::sfSourceTag, 300); @@ -742,8 +693,7 @@ TEST_F(RPCAccountHandlerTest, OptionalResponseField) // normal case : test marker output correct TEST_F(RPCAccountHandlerTest, MarkerOutput) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto account = GetAccountIDWithString(ACCOUNT); @@ -751,22 +701,17 @@ TEST_F(RPCAccountHandlerTest, MarkerOutput) auto ownerDirKk = ripple::keylet::ownerDir(account).key; constexpr static auto nextPage = 99; constexpr static auto limit = 15; - auto ownerDir2Kk = - ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; + auto ownerDir2Kk = ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto fake = Blob{'f', 'a', 'k', 'e'}; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, 
doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); std::vector bbs; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); // owner dir contains 10 indexes int objectsCount = 10; std::vector indexes; @@ -787,15 +732,12 @@ TEST_F(RPCAccountHandlerTest, MarkerOutput) ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); ownerDir.setFieldU64(ripple::sfIndexNext, nextPage); // first page 's next page is 99 - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); ripple::STObject ownerDir2 = CreateOwnerDirLedgerObject(indexes, INDEX1); // second page's next page is 0 ownerDir2.setFieldU64(ripple::sfIndexNext, 0); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) .WillByDefault(Return(ownerDir2.getSerializer().peekData())); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -812,9 +754,7 @@ TEST_F(RPCAccountHandlerTest, MarkerOutput) auto handler = AnyHandler{AccountChannelsHandler{this->mockBackendPtr}}; auto const output = handler.process(input, Context{std::ref(yield)}); ASSERT_TRUE(output); - EXPECT_EQ( - (*output).as_object().at("marker").as_string().c_str(), - fmt::format("{},{}", INDEX1, nextPage)); + EXPECT_EQ((*output).as_object().at("marker").as_string().c_str(), fmt::format("{},{}", INDEX1, nextPage)); EXPECT_EQ((*output).as_object().at("channels").as_array().size(), 15); }); } @@ -822,30 +762,24 @@ TEST_F(RPCAccountHandlerTest, MarkerOutput) // normal case : handler marker correctly TEST_F(RPCAccountHandlerTest, MarkerInput) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto account = GetAccountIDWithString(ACCOUNT); auto accountKk = ripple::keylet::account(account).key; constexpr static auto nextPage = 99; constexpr static auto limit = 15; - auto ownerDirKk = - ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; + auto ownerDirKk = ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto fake = Blob{'f', 'a', 'k', 'e'}; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); std::vector bbs; - ripple::STObject channel1 = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); + ripple::STObject channel1 = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 10, 32, TXNID, 28); int objectsCount = limit; std::vector 
indexes; while (objectsCount != 0) @@ -858,8 +792,7 @@ TEST_F(RPCAccountHandlerTest, MarkerInput) ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); ownerDir.setFieldU64(ripple::sfIndexNext, 0); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -882,7 +815,6 @@ TEST_F(RPCAccountHandlerTest, MarkerInput) EXPECT_TRUE((*output).as_object().if_contains("marker") == nullptr); // the first item is the marker itself, so the result will have limit-1 // items - EXPECT_EQ( - (*output).as_object().at("channels").as_array().size(), limit - 1); + EXPECT_EQ((*output).as_object().at("channels").as_array().size(), limit - 1); }); } diff --git a/unittests/rpc/handlers/AccountCurrenciesTest.cpp b/unittests/rpc/handlers/AccountCurrenciesTest.cpp index de8d4f09..2cdf9bb3 100644 --- a/unittests/rpc/handlers/AccountCurrenciesTest.cpp +++ b/unittests/rpc/handlers/AccountCurrenciesTest.cpp @@ -31,14 +31,10 @@ using namespace testing; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; constexpr static auto ISSUER = "rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto INDEX1 = - "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; -constexpr static auto TXNID = - "E3FE6EA3D48F0C2B639448020EA4F03D4F4F8FFDB243A852A0F59177921B4879"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto INDEX1 = "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto TXNID = "E3FE6EA3D48F0C2B639448020EA4F03D4F4F8FFDB243A852A0F59177921B4879"; class RPCAccountCurrenciesHandlerTest : public HandlerBaseTest { @@ -52,10 +48,8 @@ TEST_F(RPCAccountCurrenciesHandlerTest, AccountNotExsit) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const static input = boost::json::parse(fmt::format( @@ -80,8 +74,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaIntSequence) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::optional{})); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -106,8 +99,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaStringSequence) mockBackendPtr->updateRange(30); // 
max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(12, _)) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(12, _)).WillByDefault(Return(std::optional{})); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -174,37 +166,30 @@ TEST_F(RPCAccountCurrenciesHandlerTest, DefaultParameter) // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)).WillByDefault(Return(ledgerinfo)); // return valid account - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; - ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)) - .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)).WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, - ripple::uint256{INDEX2}, - ripple::uint256{INDEX2}}, - INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDir = + CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, 30, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); // ACCOUNT can receive USD 10 from ACCOUNT2 and send USD 20 to ACCOUNT2, now // the balance is 100, ACCOUNT can only send USD to ACCOUNT2 - auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); + auto const line1 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); // ACCOUNT2 can receive JPY 10 from ACCOUNT and send JPY 20 to ACCOUNT, now // the balance is 100, ACCOUNT2 can only send JPY to ACCOUNT - auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT, "JPY", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123, 0); + auto const line2 = + CreateRippleStateLedgerObject(ACCOUNT, "JPY", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123, 0); // ACCOUNT can receive EUR 10 from ACCOUNT and send EUR 20 to ACCOUNT2, now // the balance is 8, ACCOUNT can receive/send EUR to/from ACCOUNT2 - auto const line3 = CreateRippleStateLedgerObject( - ACCOUNT, "EUR", ISSUER, 8, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); + auto const line3 = + CreateRippleStateLedgerObject(ACCOUNT, "EUR", ISSUER, 8, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); std::vector bbs; bbs.push_back(line1.getSerializer().peekData()); bbs.push_back(line2.getSerializer().peekData()); @@ -233,24 +218,19 @@ TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderHash) // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, 
_)).WillByDefault(Return(ledgerinfo)); // return valid account - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; - ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)) - .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)).WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); - auto const ownerDir = - CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, 30, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); std::vector bbs; - auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); + auto const line1 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); bbs.push_back(line1.getSerializer().peekData()); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -278,24 +258,20 @@ TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderSeq) // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, ledgerSeq); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(ledgerSeq, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(ledgerSeq, _)).WillByDefault(Return(ledgerinfo)); // return valid account - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, ledgerSeq, _)) .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); - auto const ownerDir = - CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, ledgerSeq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); std::vector bbs; - auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); + auto const line1 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); bbs.push_back(line1.getSerializer().peekData()); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -311,7 +287,6 @@ TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderSeq) runSpawn([&](auto& yield) { auto const output = handler.process(input, Context{std::ref(yield)}); ASSERT_TRUE(output); - EXPECT_EQ( - (*output).as_object().at("ledger_index").as_uint64(), ledgerSeq); + EXPECT_EQ((*output).as_object().at("ledger_index").as_uint64(), ledgerSeq); }); } diff --git a/unittests/rpc/handlers/AccountInfoTest.cpp 
b/unittests/rpc/handlers/AccountInfoTest.cpp index cdcf0465..e6537503 100644 --- a/unittests/rpc/handlers/AccountInfoTest.cpp +++ b/unittests/rpc/handlers/AccountInfoTest.cpp @@ -31,10 +31,8 @@ using namespace testing; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT1 = "rsA2LpzuawewSBQXkiju3YQTMzW13pAAdW"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto INDEX1 = - "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto INDEX1 = "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; class RPCAccountInfoHandlerTest : public HandlerBaseTest { @@ -49,9 +47,8 @@ struct AccountInfoParamTestCaseBundle }; // parameterized test cases for parameters check -struct AccountInfoParameterTest - : public RPCAccountInfoHandlerTest, - public WithParamInterface +struct AccountInfoParameterTest : public RPCAccountInfoHandlerTest, + public WithParamInterface { struct NameGenerator { @@ -59,8 +56,7 @@ struct AccountInfoParameterTest std::string operator()(const testing::TestParamInfo& info) const { - auto bundle = - static_cast(info.param); + auto bundle = static_cast(info.param); return bundle.testName; } }; @@ -70,31 +66,11 @@ static auto generateTestValuesForParametersTest() { return std::vector{ - AccountInfoParamTestCaseBundle{ - "MissingAccountAndIdent", - R"({})", - "actMalformed", - "Account malformed."}, - AccountInfoParamTestCaseBundle{ - "AccountNotString", - R"({"account":1})", - "invalidParams", - "accountNotString"}, - AccountInfoParamTestCaseBundle{ - "AccountInvalid", - R"({"account":"xxx"})", - "invalidParams", - "accountMalformed"}, - AccountInfoParamTestCaseBundle{ - "IdentNotString", - R"({"ident":1})", - "invalidParams", - "identNotString"}, - AccountInfoParamTestCaseBundle{ - "IdentInvalid", - R"({"ident":"xxx"})", - "invalidParams", - "identMalformed"}, + AccountInfoParamTestCaseBundle{"MissingAccountAndIdent", R"({})", "actMalformed", "Account malformed."}, + AccountInfoParamTestCaseBundle{"AccountNotString", R"({"account":1})", "invalidParams", "accountNotString"}, + AccountInfoParamTestCaseBundle{"AccountInvalid", R"({"account":"xxx"})", "invalidParams", "accountMalformed"}, + AccountInfoParamTestCaseBundle{"IdentNotString", R"({"ident":1})", "invalidParams", "identNotString"}, + AccountInfoParamTestCaseBundle{"IdentInvalid", R"({"ident":"xxx"})", "invalidParams", "identMalformed"}, AccountInfoParamTestCaseBundle{ "SignerListsInvalid", R"({"ident":"rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun", "signer_lists":1})", @@ -135,9 +111,7 @@ TEST_P(AccountInfoParameterTest, InvalidParams) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), testBundle.expectedError); - EXPECT_EQ( - err.at("error_message").as_string(), - testBundle.expectedErrorMessage); + EXPECT_EQ(err.at("error_message").as_string(), testBundle.expectedErrorMessage); }); } @@ -148,8 +122,7 @@ TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaIntSequence) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, 
_)).WillByDefault(Return(std::optional{})); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -174,8 +147,7 @@ TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaStringSequence) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::nullopt)); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -228,10 +200,8 @@ TEST_F(RPCAccountInfoHandlerTest, AccountNotExsit) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const static input = boost::json::parse(fmt::format( @@ -257,11 +227,9 @@ TEST_F(RPCAccountInfoHandlerTest, AccountInvalid) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); // return a valid ledger object but not account root - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0))); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0))); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const static input = boost::json::parse(fmt::format( @@ -275,9 +243,7 @@ TEST_F(RPCAccountInfoHandlerTest, AccountInvalid) ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "dbDeserialization"); - EXPECT_EQ( - err.at("error_message").as_string(), - "Database deserialization error."); + EXPECT_EQ(err.at("error_message").as_string(), "Database deserialization error."); }); } @@ -289,12 +255,10 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsInvalid) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); auto const account = GetAccountIDWithString(ACCOUNT); auto const accountKk = ripple::keylet::account(account).key; - auto const accountRoot = - CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2); + auto const accountRoot = CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2); ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)) .WillByDefault(Return(accountRoot.getSerializer().peekData())); auto signersKey = ripple::keylet::signers(account).key; @@ -314,9 +278,7 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsInvalid) ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "dbDeserialization"); - EXPECT_EQ( - err.at("error_message").as_string(), - "Database deserialization error."); + EXPECT_EQ(err.at("error_message").as_string(), 
"Database deserialization error."); }); } @@ -379,20 +341,16 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsTrue) mockBackendPtr->updateRange(30); // max auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); auto const account = GetAccountIDWithString(ACCOUNT); auto const accountKk = ripple::keylet::account(account).key; - auto const accountRoot = - CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2); + auto const accountRoot = CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2); ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)) .WillByDefault(Return(accountRoot.getSerializer().peekData())); auto signersKey = ripple::keylet::signers(account).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(signersKey, 30, _)) - .WillByDefault(Return(CreateSignerLists({{ACCOUNT1, 1}, {ACCOUNT2, 1}}) - .getSerializer() - .peekData())); + .WillByDefault(Return(CreateSignerLists({{ACCOUNT1, 1}, {ACCOUNT2, 1}}).getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); auto const static input = boost::json::parse(fmt::format( @@ -416,13 +374,11 @@ TEST_F(RPCAccountInfoHandlerTest, IdentAndSignerListsFalse) mockBackendPtr->updateRange(30); // max auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); auto const account = GetAccountIDWithString(ACCOUNT); auto const accountKk = ripple::keylet::account(account).key; - auto const accountRoot = - CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2); + auto const accountRoot = CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2); ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, 30, _)) .WillByDefault(Return(accountRoot.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); diff --git a/unittests/rpc/handlers/AccountLinesTest.cpp b/unittests/rpc/handlers/AccountLinesTest.cpp index 1222379c..beedb493 100644 --- a/unittests/rpc/handlers/AccountLinesTest.cpp +++ b/unittests/rpc/handlers/AccountLinesTest.cpp @@ -28,17 +28,13 @@ using namespace RPCng; namespace json = boost::json; using namespace testing; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; constexpr static auto ACCOUNT3 = "rB9BMzh27F3Q6a5FtGPDayQoCCEdiRdqcK"; -constexpr static auto INDEX1 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; -constexpr static auto TXNID = - "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; +constexpr static auto INDEX1 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; +constexpr static auto TXNID = 
"05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; class RPCAccountLinesHandlerTest : public HandlerBaseTest { @@ -235,8 +231,7 @@ TEST_F(RPCAccountLinesHandlerTest, AccountNotString) // error case ledger non exist via hash TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerHash) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // mock fetchLedgerByHash return empty ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) .WillByDefault(Return(std::optional{})); @@ -263,13 +258,11 @@ TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerHash) // error case ledger non exist via index TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerStringIndex) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -289,13 +282,11 @@ TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerStringIndex) TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerIntIndex) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -317,14 +308,12 @@ TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerIntIndex) // idk why this case will happen in reality TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerHash2) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerByHash return ledger but seq is 31 > 30 auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 31); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -346,8 +335,7 @@ TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerHash2) // error case ledger > max seq via index TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerIndex2) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // no need to check from db, call fetchLedgerBySequence 0 time @@ -372,17 +360,14 @@ TEST_F(RPCAccountLinesHandlerTest, NonExistLedgerViaLedgerIndex2) // error case account not exist TEST_F(RPCAccountLinesHandlerTest, 
NonExistAccount) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return emtpy - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -404,13 +389,11 @@ TEST_F(RPCAccountLinesHandlerTest, NonExistAccount) // normal case when only provide account TEST_F(RPCAccountLinesHandlerTest, DefaultParameterTest) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -418,25 +401,21 @@ TEST_F(RPCAccountLinesHandlerTest, DefaultParameterTest) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index containing 2 indexes - ripple::STObject ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + ripple::STObject ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); // return two trust lines std::vector bbs; - auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123); - auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT2, "USD", ACCOUNT, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123); + auto const line1 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123); + auto const line2 = + CreateRippleStateLedgerObject(ACCOUNT2, "USD", ACCOUNT, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123); bbs.push_back(line1.getSerializer().peekData()); bbs.push_back(line2.getSerializer().peekData()); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -490,13 +469,11 @@ TEST_F(RPCAccountLinesHandlerTest, DefaultParameterTest) // normal case : limit is used TEST_F(RPCAccountLinesHandlerTest, UseLimit) { - MockBackend* 
rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -504,9 +481,7 @@ TEST_F(RPCAccountLinesHandlerTest, UseLimit) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index std::vector indexes; @@ -516,24 +491,14 @@ TEST_F(RPCAccountLinesHandlerTest, UseLimit) while (repetitions--) { indexes.push_back(ripple::uint256{INDEX1}); - auto const line = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ACCOUNT2, - 10, - ACCOUNT, - 100, - ACCOUNT2, - 200, - TXNID, - 123); + auto const line = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123); bbs.push_back(line.getSerializer().peekData()); } ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); // it should not appear in return marker,marker is the current page ownerDir.setFieldU64(ripple::sfIndexNext, 99); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); @@ -551,22 +516,18 @@ TEST_F(RPCAccountLinesHandlerTest, UseLimit) ASSERT_TRUE(output); EXPECT_EQ((*output).as_object().at("lines").as_array().size(), 20); - EXPECT_THAT( - (*output).as_object().at("marker").as_string().c_str(), - EndsWith(",0")); + EXPECT_THAT((*output).as_object().at("marker").as_string().c_str(), EndsWith(",0")); }); } // normal case : destination is used TEST_F(RPCAccountLinesHandlerTest, UseDestination) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -574,9 +535,7 @@ TEST_F(RPCAccountLinesHandlerTest, UseDestination) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index std::vector indexes; @@ -587,17 +546,8 @@ TEST_F(RPCAccountLinesHandlerTest, UseDestination) while (repetitions--) { 
indexes.push_back(ripple::uint256{INDEX1}); - auto const line = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ACCOUNT2, - 10, - ACCOUNT, - 100, - ACCOUNT2, - 200, - TXNID, - 123); + auto const line = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123); bbs.push_back(line.getSerializer().peekData()); } @@ -606,23 +556,13 @@ TEST_F(RPCAccountLinesHandlerTest, UseDestination) while (repetitions--) { indexes.push_back(ripple::uint256{INDEX1}); - auto const line = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ACCOUNT3, - 10, - ACCOUNT, - 100, - ACCOUNT3, - 200, - TXNID, - 123); + auto const line = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT3, 10, ACCOUNT, 100, ACCOUNT3, 200, TXNID, 123); bbs.push_back(line.getSerializer().peekData()); } ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); @@ -648,13 +588,11 @@ TEST_F(RPCAccountLinesHandlerTest, UseDestination) // normal case : but the lines is emtpy TEST_F(RPCAccountLinesHandlerTest, EmptyChannel) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -662,15 +600,12 @@ TEST_F(RPCAccountLinesHandlerTest, EmptyChannel) auto owneDirKk = ripple::keylet::ownerDir(account).key; auto fake = Blob{'f', 'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index ripple::STObject ownerDir = CreateOwnerDirLedgerObject({}, INDEX1); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); auto const input = json::parse(fmt::format( @@ -722,13 +657,11 @@ TEST_F(RPCAccountLinesHandlerTest, OptionalResponseField) } ] })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto account = GetAccountIDWithString(ACCOUNT); @@ -737,30 +670,24 @@ TEST_F(RPCAccountLinesHandlerTest, OptionalResponseField) auto fake = Blob{'f', 
'a', 'k', 'e'}; // return a non empty account - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); // return owner index - ripple::STObject ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + ripple::STObject ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(owneDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); // return few trust lines std::vector bbs; - auto line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0); + auto line1 = CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0); line1.setFlag(ripple::lsfHighAuth); line1.setFlag(ripple::lsfHighNoRipple); line1.setFlag(ripple::lsfHighFreeze); bbs.push_back(line1.getSerializer().peekData()); - auto line2 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ACCOUNT2, 20, ACCOUNT, 200, ACCOUNT2, 400, TXNID, 0); + auto line2 = CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 20, ACCOUNT, 200, ACCOUNT2, 400, TXNID, 0); line2.setFlag(ripple::lsfLowAuth); line2.setFlag(ripple::lsfLowNoRipple); line2.setFlag(ripple::lsfLowFreeze); @@ -784,8 +711,7 @@ TEST_F(RPCAccountLinesHandlerTest, OptionalResponseField) // normal case : test marker output correct TEST_F(RPCAccountLinesHandlerTest, MarkerOutput) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto account = GetAccountIDWithString(ACCOUNT); @@ -793,22 +719,17 @@ TEST_F(RPCAccountLinesHandlerTest, MarkerOutput) auto ownerDirKk = ripple::keylet::ownerDir(account).key; constexpr static auto nextPage = 99; constexpr static auto limit = 15; - auto ownerDir2Kk = - ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; + auto ownerDir2Kk = ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto fake = Blob{'f', 'a', 'k', 'e'}; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); std::vector bbs; - auto line = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0); + auto line = CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0); // owner dir contains 10 indexes int objectsCount = 10; @@ -830,15 +751,12 @@ TEST_F(RPCAccountLinesHandlerTest, MarkerOutput) ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); 
ownerDir.setFieldU64(ripple::sfIndexNext, nextPage); // first page's next page is 99 - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); ripple::STObject ownerDir2 = CreateOwnerDirLedgerObject(indexes, INDEX1); // second page's next page is 0 ownerDir2.setFieldU64(ripple::sfIndexNext, 0); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDir2Kk, testing::_, testing::_)) .WillByDefault(Return(ownerDir2.getSerializer().peekData())); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -855,9 +773,7 @@ TEST_F(RPCAccountLinesHandlerTest, MarkerOutput) auto handler = AnyHandler{AccountLinesHandler{this->mockBackendPtr}}; auto const output = handler.process(input, Context{std::ref(yield)}); ASSERT_TRUE(output); - EXPECT_EQ( - (*output).as_object().at("marker").as_string().c_str(), - fmt::format("{},{}", INDEX1, nextPage)); + EXPECT_EQ((*output).as_object().at("marker").as_string().c_str(), fmt::format("{},{}", INDEX1, nextPage)); EXPECT_EQ((*output).as_object().at("lines").as_array().size(), 15); }); } @@ -865,30 +781,25 @@ TEST_F(RPCAccountLinesHandlerTest, MarkerOutput) // normal case : handler marker correctly TEST_F(RPCAccountLinesHandlerTest, MarkerInput) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto account = GetAccountIDWithString(ACCOUNT); auto accountKk = ripple::keylet::account(account).key; constexpr static auto nextPage = 99; constexpr static auto limit = 15; - auto ownerDirKk = - ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; + auto ownerDirKk = ripple::keylet::page(ripple::keylet::ownerDir(account), nextPage).key; auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch account object return something auto fake = Blob{'f', 'a', 'k', 'e'}; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)) - .WillByDefault(Return(fake)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, testing::_, testing::_)).WillByDefault(Return(fake)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); std::vector bbs; - auto const line = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0); + auto const line = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 0); int objectsCount = limit; std::vector indexes; while (objectsCount != 0) @@ -901,8 +812,7 @@ TEST_F(RPCAccountLinesHandlerTest, MarkerInput) ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); ownerDir.setFieldU64(ripple::sfIndexNext, 0); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -925,7 +835,6 @@ 
TEST_F(RPCAccountLinesHandlerTest, MarkerInput) EXPECT_TRUE((*output).as_object().if_contains("marker") == nullptr); // the first item is the marker itself, so the result will have limit-1 // items - EXPECT_EQ( - (*output).as_object().at("lines").as_array().size(), limit - 1); + EXPECT_EQ((*output).as_object().at("lines").as_array().size(), limit - 1); }); } diff --git a/unittests/rpc/handlers/AccountOffersTest.cpp b/unittests/rpc/handlers/AccountOffersTest.cpp index 8c3cc438..cf350e03 100644 --- a/unittests/rpc/handlers/AccountOffersTest.cpp +++ b/unittests/rpc/handlers/AccountOffersTest.cpp @@ -26,10 +26,8 @@ constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto INDEX1 = - "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto INDEX1 = "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; using namespace RPCng; namespace json = boost::json; @@ -48,9 +46,8 @@ struct AccountOfferParamTestCaseBundle }; // parameterized test cases for parameters check -struct AccountOfferParameterTest - : public RPCAccountOffersHandlerTest, - public WithParamInterface +struct AccountOfferParameterTest : public RPCAccountOffersHandlerTest, + public WithParamInterface { struct NameGenerator { @@ -58,8 +55,7 @@ struct AccountOfferParameterTest std::string operator()(const testing::TestParamInfo& info) const { - auto bundle = - static_cast(info.param); + auto bundle = static_cast(info.param); return bundle.testName; } }; @@ -154,9 +150,7 @@ TEST_P(AccountOfferParameterTest, InvalidParams) ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), testBundle.expectedError); - EXPECT_EQ( - err.at("error_message").as_string(), - testBundle.expectedErrorMessage); + EXPECT_EQ(err.at("error_message").as_string(), testBundle.expectedErrorMessage); }); } @@ -195,8 +189,7 @@ TEST_F(RPCAccountOffersHandlerTest, LedgerNotFoundViaStringIndex) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::optional{})); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -223,8 +216,7 @@ TEST_F(RPCAccountOffersHandlerTest, LedgerNotFoundViaIntIndex) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::optional{})); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -251,10 +243,8 @@ TEST_F(RPCAccountOffersHandlerTest, AccountNotFound) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, 
fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const static input = boost::json::parse(fmt::format( @@ -307,17 +297,13 @@ TEST_F(RPCAccountOffersHandlerTest, DefaultParams) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, ledgerSeq); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, ledgerSeq, _)) .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); - auto const ownerDir = - CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, ledgerSeq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); @@ -360,17 +346,13 @@ TEST_F(RPCAccountOffersHandlerTest, Limit) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, ledgerSeq); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, ledgerSeq, _)) .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); - auto const ownerDir = CreateOwnerDirLedgerObject( - std::vector{20, ripple::uint256{INDEX1}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDir = CreateOwnerDirLedgerObject(std::vector{20, ripple::uint256{INDEX1}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, ledgerSeq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); @@ -403,8 +385,7 @@ TEST_F(RPCAccountOffersHandlerTest, Limit) auto const output = handler.process(input, Context{std::ref(yield)}); ASSERT_TRUE(output); EXPECT_EQ(output->at("offers").as_array().size(), 10); - EXPECT_EQ( - output->at("marker").as_string(), fmt::format("{},0", INDEX1)); + EXPECT_EQ(output->at("marker").as_string(), fmt::format("{},0", INDEX1)); }); } @@ -417,18 +398,14 @@ TEST_F(RPCAccountOffersHandlerTest, Marker) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, ledgerSeq); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); + auto const 
accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, ledgerSeq, _)) .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); auto const startPage = 2; - auto const ownerDir = CreateOwnerDirLedgerObject( - std::vector{20, ripple::uint256{INDEX1}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDir = CreateOwnerDirLedgerObject(std::vector{20, ripple::uint256{INDEX1}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; auto const hintIndex = ripple::keylet::page(ownerDirKk, startPage).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(hintIndex, ledgerSeq, _)) @@ -478,20 +455,16 @@ TEST_F(RPCAccountOffersHandlerTest, MarkerNotExists) auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, ledgerSeq); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerinfo)); - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerinfo)); + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, ledgerSeq, _)) .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); auto const startPage = 2; - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; auto const hintIndex = ripple::keylet::page(ownerDirKk, startPage).key; - ON_CALL(*rawBackendPtr, doFetchLedgerObject(hintIndex, ledgerSeq, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(hintIndex, ledgerSeq, _)).WillByDefault(Return(std::nullopt)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); auto const static input = boost::json::parse(fmt::format( diff --git a/unittests/rpc/handlers/AccountTxTest.cpp b/unittests/rpc/handlers/AccountTxTest.cpp index a9d3dca1..35d68b68 100644 --- a/unittests/rpc/handlers/AccountTxTest.cpp +++ b/unittests/rpc/handlers/AccountTxTest.cpp @@ -32,8 +32,7 @@ constexpr static auto MINSEQ = 10; constexpr static auto MAXSEQ = 30; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; class RPCAccountTxHandlerTest : public HandlerBaseTest { @@ -48,9 +47,7 @@ struct AccountTxParamTestCaseBundle }; // parameterized test cases for parameters check -struct AccountTxParameterTest - : public RPCAccountTxHandlerTest, - public WithParamInterface +struct AccountTxParameterTest : public RPCAccountTxHandlerTest, public WithParamInterface { struct NameGenerator { @@ -68,11 +65,7 @@ static auto generateTestValuesForParametersTest() { return std::vector{ - AccountTxParamTestCaseBundle{ - "MissingAccount", - R"({})", - "invalidParams", - "Required field 'account' missing"}, + AccountTxParamTestCaseBundle{"MissingAccount", R"({})", "invalidParams", "Required field 'account' missing"}, AccountTxParamTestCaseBundle{ "BinaryNotBool", R"({"account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", "binary": 1})", @@ -220,9 +213,7 @@ 
TEST_P(AccountTxParameterTest, InvalidParams) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), testBundle.expectedError); - EXPECT_EQ( - err.at("error_message").as_string(), - testBundle.expectedErrorMessage); + EXPECT_EQ(err.at("error_message").as_string(), testBundle.expectedErrorMessage); }); } @@ -231,23 +222,19 @@ genTransactions(uint32_t seq1, uint32_t seq2) { auto transactions = std::vector{}; auto trans1 = TransactionAndMetadata(); - ripple::STObject obj = - CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 1, 1, 32); + ripple::STObject obj = CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 1, 1, 32); trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = seq1; - ripple::STObject metaObj = - CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 22, 23); + ripple::STObject metaObj = CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 22, 23); trans1.metadata = metaObj.getSerializer().peekData(); trans1.date = 1; transactions.push_back(trans1); auto trans2 = TransactionAndMetadata(); - ripple::STObject obj2 = - CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 1, 1, 32); + ripple::STObject obj2 = CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 1, 1, 32); trans2.transaction = obj.getSerializer().peekData(); trans2.ledgerSequence = seq2; - ripple::STObject metaObj2 = - CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 22, 23); + ripple::STObject metaObj2 = CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 22, 23); trans2.metadata = metaObj2.getSerializer().peekData(); trans2.date = 2; transactions.push_back(trans2); @@ -258,13 +245,10 @@ TEST_F(RPCAccountTxHandlerTest, IndexSpecificForwardTrue) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( @@ -292,9 +276,7 @@ TEST_F(RPCAccountTxHandlerTest, IndexSpecificForwardTrue) EXPECT_EQ(output->at("account").as_string(), ACCOUNT); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ + 1); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ - 1); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -304,21 +286,17 @@ TEST_F(RPCAccountTxHandlerTest, IndexSpecificForwardFalse) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const 
transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), testing::_)) .Times(1); @@ -339,9 +317,7 @@ TEST_F(RPCAccountTxHandlerTest, IndexSpecificForwardFalse) EXPECT_EQ(output->at("account").as_string(), ACCOUNT); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ + 1); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ - 1); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -351,21 +327,14 @@ TEST_F(RPCAccountTxHandlerTest, IndexNotSpecificForwardTrue) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( - testing::_, - testing::_, - true, - testing::Optional(testing::Eq(TransactionsCursor{MINSEQ, 0})), - testing::_)) + testing::_, testing::_, true, testing::Optional(testing::Eq(TransactionsCursor{MINSEQ, 0})), testing::_)) .Times(1); runSpawn([&, this](auto& yield) { @@ -385,9 +354,7 @@ TEST_F(RPCAccountTxHandlerTest, IndexNotSpecificForwardTrue) EXPECT_EQ(output->at("account").as_string(), ACCOUNT); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -397,21 +364,17 @@ TEST_F(RPCAccountTxHandlerTest, IndexNotSpecificForwardFalse) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ, INT32_MAX})), + 
testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ, INT32_MAX})), testing::_)) .Times(1); @@ -432,9 +395,7 @@ TEST_F(RPCAccountTxHandlerTest, IndexNotSpecificForwardFalse) EXPECT_EQ(output->at("account").as_string(), ACCOUNT); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -444,21 +405,17 @@ TEST_F(RPCAccountTxHandlerTest, BinaryTrue) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ, INT32_MAX})), testing::_)) .Times(1); @@ -479,35 +436,19 @@ TEST_F(RPCAccountTxHandlerTest, BinaryTrue) EXPECT_EQ(output->at("account").as_string(), ACCOUNT); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_EQ( - output->at("transactions") - .as_array()[0] - .as_object() - .at("meta") - .as_string(), + output->at("transactions").as_array()[0].as_object().at("meta").as_string(), "201C00000000F8E5110061E762400000000000001681144B4E9C06F24296074F7B" "C48F92A97916C6DC5EA9E1E1E5110061E76240000000000000178114D31252CF90" "2EF8DD8451243869B38667CBD89DF3E1E1F1031000"); EXPECT_EQ( - output->at("transactions") - .as_array()[0] - .as_object() - .at("tx_blob") - .as_string(), + output->at("transactions").as_array()[0].as_object().at("tx_blob").as_string(), "120000240000002061400000000000000168400000000000000173047465737481" "144B4E9C06F24296074F7BC48F92A97916C6DC5EA98314D31252CF902EF8DD8451" "243869B38667CBD89DF3"); - EXPECT_EQ( - output->at("transactions") - .as_array()[0] - .as_object() - .at("date") - .as_uint64(), - 1); + EXPECT_EQ(output->at("transactions").as_array()[0].as_object().at("date").as_uint64(), 1); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -517,21 +458,14 @@ TEST_F(RPCAccountTxHandlerTest, LimitAndMarker) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 
34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( - testing::_, - testing::_, - false, - testing::Optional(testing::Eq(TransactionsCursor{10, 11})), - testing::_)) + testing::_, testing::_, false, testing::Optional(testing::Eq(TransactionsCursor{10, 11})), testing::_)) .Times(1); runSpawn([&, this](auto& yield) { @@ -554,9 +488,7 @@ TEST_F(RPCAccountTxHandlerTest, LimitAndMarker) EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ); EXPECT_EQ(output->at("limit").as_uint64(), 2); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); }); } @@ -565,29 +497,24 @@ TEST_F(RPCAccountTxHandlerTest, SpecificLedgerIndex) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // adjust the order for forward->false auto const transactions = genTransactions(MAXSEQ - 1, MINSEQ + 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), testing::_)) .Times(1); auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, MAXSEQ - 1); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)).WillByDefault(Return(ledgerinfo)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{AccountTxHandler{mockBackendPtr}}; @@ -613,12 +540,10 @@ TEST_F(RPCAccountTxHandlerTest, SpecificNonexistLedgerIntIndex) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)).WillByDefault(Return(std::nullopt)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{AccountTxHandler{mockBackendPtr}}; @@ -641,12 +566,10 @@ TEST_F(RPCAccountTxHandlerTest, SpecificNonexistLedgerStringIndex) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); 
EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)).WillByDefault(Return(std::nullopt)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{AccountTxHandler{mockBackendPtr}}; @@ -669,29 +592,24 @@ TEST_F(RPCAccountTxHandlerTest, SpecificLedgerHash) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // adjust the order for forward->false auto const transactions = genTransactions(MAXSEQ - 1, MINSEQ + 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), testing::_)) .Times(1); auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, MAXSEQ - 1); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{AccountTxHandler{mockBackendPtr}}; @@ -717,21 +635,17 @@ TEST_F(RPCAccountTxHandlerTest, TxLessThanMinSeq) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MAXSEQ - 1, MINSEQ + 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), testing::_)) .Times(1); @@ -762,21 +676,17 @@ TEST_F(RPCAccountTxHandlerTest, TxLargerThanMaxSeq) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MAXSEQ - 1, MINSEQ + 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchAccountTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, 
fetchAccountTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchAccountTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 2, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 2, INT32_MAX})), testing::_)) .Times(1); @@ -799,8 +709,6 @@ TEST_F(RPCAccountTxHandlerTest, TxLargerThanMaxSeq) EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ - 2); EXPECT_EQ(output->at("transactions").as_array().size(), 1); EXPECT_FALSE(output->as_object().contains("limit")); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); }); } diff --git a/unittests/rpc/handlers/BookOffersTest.cpp b/unittests/rpc/handlers/BookOffersTest.cpp index 266a538f..05d0a8b8 100644 --- a/unittests/rpc/handlers/BookOffersTest.cpp +++ b/unittests/rpc/handlers/BookOffersTest.cpp @@ -28,18 +28,13 @@ constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto INDEX1 = - "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto INDEX1 = "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; // 20 USD : 10 XRP -constexpr static auto PAYS20USDGETS10XRPBOOKDIR = - "43B83ADC452B85FCBADA6CAEAC5181C255A213630D58FFD455071AFD498D0000"; +constexpr static auto PAYS20USDGETS10XRPBOOKDIR = "43B83ADC452B85FCBADA6CAEAC5181C255A213630D58FFD455071AFD498D0000"; // 20 XRP : 10 USD -constexpr static auto PAYS20XRPGETS10USDBOOKDIR = - "7B1767D41DBCE79D9585CF9D0262A5FEC45E5206FF524F8B55071AFD498D0000"; +constexpr static auto PAYS20XRPGETS10USDBOOKDIR = "7B1767D41DBCE79D9585CF9D0262A5FEC45E5206FF524F8B55071AFD498D0000"; // transfer rate x2 constexpr static auto TRANSFERRATEX2 = 2000000000; @@ -59,9 +54,7 @@ struct ParameterTestBundle std::string expectedErrorMessage; }; -struct RPCBookOffersParameterTest - : public RPCBookOffersHandlerTest, - public WithParamInterface +struct RPCBookOffersParameterTest : public RPCBookOffersHandlerTest, public WithParamInterface { struct NameGenerator { @@ -80,13 +73,11 @@ TEST_P(RPCBookOffersParameterTest, CheckError) auto bundle = GetParam(); auto const handler = AnyHandler{BookOffersHandler{mockBackendPtr}}; runSpawn([&](boost::asio::yield_context yield) { - auto const output = handler.process( - json::parse(bundle.testJson), Context{std::ref(yield)}); + auto const output = handler.process(json::parse(bundle.testJson), Context{std::ref(yield)}); ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), bundle.expectedError); - EXPECT_EQ( - err.at("error_message").as_string(), bundle.expectedErrorMessage); + EXPECT_EQ(err.at("error_message").as_string(), bundle.expectedErrorMessage); }); } @@ -476,9 +467,8 @@ struct BookOffersNormalTestBundle std::string expectedJson; }; -struct RPCBookOffersNormalPathTest - : public RPCBookOffersHandlerTest, - public WithParamInterface 
+struct RPCBookOffersNormalPathTest : public RPCBookOffersHandlerTest, + public WithParamInterface { struct NameGenerator { @@ -502,40 +492,33 @@ TEST_P(RPCBookOffersNormalPathTest, CheckOutput) EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(ledgerinfo)); // return valid book dir - EXPECT_CALL(*rawBackendPtr, doFetchSuccessorKey) - .Times(bundle.mockedSuccessors.size()); + EXPECT_CALL(*rawBackendPtr, doFetchSuccessorKey).Times(bundle.mockedSuccessors.size()); for (auto const& [key, value] : bundle.mockedSuccessors) { - ON_CALL(*rawBackendPtr, doFetchSuccessorKey(key, seq, _)) - .WillByDefault(Return(value)); + ON_CALL(*rawBackendPtr, doFetchSuccessorKey(key, seq, _)).WillByDefault(Return(value)); } - EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject) - .Times(bundle.ledgerObjectCalls); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(bundle.ledgerObjectCalls); for (auto const& [key, value] : bundle.mockedLedgerObjects) { - ON_CALL(*rawBackendPtr, doFetchLedgerObject(key, seq, _)) - .WillByDefault(Return(value)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(key, seq, _)).WillByDefault(Return(value)); } std::vector bbs; std::transform( - bundle.mockedOffers.begin(), - bundle.mockedOffers.end(), - std::back_inserter(bbs), - [](auto const& obj) { return obj.getSerializer().peekData(); }); + bundle.mockedOffers.begin(), bundle.mockedOffers.end(), std::back_inserter(bbs), [](auto const& obj) { + return obj.getSerializer().peekData(); + }); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); auto const handler = AnyHandler{BookOffersHandler{mockBackendPtr}}; runSpawn([&](boost::asio::yield_context yield) { - auto const output = handler.process( - json::parse(bundle.inputJson), Context{std::ref(yield)}); + auto const output = handler.process(json::parse(bundle.inputJson), Context{std::ref(yield)}); ASSERT_TRUE(output); EXPECT_EQ(output.value(), json::parse(bundle.expectedJson)); }); @@ -548,17 +531,7 @@ generateNormalPathBookOffersTestBundles() auto const account2 = GetAccountIDWithString(ACCOUNT2); auto const frozenTrustLine = CreateRippleStateLedgerObject( - ACCOUNT2, - "USD", - ACCOUNT, - -8, - ACCOUNT2, - 1000, - ACCOUNT, - 2000, - INDEX1, - 2, - ripple::lsfLowFreeze); + ACCOUNT2, "USD", ACCOUNT, -8, ACCOUNT2, 1000, ACCOUNT, 2000, INDEX1, 2, ripple::lsfLowFreeze); auto const gets10USDPays20XRPOffer = CreateOfferLedgerObject( ACCOUNT2, @@ -590,18 +563,10 @@ generateNormalPathBookOffersTestBundles() ACCOUNT, PAYS20USDGETS10XRPBOOKDIR); - auto const getsXRPPaysUSDBook = - getBookBase(std::get(RPC::parseBook( - ripple::to_currency("USD"), - account, - ripple::xrpCurrency(), - ripple::xrpAccount()))); - auto const getsUSDPaysXRPBook = - getBookBase(std::get(RPC::parseBook( - ripple::xrpCurrency(), - ripple::xrpAccount(), - ripple::to_currency("USD"), - account))); + auto const getsXRPPaysUSDBook = getBookBase(std::get( + RPC::parseBook(ripple::to_currency("USD"), account, ripple::xrpCurrency(), ripple::xrpAccount()))); + auto const getsUSDPaysXRPBook = getBookBase(std::get( + RPC::parseBook(ripple::xrpCurrency(), ripple::xrpAccount(), ripple::to_currency("USD"), account))); auto const getsXRPPaysUSDInputJson = fmt::format( R"({{ @@ -633,31 
+598,11 @@ generateNormalPathBookOffersTestBundles() auto const feeLedgerObject = CreateFeeSettingBlob(1, 2, 3, 4, 0); - auto const trustline30Balance = CreateRippleStateLedgerObject( - ACCOUNT2, - "USD", - ACCOUNT, - -30, - ACCOUNT2, - 1000, - ACCOUNT, - 2000, - INDEX1, - 2, - 0); + auto const trustline30Balance = + CreateRippleStateLedgerObject(ACCOUNT2, "USD", ACCOUNT, -30, ACCOUNT2, 1000, ACCOUNT, 2000, INDEX1, 2, 0); - auto const trustline8Balance = CreateRippleStateLedgerObject( - ACCOUNT2, - "USD", - ACCOUNT, - -8, - ACCOUNT2, - 1000, - ACCOUNT, - 2000, - INDEX1, - 2, - 0); + auto const trustline8Balance = + CreateRippleStateLedgerObject(ACCOUNT2, "USD", ACCOUNT, -8, ACCOUNT2, 1000, ACCOUNT, 2000, INDEX1, 2, 0); return std::vector{ BookOffersNormalTestBundle{ @@ -665,26 +610,18 @@ generateNormalPathBookOffersTestBundles() getsXRPPaysUSDInputJson, // prepare offer dir index std::map>{ - {getsXRPPaysUSDBook, - ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}}, - {ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, - std::optional{}}}, + {getsXRPPaysUSDBook, ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}}, + {ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, std::optional{}}}, std::map{ // book dir object {ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1) - .getSerializer() - .peekData()}, + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1).getSerializer().peekData()}, // pays issuer account object {ripple::keylet::account(account).key, - CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData()}, + CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2).getSerializer().peekData()}, // owner account object {ripple::keylet::account(account2).key, - CreateAccountRootObject(ACCOUNT2, 0, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData()}, + CreateAccountRootObject(ACCOUNT2, 0, 2, 200, 2, INDEX1, 2).getSerializer().peekData()}, // fee settings: base ->3 inc->2, account2 has 2 objects ,total // reserve ->7 // owner_funds should be 193 @@ -727,26 +664,18 @@ generateNormalPathBookOffersTestBundles() getsXRPPaysUSDInputJson, // prepare offer dir index std::map>{ - {getsXRPPaysUSDBook, - ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}}, - {ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, - std::optional{}}}, + {getsXRPPaysUSDBook, ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}}, + {ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, std::optional{}}}, std::map{ // book dir object {ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1) - .getSerializer() - .peekData()}, + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1).getSerializer().peekData()}, // pays issuer account object {ripple::keylet::account(account).key, - CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData()}, + CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2).getSerializer().peekData()}, // owner account object, hold {ripple::keylet::account(account2).key, - CreateAccountRootObject(ACCOUNT2, 0, 2, 5 + 7, 2, INDEX1, 2) - .getSerializer() - .peekData()}, + CreateAccountRootObject(ACCOUNT2, 0, 2, 5 + 7, 2, INDEX1, 2).getSerializer().peekData()}, // fee settings: base ->3 inc->2, account2 has 2 objects // ,total // reserve ->7 @@ -796,20 +725,15 @@ generateNormalPathBookOffersTestBundles() getsXRPPaysUSDInputJson, // prepare offer dir index std::map>{ - {getsXRPPaysUSDBook, - ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}}, - 
{ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, - std::optional{}}}, + {getsXRPPaysUSDBook, ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}}, + {ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, std::optional{}}}, std::map{ // book dir object {ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1) - .getSerializer() - .peekData()}, + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1).getSerializer().peekData()}, // pays issuer account object {ripple::keylet::account(account).key, - CreateAccountRootObject( - ACCOUNT, ripple::lsfGlobalFreeze, 2, 200, 2, INDEX1, 2) + CreateAccountRootObject(ACCOUNT, ripple::lsfGlobalFreeze, 2, 200, 2, INDEX1, 2) .getSerializer() .peekData()}}, 3, @@ -857,27 +781,15 @@ generateNormalPathBookOffersTestBundles() paysXRPGetsUSDInputJson, // prepare offer dir index std::map>{ - {getsUSDPaysXRPBook, - ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, - {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - std::optional{}}}, + {getsUSDPaysXRPBook, ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, + {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, std::optional{}}}, std::map{ // book dir object {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1) - .getSerializer() - .peekData()}, + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1).getSerializer().peekData()}, // gets issuer account object {ripple::keylet::account(account).key, - CreateAccountRootObject( - ACCOUNT, - ripple::lsfGlobalFreeze, - 2, - 200, - 2, - INDEX1, - 2, - TRANSFERRATEX2) + CreateAccountRootObject(ACCOUNT, ripple::lsfGlobalFreeze, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2) .getSerializer() .peekData()}}, 3, @@ -926,26 +838,17 @@ generateNormalPathBookOffersTestBundles() paysXRPGetsUSDInputJson, // prepare offer dir index std::map>{ - {getsUSDPaysXRPBook, - ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, - {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - std::optional{}}}, + {getsUSDPaysXRPBook, ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, + {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, std::optional{}}}, std::map{ // book dir object {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1) - .getSerializer() - .peekData()}, + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1).getSerializer().peekData()}, // gets issuer account object, rate is 1/2 {ripple::keylet::account(account).key, - CreateAccountRootObject( - ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2) - .getSerializer() - .peekData()}, + CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2).getSerializer().peekData()}, // trust line between gets issuer and owner,owner has 8 USD - {ripple::keylet::line( - account2, account, ripple::to_currency("USD")) - .key, + {ripple::keylet::line(account2, account, ripple::to_currency("USD")).key, trustline8Balance.getSerializer().peekData()}, }, 6, @@ -994,36 +897,27 @@ generateNormalPathBookOffersTestBundles() paysXRPGetsUSDInputJson, // prepare offer dir index std::map>{ - {getsUSDPaysXRPBook, - ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, - {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - std::optional{}}}, + {getsUSDPaysXRPBook, ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, + {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, std::optional{}}}, std::map{ // book dir object {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1) + 
CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1) .getSerializer() .peekData()}, // gets issuer account object {ripple::keylet::account(account).key, - CreateAccountRootObject( - ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2) - .getSerializer() - .peekData()}, + CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2).getSerializer().peekData()}, // trust line between gets issuer and owner,owner has 30 USD - {ripple::keylet::line( - account2, account, ripple::to_currency("USD")) - .key, + {ripple::keylet::line(account2, account, ripple::to_currency("USD")).key, trustline30Balance.getSerializer().peekData()}, }, 6, - std::vector{ - // After offer1, balance is 30 - 2*10 = 10 - gets10USDPays20XRPOffer, - // offer2 not fully funded, balance is 10, rate is 2, so only - // gets 5 - gets10USDPays20XRPOffer}, + std::vector{// After offer1, balance is 30 - 2*10 = 10 + gets10USDPays20XRPOffer, + // offer2 not fully funded, balance is 10, rate is 2, so only + // gets 5 + gets10USDPays20XRPOffer}, fmt::format( R"({{ "ledger_hash":"{}", @@ -1090,22 +984,15 @@ generateNormalPathBookOffersTestBundles() paysXRPGetsUSDInputJson, // prepare offer dir index std::map>{ - {getsUSDPaysXRPBook, - ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, - {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - std::optional{}}}, + {getsUSDPaysXRPBook, ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, + {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, std::optional{}}}, std::map{ // book dir object {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1) - .getSerializer() - .peekData()}, + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1).getSerializer().peekData()}, // gets issuer account object, rate is 1/2 {ripple::keylet::account(account).key, - CreateAccountRootObject( - ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2) - .getSerializer() - .peekData()}, + CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2).getSerializer().peekData()}, }, 3, std::vector{gets10USDPays20XRPOwnerOffer}, @@ -1147,26 +1034,17 @@ generateNormalPathBookOffersTestBundles() paysXRPGetsUSDInputJson, // prepare offer dir index std::map>{ - {getsUSDPaysXRPBook, - ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, - {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - std::optional{}}}, + {getsUSDPaysXRPBook, ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}}, + {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, std::optional{}}}, std::map{ // book dir object {ripple::uint256{PAYS20XRPGETS10USDBOOKDIR}, - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1) - .getSerializer() - .peekData()}, + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1).getSerializer().peekData()}, // gets issuer account object, rate is 1/2 {ripple::keylet::account(account).key, - CreateAccountRootObject( - ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2) - .getSerializer() - .peekData()}, + CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2).getSerializer().peekData()}, // trust line between gets issuer and owner,owner has 8 USD - {ripple::keylet::line( - account2, account, ripple::to_currency("USD")) - .key, + {ripple::keylet::line(account2, account, ripple::to_currency("USD")).key, frozenTrustLine.getSerializer().peekData()}, }, 6, @@ -1227,8 +1105,7 @@ TEST_F(RPCBookOffersHandlerTest, LedgerNonExistViaIntSequence) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty 
ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)) - .WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -1261,8 +1138,7 @@ TEST_F(RPCBookOffersHandlerTest, LedgerNonExistViaSequence) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)) - .WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -1332,55 +1208,31 @@ TEST_F(RPCBookOffersHandlerTest, Limit) EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(ledgerinfo)); auto const issuer = GetAccountIDWithString(ACCOUNT); // return valid book dir EXPECT_CALL(*rawBackendPtr, doFetchSuccessorKey).Times(1); - auto const getsXRPPaysUSDBook = - getBookBase(std::get<ripple::Book>(RPC::parseBook( - ripple::to_currency("USD"), - issuer, - ripple::xrpCurrency(), - ripple::xrpAccount()))); + auto const getsXRPPaysUSDBook = getBookBase(std::get<ripple::Book>( + RPC::parseBook(ripple::to_currency("USD"), issuer, ripple::xrpCurrency(), ripple::xrpAccount()))); ON_CALL(*rawBackendPtr, doFetchSuccessorKey(getsXRPPaysUSDBook, seq, _)) .WillByDefault(Return(ripple::uint256{PAYS20USDGETS10XRPBOOKDIR})); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(5); - auto const indexes = - std::vector<ripple::uint256>(10, ripple::uint256{INDEX2}); + auto const indexes = std::vector<ripple::uint256>(10, ripple::uint256{INDEX2}); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, seq, _)) - .WillByDefault(Return(CreateOwnerDirLedgerObject(indexes, INDEX1) - .getSerializer() - .peekData())); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject( - ripple::keylet::account(GetAccountIDWithString(ACCOUNT2)).key, - seq, - _)) - .WillByDefault( - Return(CreateAccountRootObject(ACCOUNT2, 0, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData())); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::uint256{PAYS20USDGETS10XRPBOOKDIR}, seq, _)) + .WillByDefault(Return(CreateOwnerDirLedgerObject(indexes, INDEX1).getSerializer().peekData())); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::account(GetAccountIDWithString(ACCOUNT2)).key, seq, _)) + .WillByDefault(Return(CreateAccountRootObject(ACCOUNT2, 0, 2, 200, 2, INDEX1, 2).getSerializer().peekData())); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, seq, _)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, seq, _)) .WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0))); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ripple::keylet::account(issuer).key, seq, _)) - .WillByDefault( - Return(CreateAccountRootObject( - ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2) - .getSerializer() - .peekData())); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::account(issuer).key, seq, _)) + .WillByDefault(Return( + CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2, TRANSFERRATEX2).getSerializer().peekData())); auto const gets10XRPPays20USDOffer =
CreateOfferLedgerObject( ACCOUNT2, @@ -1392,8 +1244,7 @@ TEST_F(RPCBookOffersHandlerTest, Limit) ACCOUNT, PAYS20USDGETS10XRPBOOKDIR); - std::vector bbs( - 10, gets10XRPPays20USDOffer.getSerializer().peekData()); + std::vector bbs(10, gets10XRPPays20USDOffer.getSerializer().peekData()); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); diff --git a/unittests/rpc/handlers/GatewayBalancesTest.cpp b/unittests/rpc/handlers/GatewayBalancesTest.cpp index c2516eb3..f4edb63b 100644 --- a/unittests/rpc/handlers/GatewayBalancesTest.cpp +++ b/unittests/rpc/handlers/GatewayBalancesTest.cpp @@ -32,14 +32,10 @@ constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; constexpr static auto ACCOUNT3 = "raHGBERMka3KZsfpTQUAtumxmvpqhFLyrk"; constexpr static auto ISSUER = "rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto INDEX1 = - "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; -constexpr static auto TXNID = - "E3FE6EA3D48F0C2B639448020EA4F03D4F4F8FFDB243A852A0F59177921B4879"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto INDEX1 = "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto TXNID = "E3FE6EA3D48F0C2B639448020EA4F03D4F4F8FFDB243A852A0F59177921B4879"; class RPCGatewayBalancesHandlerTest : public HandlerBaseTest { @@ -53,8 +49,7 @@ struct ParameterTestBundle std::string expectedErrorMessage; }; -struct ParameterTest : public RPCGatewayBalancesHandlerTest, - public WithParamInterface +struct ParameterTest : public RPCGatewayBalancesHandlerTest, public WithParamInterface { struct NameGenerator { @@ -73,13 +68,11 @@ TEST_P(ParameterTest, CheckError) auto bundle = GetParam(); auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; runSpawn([&](auto& yield) { - auto const output = handler.process( - json::parse(bundle.testJson), Context{std::ref(yield)}); + auto const output = handler.process(json::parse(bundle.testJson), Context{std::ref(yield)}); ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), bundle.expectedError); - EXPECT_EQ( - err.at("error_message").as_string(), bundle.expectedErrorMessage); + EXPECT_EQ(err.at("error_message").as_string(), bundle.expectedErrorMessage); }); } @@ -194,8 +187,7 @@ TEST_F(RPCGatewayBalancesHandlerTest, LedgerNotFoundViaStringIndex) mockBackendPtr->updateRange(300); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::optional{})); auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; runSpawn([&](auto& yield) { @@ -223,8 +215,7 @@ TEST_F(RPCGatewayBalancesHandlerTest, LedgerNotFoundViaIntIndex) mockBackendPtr->updateRange(300); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - 
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; runSpawn([&](auto& yield) { @@ -281,14 +272,11 @@ TEST_F(RPCGatewayBalancesHandlerTest, AccountNotFound) EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(ledgerinfo)); // return empty account - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; - ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)) - .WillByDefault(Return(std::optional<Blob>{})); + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)).WillByDefault(Return(std::optional<Blob>{})); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const handler = AnyHandler{GatewayBalancesHandler{mockBackendPtr}}; @@ -316,27 +304,21 @@ TEST_F(RPCGatewayBalancesHandlerTest, InvalidHotWallet) EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(ledgerinfo)); // return valid account - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; - ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)) - .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)).WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); // return valid owner dir - auto const ownerDir = - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); // create a valid line, balance is 0 - auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 0, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123); + auto const line1 = CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 0, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123); std::vector<Blob> bbs; bbs.push_back(line1.getSerializer().peekData()); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -369,8 +351,7 @@ struct NormalTestBundle std::string hotwallet; }; -struct NormalPathTest : public RPCGatewayBalancesHandlerTest, - public WithParamInterface<NormalTestBundle> +struct NormalPathTest : public RPCGatewayBalancesHandlerTest, public WithParamInterface<NormalTestBundle> { struct NameGenerator { @@ -394,30 +375,24 @@ TEST_P(NormalPathTest, CheckOutput) EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return valid ledgerinfo auto const ledgerinfo =
CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(ledgerinfo)); // return valid account - auto const accountKk = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; - ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)) - .WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); + auto const accountKk = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + ON_CALL(*rawBackendPtr, doFetchLedgerObject(accountKk, seq, _)).WillByDefault(Return(Blob{'f', 'a', 'k', 'e'})); // return valid owner dir - auto const ownerDir = - CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(bundle.mockedDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); std::vector bbs; std::transform( - bundle.mockedObjects.begin(), - bundle.mockedObjects.end(), - std::back_inserter(bbs), - [](auto const& obj) { return obj.getSerializer().peekData(); }); + bundle.mockedObjects.begin(), bundle.mockedObjects.end(), std::back_inserter(bbs), [](auto const& obj) { + return obj.getSerializer().peekData(); + }); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); @@ -440,16 +415,14 @@ TEST_P(NormalPathTest, CheckOutput) auto generateNormalPathTestBundles() { - auto frozenState = CreateRippleStateLedgerObject( - ACCOUNT, "JPY", ISSUER, -50, ACCOUNT, 10, ACCOUNT3, 20, TXNID, 123); + auto frozenState = + CreateRippleStateLedgerObject(ACCOUNT, "JPY", ISSUER, -50, ACCOUNT, 10, ACCOUNT3, 20, TXNID, 123); frozenState.setFieldU32(ripple::sfFlags, ripple::lsfLowFreeze); - auto overflowState = CreateRippleStateLedgerObject( - ACCOUNT, "JPY", ISSUER, 50, ACCOUNT, 10, ACCOUNT3, 20, TXNID, 123); + auto overflowState = + CreateRippleStateLedgerObject(ACCOUNT, "JPY", ISSUER, 50, ACCOUNT, 10, ACCOUNT3, 20, TXNID, 123); int64_t min64 = -9922966390934554; - overflowState.setFieldAmount( - ripple::sfBalance, - ripple::STAmount(GetIssue("JPY", ISSUER), min64, 80)); + overflowState.setFieldAmount(ripple::sfBalance, ripple::STAmount(GetIssue("JPY", ISSUER), min64, 80)); return std::vector{ NormalTestBundle{ "AllBranches", @@ -461,67 +434,18 @@ generateNormalPathTestBundles() ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1), - std::vector{// hotwallet - CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - -10, - ACCOUNT, - 100, - ACCOUNT2, - 200, - TXNID, - 123), - // hotwallet - CreateRippleStateLedgerObject( - ACCOUNT, - "CNY", - ISSUER, - -20, - ACCOUNT, - 100, - ACCOUNT2, - 200, - TXNID, - 123), - // positive balance -> asset - CreateRippleStateLedgerObject( - ACCOUNT, - "EUR", - ISSUER, - 30, - ACCOUNT, - 100, - ACCOUNT3, - 200, - TXNID, - 123), - // positive balance -> asset - CreateRippleStateLedgerObject( - ACCOUNT, - "JPY", - ISSUER, - 40, - ACCOUNT, - 100, - ACCOUNT3, - 200, - TXNID, - 123), - // obligation - CreateRippleStateLedgerObject( - ACCOUNT, - "JPY", - ISSUER, - -50, - ACCOUNT, - 10, - ACCOUNT3, - 20, - TXNID, - 123), - frozenState + std::vector{ + // 
hotwallet + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, -10, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123), + // hotwallet + CreateRippleStateLedgerObject(ACCOUNT, "CNY", ISSUER, -20, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123), + // positive balance -> asset + CreateRippleStateLedgerObject(ACCOUNT, "EUR", ISSUER, 30, ACCOUNT, 100, ACCOUNT3, 200, TXNID, 123), + // positive balance -> asset + CreateRippleStateLedgerObject(ACCOUNT, "JPY", ISSUER, 40, ACCOUNT, 100, ACCOUNT3, 200, TXNID, 123), + // obligation + CreateRippleStateLedgerObject(ACCOUNT, "JPY", ISSUER, -50, ACCOUNT, 10, ACCOUNT3, 20, TXNID, 123), + frozenState }, fmt::format( @@ -573,17 +497,8 @@ generateNormalPathTestBundles() NormalTestBundle{ "NoHotwallet", CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}}, INDEX1), - std::vector{CreateRippleStateLedgerObject( - ACCOUNT, - "JPY", - ISSUER, - -50, - ACCOUNT, - 10, - ACCOUNT3, - 20, - TXNID, - 123)}, + std::vector{ + CreateRippleStateLedgerObject(ACCOUNT, "JPY", ISSUER, -50, ACCOUNT, 10, ACCOUNT3, 20, TXNID, 123)}, fmt::format( R"({{ "obligations":{{ @@ -597,8 +512,7 @@ generateNormalPathTestBundles() R"("ledger_index" : "validated")"}, NormalTestBundle{ "ObligationOverflow", - CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1), + CreateOwnerDirLedgerObject({ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1), std::vector{overflowState, overflowState}, fmt::format( R"({{ @@ -615,57 +529,15 @@ generateNormalPathTestBundles() NormalTestBundle{ "HighID", CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX2}, - ripple::uint256{INDEX2}, - ripple::uint256{INDEX2}, - ripple::uint256{INDEX2}}, + {ripple::uint256{INDEX2}, ripple::uint256{INDEX2}, ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1), - std::vector{// hotwallet - CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 10, - ACCOUNT2, - 100, - ACCOUNT, - 200, - TXNID, - 123), - // hotwallet - CreateRippleStateLedgerObject( - ACCOUNT, - "CNY", - ISSUER, - 20, - ACCOUNT2, - 100, - ACCOUNT, - 200, - TXNID, - 123), - CreateRippleStateLedgerObject( - ACCOUNT, - "EUR", - ISSUER, - 30, - ACCOUNT3, - 100, - ACCOUNT, - 200, - TXNID, - 123), - CreateRippleStateLedgerObject( - ACCOUNT, - "JPY", - ISSUER, - -50, - ACCOUNT3, - 10, - ACCOUNT, - 20, - TXNID, - 123)}, + std::vector{ + // hotwallet + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 10, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123), + // hotwallet + CreateRippleStateLedgerObject(ACCOUNT, "CNY", ISSUER, 20, ACCOUNT2, 100, ACCOUNT, 200, TXNID, 123), + CreateRippleStateLedgerObject(ACCOUNT, "EUR", ISSUER, 30, ACCOUNT3, 100, ACCOUNT, 200, TXNID, 123), + CreateRippleStateLedgerObject(ACCOUNT, "JPY", ISSUER, -50, ACCOUNT3, 10, ACCOUNT, 20, TXNID, 123)}, fmt::format( R"({{ "obligations":{{ @@ -702,44 +574,11 @@ generateNormalPathTestBundles() NormalTestBundle{ "HotWalletArray", CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX2}, - ripple::uint256{INDEX2}, - ripple::uint256{INDEX2}}, - INDEX1), + {ripple::uint256{INDEX2}, ripple::uint256{INDEX2}, ripple::uint256{INDEX2}}, INDEX1), std::vector{ - CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - -10, - ACCOUNT, - 100, - ACCOUNT2, - 200, - TXNID, - 123), - CreateRippleStateLedgerObject( - ACCOUNT, - "CNY", - ISSUER, - -20, - ACCOUNT, - 100, - ACCOUNT2, - 200, - TXNID, - 123), - CreateRippleStateLedgerObject( - ACCOUNT, - "EUR", - ISSUER, - -30, - ACCOUNT, - 100, - ACCOUNT3, - 200, - TXNID, - 123) + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, -10, 
ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123), + CreateRippleStateLedgerObject(ACCOUNT, "CNY", ISSUER, -20, ACCOUNT, 100, ACCOUNT2, 200, TXNID, 123), + CreateRippleStateLedgerObject(ACCOUNT, "EUR", ISSUER, -30, ACCOUNT, 100, ACCOUNT3, 200, TXNID, 123) }, fmt::format( diff --git a/unittests/rpc/handlers/LedgerEntryTest.cpp b/unittests/rpc/handlers/LedgerEntryTest.cpp index 8d3208da..1fd80c0b 100644 --- a/unittests/rpc/handlers/LedgerEntryTest.cpp +++ b/unittests/rpc/handlers/LedgerEntryTest.cpp @@ -28,14 +28,12 @@ using namespace RPCng; namespace json = boost::json; using namespace testing; -constexpr static auto INDEX1 = - "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; +constexpr static auto INDEX1 = "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; constexpr static auto RANGEMIN = 10; constexpr static auto RANGEMAX = 30; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; class RPCLedgerEntryTest : public HandlerBaseTest { @@ -50,8 +48,7 @@ struct ParamTestCaseBundle }; // parameterized test cases for parameters check -struct LedgerEntryParameterTest : public RPCLedgerEntryTest, - public WithParamInterface +struct LedgerEntryParameterTest : public RPCLedgerEntryTest, public WithParamInterface { struct NameGenerator { @@ -557,15 +554,12 @@ TEST_P(LedgerEntryParameterTest, InvalidParams) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), testBundle.expectedError); - EXPECT_EQ( - err.at("error_message").as_string(), - testBundle.expectedErrorMessage); + EXPECT_EQ(err.at("error_message").as_string(), testBundle.expectedErrorMessage); }); } // parameterized test cases for index -struct IndexTest : public HandlerBaseTest, - public WithParamInterface +struct IndexTest : public HandlerBaseTest, public WithParamInterface { struct NameGenerator { @@ -631,15 +625,12 @@ TEST_F(RPCLedgerEntryTest, LedgerEntryNotFound) // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, RANGEMAX); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)).WillByDefault(Return(ledgerinfo)); // return null for ledger entry - auto const key = - ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; + auto const key = ripple::keylet::account(GetAccountIDWithString(ACCOUNT)).key; EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); - ON_CALL(*rawBackendPtr, doFetchLedgerObject(key, RANGEMAX, _)) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(key, RANGEMAX, _)).WillByDefault(Return(std::optional{})); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; @@ -663,9 +654,7 @@ struct NormalPathTestBundle ripple::STObject mockedEntity; }; -struct RPCLedgerEntryNormalPathTest - : public RPCLedgerEntryTest, - public WithParamInterface +struct RPCLedgerEntryNormalPathTest : public RPCLedgerEntryTest, public WithParamInterface { struct NameGenerator { @@ -697,8 +686,7 @@ generateTestValuesForNormalPathTest() }})", INDEX1), ripple::uint256{INDEX1}, - CreateAccountRootObject( - 
ACCOUNT2, ripple::lsfGlobalFreeze, 1, 10, 2, INDEX1, 3)}, + CreateAccountRootObject(ACCOUNT2, ripple::lsfGlobalFreeze, 1, 10, 2, INDEX1, 3)}, NormalPathTestBundle{ "Payment_channel", fmt::format( @@ -708,8 +696,7 @@ generateTestValuesForNormalPathTest() }})", INDEX1), ripple::uint256{INDEX1}, - CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400)}, + CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400)}, NormalPathTestBundle{ "Check", fmt::format( @@ -729,8 +716,7 @@ generateTestValuesForNormalPathTest() }})", INDEX1), ripple::uint256{INDEX1}, - CreateOwnerDirLedgerObject( - std::vector{ripple::uint256{INDEX1}}, INDEX1)}, + CreateOwnerDirLedgerObject(std::vector{ripple::uint256{INDEX1}}, INDEX1)}, NormalPathTestBundle{ "OfferIndex", fmt::format( @@ -741,14 +727,7 @@ generateTestValuesForNormalPathTest() INDEX1), ripple::uint256{INDEX1}, CreateOfferLedgerObject( - ACCOUNT, - 100, - 200, - "USD", - "XRP", - ACCOUNT2, - ripple::toBase58(ripple::xrpAccount()), - INDEX1)}, + ACCOUNT, 100, 200, "USD", "XRP", ACCOUNT2, ripple::toBase58(ripple::xrpAccount()), INDEX1)}, NormalPathTestBundle{ "EscrowIndex", fmt::format( @@ -801,8 +780,7 @@ generateTestValuesForNormalPathTest() }})", INDEX1), ripple::keylet::page(ripple::uint256{INDEX1}, 2).key, - CreateOwnerDirLedgerObject( - std::vector{ripple::uint256{INDEX1}}, INDEX1)}, + CreateOwnerDirLedgerObject(std::vector{ripple::uint256{INDEX1}}, INDEX1)}, NormalPathTestBundle{ "DirectoryViaOwner", fmt::format( @@ -815,8 +793,7 @@ generateTestValuesForNormalPathTest() }})", ACCOUNT), ripple::keylet::page(ripple::keylet::ownerDir(account1), 2).key, - CreateOwnerDirLedgerObject( - std::vector{ripple::uint256{INDEX1}}, INDEX1)}, + CreateOwnerDirLedgerObject(std::vector{ripple::uint256{INDEX1}}, INDEX1)}, NormalPathTestBundle{ "DirectoryViaDefaultSubIndex", fmt::format( @@ -829,8 +806,7 @@ generateTestValuesForNormalPathTest() ACCOUNT), // default sub_index is 0 ripple::keylet::page(ripple::keylet::ownerDir(account1), 0).key, - CreateOwnerDirLedgerObject( - std::vector{ripple::uint256{INDEX1}}, INDEX1)}, + CreateOwnerDirLedgerObject(std::vector{ripple::uint256{INDEX1}}, INDEX1)}, NormalPathTestBundle{ "Escrow", fmt::format( @@ -871,18 +847,7 @@ generateTestValuesForNormalPathTest() ACCOUNT, ACCOUNT2), ripple::keylet::line(account1, account2, currency).key, - CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ACCOUNT2, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - INDEX1, - 123, - 0)}, + CreateRippleStateLedgerObject(ACCOUNT, "USD", ACCOUNT2, 100, ACCOUNT, 10, ACCOUNT2, 20, INDEX1, 123, 0)}, NormalPathTestBundle{ "Ticket", fmt::format( @@ -909,14 +874,7 @@ generateTestValuesForNormalPathTest() ACCOUNT), ripple::keylet::offer(account1, 2).key, CreateOfferLedgerObject( - ACCOUNT, - 100, - 200, - "USD", - "XRP", - ACCOUNT2, - ripple::toBase58(ripple::xrpAccount()), - INDEX1)}}; + ACCOUNT, 100, 200, "USD", "XRP", ACCOUNT2, ripple::toBase58(ripple::xrpAccount()), INDEX1)}}; } INSTANTIATE_TEST_CASE_P( @@ -936,15 +894,11 @@ TEST_P(RPCLedgerEntryNormalPathTest, NormalPath) // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, RANGEMAX); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); - ON_CALL( - *rawBackendPtr, - 
doFetchLedgerObject(testBundle.expectedIndex, RANGEMAX, _)) - .WillByDefault( - Return(testBundle.mockedEntity.getSerializer().peekData())); + ON_CALL(*rawBackendPtr, doFetchLedgerObject(testBundle.expectedIndex, RANGEMAX, _)) + .WillByDefault(Return(testBundle.mockedEntity.getSerializer().peekData())); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; @@ -956,9 +910,7 @@ TEST_P(RPCLedgerEntryNormalPathTest, NormalPath) EXPECT_EQ( output.value().at("node_binary").as_string(), ripple::strHex(testBundle.mockedEntity.getSerializer().peekData())); - EXPECT_EQ( - ripple::uint256(output.value().at("index").as_string().c_str()), - testBundle.expectedIndex); + EXPECT_EQ(ripple::uint256(output.value().at("index").as_string().c_str()), testBundle.expectedIndex); }); } @@ -991,16 +943,12 @@ TEST_F(RPCLedgerEntryTest, BinaryFalse) // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, RANGEMAX); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)).WillByDefault(Return(ledgerinfo)); // return valid ledger entry which can be deserialized - auto const ledgerEntry = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400); + auto const ledgerEntry = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ripple::uint256{INDEX1}, RANGEMAX, _)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::uint256{INDEX1}, RANGEMAX, _)) .WillByDefault(Return(ledgerEntry.getSerializer().peekData())); runSpawn([&, this](auto& yield) { @@ -1024,16 +972,12 @@ TEST_F(RPCLedgerEntryTest, UnexpectedLedgerType) // return valid ledgerinfo auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, RANGEMAX); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)).WillByDefault(Return(ledgerinfo)); // return valid ledger entry which can be deserialized - auto const ledgerEntry = CreatePaymentChannelLedgerObject( - ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400); + auto const ledgerEntry = CreatePaymentChannelLedgerObject(ACCOUNT, ACCOUNT2, 100, 200, 300, INDEX1, 400); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(ripple::uint256{INDEX1}, RANGEMAX, _)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::uint256{INDEX1}, RANGEMAX, _)) .WillByDefault(Return(ledgerEntry.getSerializer().peekData())); runSpawn([&, this](auto& yield) { @@ -1057,8 +1001,7 @@ TEST_F(RPCLedgerEntryTest, LedgerNotExistViaIntSequence) mockBackendPtr->updateRange(RANGEMAX); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)).WillByDefault(Return(std::nullopt)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; @@ -1084,8 +1027,7 @@ TEST_F(RPCLedgerEntryTest, LedgerNotExistViaStringSequence) mockBackendPtr->updateRange(RANGEMAX); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - 
ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(RANGEMAX, _)).WillByDefault(Return(std::nullopt)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; @@ -1111,8 +1053,7 @@ TEST_F(RPCLedgerEntryTest, LedgerNotExistViaHash) mockBackendPtr->updateRange(RANGEMAX); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(std::nullopt)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{LedgerEntryHandler{mockBackendPtr}}; diff --git a/unittests/rpc/handlers/NFTBuyOffersTest.cpp b/unittests/rpc/handlers/NFTBuyOffersTest.cpp index 5bdfddf0..c5d43fee 100644 --- a/unittests/rpc/handlers/NFTBuyOffersTest.cpp +++ b/unittests/rpc/handlers/NFTBuyOffersTest.cpp @@ -29,14 +29,10 @@ namespace json = boost::json; using namespace testing; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto NFTID = - "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004"; -constexpr static auto INDEX1 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto NFTID = "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004"; +constexpr static auto INDEX1 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; class RPCNFTBuyOffersHandlerTest : public HandlerBaseTest { @@ -135,8 +131,7 @@ TEST_F(RPCNFTBuyOffersHandlerTest, NFTIDNotString) // error case ledger non exist via hash TEST_F(RPCNFTBuyOffersHandlerTest, NonExistLedgerViaLedgerHash) { - MockBackend* rawBackendPtr = - static_cast<MockBackend*>(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get()); // mock fetchLedgerByHash return empty ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) .WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); @@ -163,13 +158,11 @@ TEST_F(RPCNFTBuyOffersHandlerTest, NonExistLedgerViaLedgerHash) // error case ledger non exist via index TEST_F(RPCNFTBuyOffersHandlerTest, NonExistLedgerViaLedgerIndex) { - MockBackend* rawBackendPtr = - static_cast<MockBackend*>(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional<ripple::LedgerInfo>{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -191,14 +184,12 @@ TEST_F(RPCNFTBuyOffersHandlerTest, NonExistLedgerViaLedgerIndex) // idk why this case will happen in reality TEST_F(RPCNFTBuyOffersHandlerTest, NonExistLedgerViaLedgerHash2) { - MockBackend* rawBackendPtr = - static_cast<MockBackend*>(mockBackendPtr.get()); + MockBackend*
rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerByHash return ledger but seq is 31 > 30 auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 31); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -220,8 +211,7 @@ TEST_F(RPCNFTBuyOffersHandlerTest, NonExistLedgerViaLedgerHash2) // error case ledger > max seq via index TEST_F(RPCNFTBuyOffersHandlerTest, NonExistLedgerViaLedgerIndex2) { - MockBackend* rawBackendPtr = - static_cast<MockBackend*>(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // no need to check from db, call fetchLedgerBySequence 0 time @@ -246,16 +236,13 @@ TEST_F(RPCNFTBuyOffersHandlerTest, NonExistLedgerViaLedgerIndex2) // error case when nft is not found TEST_F(RPCNFTBuyOffersHandlerTest, NoNFT) { - MockBackend* rawBackendPtr = - static_cast<MockBackend*>(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::nullopt)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -382,28 +369,20 @@ TEST_F(RPCNFTBuyOffersHandlerTest, DefaultParameters) } ] })"; - MockBackend* rawBackendPtr = - static_cast<MockBackend*>(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return owner index containing 2 indexes auto const directory = ripple::keylet::nft_buys(ripple::uint256{NFTID}); auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(directory.key, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(directory.key, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, - doFetchLedgerObject(directory.key, testing::_, testing::_)) - .Times(2); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(directory.key, testing::_, testing::_)).Times(2); // return two nft buy offers std::vector<Blob> bbs;
when provided with nft_id and limit TEST_F(RPCNFTBuyOffersHandlerTest, MultipleResultsWithMarkerAndLimitOutput) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return owner index @@ -452,8 +429,7 @@ TEST_F(RPCNFTBuyOffersHandlerTest, MultipleResultsWithMarkerAndLimitOutput) } ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(ownerDir.getSerializer().peekData())); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -481,13 +457,11 @@ TEST_F(RPCNFTBuyOffersHandlerTest, MultipleResultsWithMarkerAndLimitOutput) // normal case when provided with nft_id, limit and marker TEST_F(RPCNFTBuyOffersHandlerTest, ResultsForInputWithMarkerAndLimit) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return owner index @@ -505,25 +479,18 @@ TEST_F(RPCNFTBuyOffersHandlerTest, ResultsForInputWithMarkerAndLimit) auto const cursorBuyOffer = CreateNFTBuyOffer(NFTID, ACCOUNT); // first is nft offer object - auto const cursor = ripple::uint256{ - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC353"}; + auto const cursor = ripple::uint256{"E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC353"}; auto const first = ripple::keylet::nftoffer(cursor); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) .WillByDefault(Return(cursorBuyOffer.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) - .Times(1); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)).Times(1); auto const directory = ripple::keylet::nft_buys(ripple::uint256{NFTID}); auto const startHint = 0ul; // offer node is hardcoded to 0ul auto const secondKey = ripple::keylet::page(directory, startHint).key; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) - .Times(3); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)).Times(3); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); EXPECT_CALL(*rawBackendPtr, 
doFetchLedgerObjects).Times(1); @@ -551,17 +518,13 @@ TEST_F(RPCNFTBuyOffersHandlerTest, ResultsForInputWithMarkerAndLimit) // normal case when provided with nft_id, limit and marker // nothing left after reading remaining 50 entries -TEST_F( - RPCNFTBuyOffersHandlerTest, - ResultsWithoutMarkerForInputWithMarkerAndLimit) +TEST_F(RPCNFTBuyOffersHandlerTest, ResultsWithoutMarkerForInputWithMarkerAndLimit) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return owner index @@ -579,25 +542,18 @@ TEST_F( auto const cursorBuyOffer = CreateNFTBuyOffer(NFTID, ACCOUNT); // first is nft offer object - auto const cursor = ripple::uint256{ - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC353"}; + auto const cursor = ripple::uint256{"E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC353"}; auto const first = ripple::keylet::nftoffer(cursor); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) .WillByDefault(Return(cursorBuyOffer.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) - .Times(1); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)).Times(1); auto const directory = ripple::keylet::nft_buys(ripple::uint256{NFTID}); auto const startHint = 0ul; // offer node is hardcoded to 0ul auto const secondKey = ripple::keylet::page(directory, startHint).key; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) - .Times(3); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)).Times(3); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); diff --git a/unittests/rpc/handlers/NFTHistoryTest.cpp b/unittests/rpc/handlers/NFTHistoryTest.cpp index 6c3eabb3..6f358088 100644 --- a/unittests/rpc/handlers/NFTHistoryTest.cpp +++ b/unittests/rpc/handlers/NFTHistoryTest.cpp @@ -32,10 +32,8 @@ constexpr static auto MINSEQ = 10; constexpr static auto MAXSEQ = 30; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto NFTID = - "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto NFTID = "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004"; class RPCNFTHistoryHandlerTest : public HandlerBaseTest { @@ -50,9 +48,8 @@ struct NFTHistoryParamTestCaseBundle }; // parameterized test cases for parameters check 
-struct NFTHistoryParameterTest - : public RPCNFTHistoryHandlerTest, - public WithParamInterface +struct NFTHistoryParameterTest : public RPCNFTHistoryHandlerTest, + public WithParamInterface { struct NameGenerator { @@ -60,8 +57,7 @@ struct NFTHistoryParameterTest std::string operator()(const testing::TestParamInfo& info) const { - auto bundle = - static_cast(info.param); + auto bundle = static_cast(info.param); return bundle.testName; } }; @@ -71,11 +67,7 @@ static auto generateTestValuesForParametersTest() { return std::vector{ - NFTHistoryParamTestCaseBundle{ - "MissingNFTID", - R"({})", - "invalidParams", - "Required field 'nft_id' missing"}, + NFTHistoryParamTestCaseBundle{"MissingNFTID", R"({})", "invalidParams", "Required field 'nft_id' missing"}, NFTHistoryParamTestCaseBundle{ "BinaryNotBool", R"({"nft_id":"00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004", "binary": 1})", @@ -223,9 +215,7 @@ TEST_P(NFTHistoryParameterTest, InvalidParams) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), testBundle.expectedError); - EXPECT_EQ( - err.at("error_message").as_string(), - testBundle.expectedErrorMessage); + EXPECT_EQ(err.at("error_message").as_string(), testBundle.expectedErrorMessage); }); } @@ -234,23 +224,19 @@ genTransactions(uint32_t seq1, uint32_t seq2) { auto transactions = std::vector{}; auto trans1 = TransactionAndMetadata(); - ripple::STObject obj = - CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 1, 1, 32); + ripple::STObject obj = CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 1, 1, 32); trans1.transaction = obj.getSerializer().peekData(); trans1.ledgerSequence = seq1; - ripple::STObject metaObj = - CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 22, 23); + ripple::STObject metaObj = CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 22, 23); trans1.metadata = metaObj.getSerializer().peekData(); trans1.date = 1; transactions.push_back(trans1); auto trans2 = TransactionAndMetadata(); - ripple::STObject obj2 = - CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 1, 1, 32); + ripple::STObject obj2 = CreatePaymentTransactionObject(ACCOUNT, ACCOUNT2, 1, 1, 32); trans2.transaction = obj.getSerializer().peekData(); trans2.ledgerSequence = seq2; - ripple::STObject metaObj2 = - CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 22, 23); + ripple::STObject metaObj2 = CreatePaymentTransactionMetaObject(ACCOUNT, ACCOUNT2, 22, 23); trans2.metadata = metaObj2.getSerializer().peekData(); trans2.date = 2; transactions.push_back(trans2); @@ -261,13 +247,10 @@ TEST_F(RPCNFTHistoryHandlerTest, IndexSpecificForwardTrue) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( @@ -295,9 +278,7 @@ TEST_F(RPCNFTHistoryHandlerTest, IndexSpecificForwardTrue) EXPECT_EQ(output->at("nft_id").as_string(), NFTID); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ + 1); 
EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ - 1); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -307,21 +288,17 @@ TEST_F(RPCNFTHistoryHandlerTest, IndexSpecificForwardFalse) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), testing::_)) .Times(1); @@ -342,9 +319,7 @@ TEST_F(RPCNFTHistoryHandlerTest, IndexSpecificForwardFalse) EXPECT_EQ(output->at("nft_id").as_string(), NFTID); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ + 1); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ - 1); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -354,21 +329,14 @@ TEST_F(RPCNFTHistoryHandlerTest, IndexNotSpecificForwardTrue) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( - testing::_, - testing::_, - true, - testing::Optional(testing::Eq(TransactionsCursor{MINSEQ, 0})), - testing::_)) + testing::_, testing::_, true, testing::Optional(testing::Eq(TransactionsCursor{MINSEQ, 0})), testing::_)) .Times(1); runSpawn([&, this](auto& yield) { @@ -388,9 +356,7 @@ TEST_F(RPCNFTHistoryHandlerTest, IndexNotSpecificForwardTrue) EXPECT_EQ(output->at("nft_id").as_string(), NFTID); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -400,21 
+366,17 @@ TEST_F(RPCNFTHistoryHandlerTest, IndexNotSpecificForwardFalse) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ, INT32_MAX})), testing::_)) .Times(1); @@ -435,9 +397,7 @@ TEST_F(RPCNFTHistoryHandlerTest, IndexNotSpecificForwardFalse) EXPECT_EQ(output->at("nft_id").as_string(), NFTID); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -447,21 +407,17 @@ TEST_F(RPCNFTHistoryHandlerTest, BinaryTrue) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ, INT32_MAX})), testing::_)) .Times(1); @@ -482,35 +438,19 @@ TEST_F(RPCNFTHistoryHandlerTest, BinaryTrue) EXPECT_EQ(output->at("nft_id").as_string(), NFTID); EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); EXPECT_EQ( - output->at("transactions") - .as_array()[0] - .as_object() - .at("meta") - .as_string(), + output->at("transactions").as_array()[0].as_object().at("meta").as_string(), "201C00000000F8E5110061E762400000000000001681144B4E9C06F24296074F7B" "C48F92A97916C6DC5EA9E1E1E5110061E76240000000000000178114D31252CF90" "2EF8DD8451243869B38667CBD89DF3E1E1F1031000"); EXPECT_EQ( - output->at("transactions") - .as_array()[0] - .as_object() - .at("tx_blob") - .as_string(), + output->at("transactions").as_array()[0].as_object().at("tx_blob").as_string(), 
"120000240000002061400000000000000168400000000000000173047465737481" "144B4E9C06F24296074F7BC48F92A97916C6DC5EA98314D31252CF902EF8DD8451" "243869B38667CBD89DF3"); - EXPECT_EQ( - output->at("transactions") - .as_array()[0] - .as_object() - .at("date") - .as_uint64(), - 1); + EXPECT_EQ(output->at("transactions").as_array()[0].as_object().at("date").as_uint64(), 1); EXPECT_FALSE(output->as_object().contains("limit")); }); @@ -520,21 +460,14 @@ TEST_F(RPCNFTHistoryHandlerTest, LimitAndMarker) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MINSEQ + 1, MAXSEQ - 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( - testing::_, - testing::_, - false, - testing::Optional(testing::Eq(TransactionsCursor{10, 11})), - testing::_)) + testing::_, testing::_, false, testing::Optional(testing::Eq(TransactionsCursor{10, 11})), testing::_)) .Times(1); runSpawn([&, this](auto& yield) { @@ -557,9 +490,7 @@ TEST_F(RPCNFTHistoryHandlerTest, LimitAndMarker) EXPECT_EQ(output->at("ledger_index_min").as_uint64(), MINSEQ); EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ); EXPECT_EQ(output->at("limit").as_uint64(), 2); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); EXPECT_EQ(output->at("transactions").as_array().size(), 2); }); } @@ -568,29 +499,24 @@ TEST_F(RPCNFTHistoryHandlerTest, SpecificLedgerIndex) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // adjust the order for forward->false auto const transactions = genTransactions(MAXSEQ - 1, MINSEQ + 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), testing::_)) .Times(1); auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, MAXSEQ - 1); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)).WillByDefault(Return(ledgerinfo)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{NFTHistoryHandler{mockBackendPtr}}; @@ -616,12 +542,10 @@ TEST_F(RPCNFTHistoryHandlerTest, SpecificNonexistLedgerIntIndex) { mockBackendPtr->updateRange(MINSEQ); // min 
mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)).WillByDefault(Return(std::nullopt)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{NFTHistoryHandler{mockBackendPtr}}; @@ -644,12 +568,10 @@ TEST_F(RPCNFTHistoryHandlerTest, SpecificNonexistLedgerStringIndex) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(MAXSEQ - 1, _)).WillByDefault(Return(std::nullopt)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{NFTHistoryHandler{mockBackendPtr}}; @@ -672,29 +594,24 @@ TEST_F(RPCNFTHistoryHandlerTest, SpecificLedgerHash) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // adjust the order for forward->false auto const transactions = genTransactions(MAXSEQ - 1, MINSEQ + 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), testing::_)) .Times(1); auto const ledgerinfo = CreateLedgerInfo(LEDGERHASH, MAXSEQ - 1); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); runSpawn([&, this](auto& yield) { auto const handler = AnyHandler{NFTHistoryHandler{mockBackendPtr}}; @@ -720,21 +637,17 @@ TEST_F(RPCNFTHistoryHandlerTest, TxLessThanMinSeq) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MAXSEQ - 1, MINSEQ + 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( testing::_, testing::_, false, - testing::Optional( - 
testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 1, INT32_MAX})), testing::_)) .Times(1); @@ -765,21 +678,17 @@ TEST_F(RPCNFTHistoryHandlerTest, TxLargerThanMaxSeq) { mockBackendPtr->updateRange(MINSEQ); // min mockBackendPtr->updateRange(MAXSEQ); // max - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); auto const transactions = genTransactions(MAXSEQ - 1, MINSEQ + 1); - auto const transCursor = - TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; - ON_CALL(*rawBackendPtr, fetchNFTTransactions) - .WillByDefault(Return(transCursor)); + auto const transCursor = TransactionsAndCursor{transactions, TransactionsCursor{12, 34}}; + ON_CALL(*rawBackendPtr, fetchNFTTransactions).WillByDefault(Return(transCursor)); EXPECT_CALL( *rawBackendPtr, fetchNFTTransactions( testing::_, testing::_, false, - testing::Optional( - testing::Eq(TransactionsCursor{MAXSEQ - 2, INT32_MAX})), + testing::Optional(testing::Eq(TransactionsCursor{MAXSEQ - 2, INT32_MAX})), testing::_)) .Times(1); @@ -802,8 +711,6 @@ TEST_F(RPCNFTHistoryHandlerTest, TxLargerThanMaxSeq) EXPECT_EQ(output->at("ledger_index_max").as_uint64(), MAXSEQ - 2); EXPECT_EQ(output->at("transactions").as_array().size(), 1); EXPECT_FALSE(output->as_object().contains("limit")); - EXPECT_EQ( - output->at("marker").as_object(), - json::parse(R"({"ledger":12,"seq":34})")); + EXPECT_EQ(output->at("marker").as_object(), json::parse(R"({"ledger":12,"seq":34})")); }); } diff --git a/unittests/rpc/handlers/NFTInfoTest.cpp b/unittests/rpc/handlers/NFTInfoTest.cpp index e9bd71d8..cda00078 100644 --- a/unittests/rpc/handlers/NFTInfoTest.cpp +++ b/unittests/rpc/handlers/NFTInfoTest.cpp @@ -29,12 +29,9 @@ namespace json = boost::json; using namespace testing; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto NFTID = - "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004"; -constexpr static auto NFTID2 = - "00081388319F12E15BCA13E1B933BF4C99C8E1BBC36BD4910A85D52F00000022"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto NFTID = "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004"; +constexpr static auto NFTID2 = "00081388319F12E15BCA13E1B933BF4C99C8E1BBC36BD4910A85D52F00000022"; class RPCNFTInfoHandlerTest : public HandlerBaseTest { @@ -133,8 +130,7 @@ TEST_F(RPCNFTInfoHandlerTest, NFTIDNotString) // error case ledger non exist via hash TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerHash) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // mock fetchLedgerByHash return empty ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) .WillByDefault(Return(std::optional{})); @@ -161,13 +157,11 @@ TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerHash) // error case ledger non exist via index TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerStringIndex) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, 
fetchLedgerBySequence) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -187,13 +181,11 @@ TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerStringIndex) TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerIntIndex) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -215,14 +207,12 @@ TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerIntIndex) // idk why this case will happen in reality TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerHash2) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerByHash return ledger but seq is 31 > 30 auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 31); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -244,8 +234,7 @@ TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerHash2) // error case ledger > max seq via index TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerIndex2) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // no need to check from db,call fetchLedgerBySequence 0 time @@ -270,19 +259,15 @@ TEST_F(RPCNFTInfoHandlerTest, NonExistLedgerViaLedgerIndex2) // error case nft does not exist TEST_F(RPCNFTInfoHandlerTest, NonExistNFT) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch nft return emtpy - ON_CALL(*rawBackendPtr, fetchNFT) - .WillByDefault(Return(std::optional{})); - EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID}, 30, _)) - .Times(1); + ON_CALL(*rawBackendPtr, fetchNFT).WillByDefault(Return(std::optional{})); + EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID}, 30, _)).Times(1); auto const input = json::parse(fmt::format( R"({{ "nft_id": "{}", @@ -316,21 +301,17 @@ TEST_F(RPCNFTInfoHandlerTest, DefaultParameters) "uri": "757269", "validated": true })"; - MockBackend* rawBackendPtr = - 
static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch nft return something - auto const nft = - std::make_optional(CreateNFT(NFTID, ACCOUNT, ledgerInfo.seq)); + auto const nft = std::make_optional(CreateNFT(NFTID, ACCOUNT, ledgerInfo.seq)); ON_CALL(*rawBackendPtr, fetchNFT).WillByDefault(Return(nft)); - EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID}, 30, _)) - .Times(1); + EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID}, 30, _)).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -360,21 +341,18 @@ TEST_F(RPCNFTInfoHandlerTest, BurnedNFT) "nft_serial": 4, "validated": true })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch nft return something - auto const nft = std::make_optional(CreateNFT( - NFTID, ACCOUNT, ledgerInfo.seq, ripple::Blob{'u', 'r', 'i'}, true)); + auto const nft = + std::make_optional(CreateNFT(NFTID, ACCOUNT, ledgerInfo.seq, ripple::Blob{'u', 'r', 'i'}, true)); ON_CALL(*rawBackendPtr, fetchNFT).WillByDefault(Return(nft)); - EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID}, 30, _)) - .Times(1); + EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID}, 30, _)).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -405,21 +383,17 @@ TEST_F(RPCNFTInfoHandlerTest, NotBurnedNFTWithoutURI) "uri": "", "validated": true })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch nft return something - auto const nft = std::make_optional( - CreateNFT(NFTID, ACCOUNT, ledgerInfo.seq, ripple::Blob{})); + auto const nft = std::make_optional(CreateNFT(NFTID, ACCOUNT, ledgerInfo.seq, ripple::Blob{})); ON_CALL(*rawBackendPtr, fetchNFT).WillByDefault(Return(nft)); - EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID}, 30, _)) - .Times(1); + EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID}, 30, _)).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -450,21 +424,17 @@ TEST_F(RPCNFTInfoHandlerTest, NFTWithExtraFieldsSet) "uri": "757269", "validated": true })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = 
CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // fetch nft return something - auto const nft = - std::make_optional(CreateNFT(NFTID2, ACCOUNT, ledgerInfo.seq)); + auto const nft = std::make_optional(CreateNFT(NFTID2, ACCOUNT, ledgerInfo.seq)); ON_CALL(*rawBackendPtr, fetchNFT).WillByDefault(Return(nft)); - EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID2}, 30, _)) - .Times(1); + EXPECT_CALL(*rawBackendPtr, fetchNFT(ripple::uint256{NFTID2}, 30, _)).Times(1); auto const input = json::parse(fmt::format( R"({{ diff --git a/unittests/rpc/handlers/NFTSellOffersTest.cpp b/unittests/rpc/handlers/NFTSellOffersTest.cpp index 5ef11c92..4447a3f6 100644 --- a/unittests/rpc/handlers/NFTSellOffersTest.cpp +++ b/unittests/rpc/handlers/NFTSellOffersTest.cpp @@ -29,14 +29,10 @@ namespace json = boost::json; using namespace testing; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto NFTID = - "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004"; -constexpr static auto INDEX1 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto NFTID = "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004"; +constexpr static auto INDEX1 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; class RPCNFTSellOffersHandlerTest : public HandlerBaseTest { @@ -135,8 +131,7 @@ TEST_F(RPCNFTSellOffersHandlerTest, NFTIDNotString) // error case ledger non exist via hash TEST_F(RPCNFTSellOffersHandlerTest, NonExistLedgerViaLedgerHash) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // mock fetchLedgerByHash return empty ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) .WillByDefault(Return(std::optional{})); @@ -163,13 +158,11 @@ TEST_F(RPCNFTSellOffersHandlerTest, NonExistLedgerViaLedgerHash) // error case ledger non exist via index TEST_F(RPCNFTSellOffersHandlerTest, NonExistLedgerViaLedgerIndex) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -191,14 +184,12 @@ TEST_F(RPCNFTSellOffersHandlerTest, NonExistLedgerViaLedgerIndex) // idk why this case will happen in reality TEST_F(RPCNFTSellOffersHandlerTest, NonExistLedgerViaLedgerHash2) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); 
mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerByHash return ledger but seq is 31 > 30 auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 31); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -220,8 +211,7 @@ TEST_F(RPCNFTSellOffersHandlerTest, NonExistLedgerViaLedgerHash2) // error case ledger > max seq via index TEST_F(RPCNFTSellOffersHandlerTest, NonExistLedgerViaLedgerIndex2) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // no need to check from db, call fetchLedgerBySequence 0 time @@ -246,16 +236,13 @@ TEST_F(RPCNFTSellOffersHandlerTest, NonExistLedgerViaLedgerIndex2) // error case when nft is not found TEST_F(RPCNFTSellOffersHandlerTest, NoNFT) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::nullopt)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -382,28 +369,20 @@ TEST_F(RPCNFTSellOffersHandlerTest, DefaultParameters) } ] })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return owner index containing 2 indexes auto const directory = ripple::keylet::nft_sells(ripple::uint256{NFTID}); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - ON_CALL( - *rawBackendPtr, - doFetchLedgerObject(directory.key, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(directory.key, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, - doFetchLedgerObject(directory.key, testing::_, testing::_)) - .Times(2); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(directory.key, testing::_, testing::_)).Times(2); // return two nft sell offers std::vector bbs; @@ -430,13 +409,11 @@ TEST_F(RPCNFTSellOffersHandlerTest, DefaultParameters) // normal case when provided with nft_id and limit 
TEST_F(RPCNFTSellOffersHandlerTest, MultipleResultsWithMarkerAndLimitOutput) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return owner index @@ -452,8 +429,7 @@ TEST_F(RPCNFTSellOffersHandlerTest, MultipleResultsWithMarkerAndLimitOutput) } ripple::STObject ownerDir = CreateOwnerDirLedgerObject(indexes, INDEX1); - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(ownerDir.getSerializer().peekData())); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); @@ -481,13 +457,11 @@ TEST_F(RPCNFTSellOffersHandlerTest, MultipleResultsWithMarkerAndLimitOutput) // normal case when provided with nft_id, limit and marker TEST_F(RPCNFTSellOffersHandlerTest, ResultsForInputWithMarkerAndLimit) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return owner index @@ -505,25 +479,18 @@ TEST_F(RPCNFTSellOffersHandlerTest, ResultsForInputWithMarkerAndLimit) auto const cursorSellOffer = CreateNFTSellOffer(NFTID, ACCOUNT); // first is nft offer object - auto const cursor = ripple::uint256{ - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC353"}; + auto const cursor = ripple::uint256{"E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC353"}; auto const first = ripple::keylet::nftoffer(cursor); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) .WillByDefault(Return(cursorSellOffer.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) - .Times(1); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)).Times(1); auto const directory = ripple::keylet::nft_sells(ripple::uint256{NFTID}); auto const startHint = 0ul; // offer node is hardcoded to 0ul auto const secondKey = ripple::keylet::page(directory, startHint).key; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) - .Times(3); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)).Times(3); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); @@ -551,17 +518,13 @@ 
TEST_F(RPCNFTSellOffersHandlerTest, ResultsForInputWithMarkerAndLimit) // normal case when provided with nft_id, limit and marker // nothing left after reading remaining 50 entries -TEST_F( - RPCNFTSellOffersHandlerTest, - ResultsWithoutMarkerForInputWithMarkerAndLimit) +TEST_F(RPCNFTSellOffersHandlerTest, ResultsWithoutMarkerForInputWithMarkerAndLimit) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerInfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(ledgerInfo)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(ledgerInfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return owner index @@ -579,25 +542,18 @@ TEST_F( auto const cursorSellOffer = CreateNFTSellOffer(NFTID, ACCOUNT); // first is nft offer object - auto const cursor = ripple::uint256{ - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC353"}; + auto const cursor = ripple::uint256{"E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC353"}; auto const first = ripple::keylet::nftoffer(cursor); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) .WillByDefault(Return(cursorSellOffer.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)) - .Times(1); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(first.key, testing::_, testing::_)).Times(1); auto const directory = ripple::keylet::nft_sells(ripple::uint256{NFTID}); auto const startHint = 0ul; // offer node is hardcoded to 0ul auto const secondKey = ripple::keylet::page(directory, startHint).key; - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); - EXPECT_CALL( - *rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)) - .Times(3); + EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject(secondKey, testing::_, testing::_)).Times(3); ON_CALL(*rawBackendPtr, doFetchLedgerObjects).WillByDefault(Return(bbs)); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObjects).Times(1); diff --git a/unittests/rpc/handlers/NoRippleCheckTest.cpp b/unittests/rpc/handlers/NoRippleCheckTest.cpp index f5a3cc59..62ca89df 100644 --- a/unittests/rpc/handlers/NoRippleCheckTest.cpp +++ b/unittests/rpc/handlers/NoRippleCheckTest.cpp @@ -30,15 +30,11 @@ using namespace testing; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; -constexpr static auto LEDGERHASH = - "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; -constexpr static auto INDEX1 = - "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; -constexpr static auto INDEX2 = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; +constexpr static auto LEDGERHASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652"; +constexpr static auto INDEX1 = "1B8590C01B0006EDFA9ED60296DD052DC5E90F99659B25014D08E1BC983515BC"; +constexpr static auto INDEX2 = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC321"; constexpr static auto ISSUER = 
"rK9DrarGKnVEo2nYp5MfVRXRYf5yRX3mwD"; -constexpr static auto TXNID = - "E3FE6EA3D48F0C2B639448020EA4F03D4F4F8FFDB243A852A0F59177921B4879"; +constexpr static auto TXNID = "E3FE6EA3D48F0C2B639448020EA4F03D4F4F8FFDB243A852A0F59177921B4879"; class RPCNoRippleCheckTest : public HandlerBaseTest { @@ -53,9 +49,7 @@ struct NoRippleParamTestCaseBundle }; // parameterized test cases for parameters check -struct NoRippleCheckParameterTest - : public RPCNoRippleCheckTest, - public WithParamInterface +struct NoRippleCheckParameterTest : public RPCNoRippleCheckTest, public WithParamInterface { struct NameGenerator { @@ -158,9 +152,7 @@ TEST_P(NoRippleCheckParameterTest, InvalidParams) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), testBundle.expectedError); - EXPECT_EQ( - err.at("error_message").as_string(), - testBundle.expectedErrorMessage); + EXPECT_EQ(err.at("error_message").as_string(), testBundle.expectedErrorMessage); }); } @@ -171,8 +163,7 @@ TEST_F(RPCNoRippleCheckTest, LedgerNotExistViaHash) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(std::nullopt)); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -200,8 +191,7 @@ TEST_F(RPCNoRippleCheckTest, LedgerNotExistViaIntIndex) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::nullopt)); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -229,8 +219,7 @@ TEST_F(RPCNoRippleCheckTest, LedgerNotExistViaStringIndex) mockBackendPtr->updateRange(30); // max EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); // return empty ledgerinfo - ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)) - .WillByDefault(Return(std::nullopt)); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence(seq, _)).WillByDefault(Return(std::nullopt)); auto const static input = boost::json::parse(fmt::format( R"({{ @@ -252,17 +241,14 @@ TEST_F(RPCNoRippleCheckTest, LedgerNotExistViaStringIndex) TEST_F(RPCNoRippleCheckTest, AccountNotExist) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, 30); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return emtpy - ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -282,9 +268,7 @@ TEST_F(RPCNoRippleCheckTest, AccountNotExist) }); } -TEST_F( - RPCNoRippleCheckTest, - NormalPathRoleUserDefaultRippleSetTrustLineNoRippleSet) 
+TEST_F(RPCNoRippleCheckTest, NormalPathRoleUserDefaultRippleSetTrustLineNoRippleSet) { static auto constexpr seq = 30; static auto constexpr expectedOutput = @@ -296,55 +280,29 @@ TEST_F( "You appear to have set your default ripple flag even though you are not a gateway. This is not recommended unless you are experimenting" ] })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(seq); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return valid account with DefaultRippleSet flag ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault( - Return(CreateAccountRootObject( - ACCOUNT, ripple::lsfDefaultRipple, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData())); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + .WillByDefault(Return(CreateAccountRootObject(ACCOUNT, ripple::lsfDefaultRipple, 2, 200, 2, INDEX1, 2) + .getSerializer() + .peekData())); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - TXNID, - 123, - ripple::lsfLowNoRipple); + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, ripple::lsfLowNoRipple); auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - TXNID, - 123, - ripple::lsfLowNoRipple); + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, ripple::lsfLowNoRipple); std::vector bbs; bbs.push_back(line1.getSerializer().peekData()); @@ -369,9 +327,7 @@ TEST_F( }); } -TEST_F( - RPCNoRippleCheckTest, - NormalPathRoleUserDefaultRippleUnsetTrustLineNoRippleUnSet) +TEST_F(RPCNoRippleCheckTest, NormalPathRoleUserDefaultRippleUnsetTrustLineNoRippleUnSet) { static auto constexpr seq = 30; static auto constexpr expectedOutput = @@ -383,34 +339,27 @@ TEST_F( "You should probably set the no ripple flag on your USD line to rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun" ] })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(seq); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return valid account with DefaultRippleSet flag ON_CALL(*rawBackendPtr, 
doFetchLedgerObject) - .WillByDefault( - Return(CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData())); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + .WillByDefault(Return(CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2).getSerializer().peekData())); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); - auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); + auto const line1 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); - auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); + auto const line2 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); std::vector bbs; bbs.push_back(line1.getSerializer().peekData()); @@ -435,9 +384,7 @@ TEST_F( }); } -TEST_F( - RPCNoRippleCheckTest, - NormalPathRoleGatewayDefaultRippleSetTrustLineNoRippleSet) +TEST_F(RPCNoRippleCheckTest, NormalPathRoleGatewayDefaultRippleSetTrustLineNoRippleSet) { static auto constexpr seq = 30; static auto constexpr expectedOutput = @@ -450,55 +397,29 @@ TEST_F( "You should clear the no ripple flag on your USD line to rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun" ] })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(seq); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return valid account with DefaultRippleSet flag ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault( - Return(CreateAccountRootObject( - ACCOUNT, ripple::lsfDefaultRipple, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData())); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + .WillByDefault(Return(CreateAccountRootObject(ACCOUNT, ripple::lsfDefaultRipple, 2, 200, 2, INDEX1, 2) + .getSerializer() + .peekData())); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - TXNID, - 123, - ripple::lsfLowNoRipple); + 
ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, ripple::lsfLowNoRipple); auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - TXNID, - 123, - ripple::lsfLowNoRipple); + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, ripple::lsfLowNoRipple); std::vector bbs; bbs.push_back(line1.getSerializer().peekData()); @@ -523,9 +444,7 @@ TEST_F( }); } -TEST_F( - RPCNoRippleCheckTest, - NormalPathRoleGatewayDefaultRippleUnsetTrustLineNoRippleUnset) +TEST_F(RPCNoRippleCheckTest, NormalPathRoleGatewayDefaultRippleUnsetTrustLineNoRippleUnset) { static auto constexpr seq = 30; static auto constexpr expectedOutput = @@ -537,34 +456,27 @@ TEST_F( "You should immediately set your default ripple flag" ] })"; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(seq); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return valid account with DefaultRippleSet flag ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault( - Return(CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData())); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + .WillByDefault(Return(CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2).getSerializer().peekData())); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(2); - auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); + auto const line1 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); - auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); + auto const line2 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, 0); std::vector bbs; bbs.push_back(line1.getSerializer().peekData()); @@ -589,42 +501,32 @@ TEST_F( }); } -TEST_F( - RPCNoRippleCheckTest, - NormalPathRoleGatewayDefaultRippleUnsetTrustLineNoRippleUnsetHighAccount) +TEST_F(RPCNoRippleCheckTest, NormalPathRoleGatewayDefaultRippleUnsetTrustLineNoRippleUnsetHighAccount) { static auto constexpr seq = 30; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(seq); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + 
ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return valid account with DefaultRippleSet flag ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault( - Return(CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData())); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + .WillByDefault(Return(CreateAccountRootObject(ACCOUNT, 0, 2, 200, 2, INDEX1, 2).getSerializer().peekData())); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, seq, _)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, seq, _)) .WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0))); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); - auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123, 0); + auto const line1 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123, 0); - auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT, "USD", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123, 0); + auto const line2 = + CreateRippleStateLedgerObject(ACCOUNT, "USD", ISSUER, 100, ACCOUNT2, 10, ACCOUNT, 20, TXNID, 123, 0); std::vector bbs; bbs.push_back(line1.getSerializer().peekData()); @@ -654,55 +556,29 @@ TEST_F( TEST_F(RPCNoRippleCheckTest, NormalPathLimit) { constexpr auto seq = 30; - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return valid account with DefaultRippleSet flag ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault( - Return(CreateAccountRootObject( - ACCOUNT, ripple::lsfDefaultRipple, 2, 200, 2, INDEX1, 2) - .getSerializer() - .peekData())); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + .WillByDefault(Return(CreateAccountRootObject(ACCOUNT, ripple::lsfDefaultRipple, 2, 200, 2, INDEX1, 2) + .getSerializer() + .peekData())); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); EXPECT_CALL(*rawBackendPtr, 
doFetchLedgerObject).Times(2); auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - TXNID, - 123, - ripple::lsfLowNoRipple); + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, ripple::lsfLowNoRipple); auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - TXNID, - 123, - ripple::lsfLowNoRipple); + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, ripple::lsfLowNoRipple); std::vector bbs; bbs.push_back(line1.getSerializer().peekData()); @@ -780,57 +656,30 @@ TEST_F(RPCNoRippleCheckTest, NormalPathTransactions) ripple::tfClearNoRipple, transactionSeq + 2, ripple::tfClearNoRipple); - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(seq); // max auto ledgerinfo = CreateLedgerInfo(LEDGERHASH, seq); - ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)) - .WillByDefault(Return(ledgerinfo)); + ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{LEDGERHASH}, _)).WillByDefault(Return(ledgerinfo)); EXPECT_CALL(*rawBackendPtr, fetchLedgerByHash).Times(1); // fetch account object return valid account with DefaultRippleSet flag ON_CALL(*rawBackendPtr, doFetchLedgerObject) - .WillByDefault(Return(CreateAccountRootObject( - ACCOUNT, 0, transactionSeq, 200, 2, INDEX1, 2) - .getSerializer() - .peekData())); - auto const ownerDir = CreateOwnerDirLedgerObject( - {ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); - auto const ownerDirKk = - ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; + .WillByDefault( + Return(CreateAccountRootObject(ACCOUNT, 0, transactionSeq, 200, 2, INDEX1, 2).getSerializer().peekData())); + auto const ownerDir = CreateOwnerDirLedgerObject({ripple::uint256{INDEX1}, ripple::uint256{INDEX2}}, INDEX1); + auto const ownerDirKk = ripple::keylet::ownerDir(GetAccountIDWithString(ACCOUNT)).key; ON_CALL(*rawBackendPtr, doFetchLedgerObject(ownerDirKk, seq, _)) .WillByDefault(Return(ownerDir.getSerializer().peekData())); - ON_CALL( - *rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, seq, _)) + ON_CALL(*rawBackendPtr, doFetchLedgerObject(ripple::keylet::fees().key, seq, _)) .WillByDefault(Return(CreateFeeSettingBlob(1, 2, 3, 4, 0))); EXPECT_CALL(*rawBackendPtr, doFetchLedgerObject).Times(3); auto const line1 = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - TXNID, - 123, - ripple::lsfLowNoRipple); + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, ripple::lsfLowNoRipple); auto const line2 = CreateRippleStateLedgerObject( - ACCOUNT, - "USD", - ISSUER, - 100, - ACCOUNT, - 10, - ACCOUNT2, - 20, - TXNID, - 123, - ripple::lsfLowNoRipple); + ACCOUNT, "USD", ISSUER, 100, ACCOUNT, 10, ACCOUNT2, 20, TXNID, 123, ripple::lsfLowNoRipple); std::vector bbs; bbs.push_back(line1.getSerializer().peekData()); diff --git a/unittests/rpc/handlers/TransactionEntryTest.cpp b/unittests/rpc/handlers/TransactionEntryTest.cpp index 953ef458..8076b411 100644 --- a/unittests/rpc/handlers/TransactionEntryTest.cpp +++ b/unittests/rpc/handlers/TransactionEntryTest.cpp @@ -28,10 +28,8 @@ using namespace RPCng; namespace json = boost::json; using namespace testing; -constexpr static auto INDEX = - "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; 
-constexpr static auto TXNID = - "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; +constexpr static auto INDEX = "E6DBAFC99223B42257915A63DFC6B0C032D4070F9A574B255AD97466726FC322"; +constexpr static auto TXNID = "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; constexpr static auto CURRENCY = "0158415500000000C1F76FF6ECB0BAC600000000"; @@ -43,26 +41,20 @@ class RPCTransactionEntryHandlerTest : public HandlerBaseTest TEST_F(RPCTransactionEntryHandlerTest, TxHashNotProvide) { runSpawn([this](auto& yield) { - auto const handler = - AnyHandler{TransactionEntryHandler{mockBackendPtr}}; - auto const output = - handler.process(json::parse("{}"), Context{std::ref(yield)}); + auto const handler = AnyHandler{TransactionEntryHandler{mockBackendPtr}}; + auto const output = handler.process(json::parse("{}"), Context{std::ref(yield)}); ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "invalidParams"); - EXPECT_EQ( - err.at("error_message").as_string(), - "Required field 'tx_hash' missing"); + EXPECT_EQ(err.at("error_message").as_string(), "Required field 'tx_hash' missing"); }); } TEST_F(RPCTransactionEntryHandlerTest, TxHashWrongFormat) { runSpawn([this](auto& yield) { - auto const handler = - AnyHandler{TransactionEntryHandler{mockBackendPtr}}; - auto const output = handler.process( - json::parse(R"({"tx_hash":"123"})"), Context{std::ref(yield)}); + auto const handler = AnyHandler{TransactionEntryHandler{mockBackendPtr}}; + auto const output = handler.process(json::parse(R"({"tx_hash":"123"})"), Context{std::ref(yield)}); ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "invalidParams"); @@ -72,8 +64,7 @@ TEST_F(RPCTransactionEntryHandlerTest, TxHashWrongFormat) TEST_F(RPCTransactionEntryHandlerTest, NonExistLedgerViaLedgerHash) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); // mock fetchLedgerByHash return empty ON_CALL(*rawBackendPtr, fetchLedgerByHash(ripple::uint256{INDEX}, _)) .WillByDefault(Return(std::optional{})); @@ -87,8 +78,7 @@ TEST_F(RPCTransactionEntryHandlerTest, NonExistLedgerViaLedgerHash) INDEX, TXNID)); runSpawn([&, this](auto& yield) { - auto const handler = - AnyHandler{TransactionEntryHandler{mockBackendPtr}}; + auto const handler = AnyHandler{TransactionEntryHandler{mockBackendPtr}}; auto const output = handler.process(input, Context{std::ref(yield)}); ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); @@ -100,13 +90,11 @@ TEST_F(RPCTransactionEntryHandlerTest, NonExistLedgerViaLedgerHash) // error case ledger non exist via index TEST_F(RPCTransactionEntryHandlerTest, NonExistLedgerViaLedgerIndex) { - MockBackend* rawBackendPtr = - static_cast(mockBackendPtr.get()); + MockBackend* rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max // mock fetchLedgerBySequence return empty - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(std::optional{})); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); auto const input = json::parse(fmt::format( R"({{ @@ -115,8 +103,7 @@ 
TEST_F(RPCTransactionEntryHandlerTest, NonExistLedgerViaLedgerIndex) }})", TXNID)); runSpawn([&, this](auto& yield) { - auto const handler = - AnyHandler{TransactionEntryHandler{mockBackendPtr}}; + auto const handler = AnyHandler{TransactionEntryHandler{mockBackendPtr}}; auto const output = handler.process(input, Context{std::ref(yield)}); ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); @@ -130,15 +117,13 @@ TEST_F(RPCTransactionEntryHandlerTest, TXNotFound) auto const rawBackendPtr = static_cast(mockBackendPtr.get()); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(CreateLedgerInfo(INDEX, 30))); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(CreateLedgerInfo(INDEX, 30))); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)) .WillByDefault(Return(std::optional{})); EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1); runSpawn([this](auto& yield) { - auto const handler = - AnyHandler{TransactionEntryHandler{mockBackendPtr}}; + auto const handler = AnyHandler{TransactionEntryHandler{mockBackendPtr}}; auto const req = json::parse(fmt::format( R"({{ "tx_hash": "{}" @@ -148,8 +133,7 @@ TEST_F(RPCTransactionEntryHandlerTest, TXNotFound) ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "transactionNotFound"); - EXPECT_EQ( - err.at("error_message").as_string(), "Transaction not found."); + EXPECT_EQ(err.at("error_message").as_string(), "Transaction not found."); }); } @@ -157,28 +141,21 @@ TEST_F(RPCTransactionEntryHandlerTest, LedgerSeqNotMatch) { auto const rawBackendPtr = static_cast(mockBackendPtr.get()); TransactionAndMetadata tx; - tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300) - .getSerializer() - .peekData(); - tx.transaction = CreateCreateOfferTransactionObject( - ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300) - .getSerializer() - .peekData(); + tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300).getSerializer().peekData(); + tx.transaction = + CreateCreateOfferTransactionObject(ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300).getSerializer().peekData(); tx.date = 123456; tx.ledgerSequence = 10; - ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)) - .WillByDefault(Return(tx)); + ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)).WillByDefault(Return(tx)); EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(30); // max - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(CreateLedgerInfo(INDEX, 30))); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(CreateLedgerInfo(INDEX, 30))); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); runSpawn([this](auto& yield) { - auto const handler = - AnyHandler{TransactionEntryHandler{mockBackendPtr}}; + auto const handler = AnyHandler{TransactionEntryHandler{mockBackendPtr}}; auto const req = json::parse(fmt::format( R"({{ "tx_hash": "{}", @@ -189,8 +166,7 @@ TEST_F(RPCTransactionEntryHandlerTest, LedgerSeqNotMatch) ASSERT_FALSE(output); auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "transactionNotFound"); - EXPECT_EQ( - err.at("error_message").as_string(), "Transaction not found."); + 
EXPECT_EQ(err.at("error_message").as_string(), "Transaction not found."); }); } @@ -241,28 +217,21 @@ TEST_F(RPCTransactionEntryHandlerTest, NormalPath) })"; auto const rawBackendPtr = static_cast(mockBackendPtr.get()); TransactionAndMetadata tx; - tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300) - .getSerializer() - .peekData(); - tx.transaction = CreateCreateOfferTransactionObject( - ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300) - .getSerializer() - .peekData(); + tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300).getSerializer().peekData(); + tx.transaction = + CreateCreateOfferTransactionObject(ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300).getSerializer().peekData(); tx.date = 123456; tx.ledgerSequence = 30; - ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)) - .WillByDefault(Return(tx)); + ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)).WillByDefault(Return(tx)); EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1); mockBackendPtr->updateRange(10); // min mockBackendPtr->updateRange(tx.ledgerSequence); // max - ON_CALL(*rawBackendPtr, fetchLedgerBySequence) - .WillByDefault(Return(CreateLedgerInfo(INDEX, tx.ledgerSequence))); + ON_CALL(*rawBackendPtr, fetchLedgerBySequence).WillByDefault(Return(CreateLedgerInfo(INDEX, tx.ledgerSequence))); EXPECT_CALL(*rawBackendPtr, fetchLedgerBySequence).Times(1); runSpawn([&, this](auto& yield) { - auto const handler = - AnyHandler{TransactionEntryHandler{mockBackendPtr}}; + auto const handler = AnyHandler{TransactionEntryHandler{mockBackendPtr}}; auto const req = json::parse(fmt::format( R"({{ "tx_hash": "{}", diff --git a/unittests/rpc/handlers/TxTest.cpp b/unittests/rpc/handlers/TxTest.cpp index b0ff999b..112b4dc4 100644 --- a/unittests/rpc/handlers/TxTest.cpp +++ b/unittests/rpc/handlers/TxTest.cpp @@ -28,8 +28,7 @@ using namespace RPCng; namespace json = boost::json; using namespace testing; -constexpr static auto TXNID = - "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; +constexpr static auto TXNID = "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD"; constexpr static auto ACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"; constexpr static auto ACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun"; constexpr static auto CURRENCY = "0158415500000000C1F76FF6ECB0BAC600000000"; @@ -55,8 +54,7 @@ TEST_F(RPCTxTest, ExcessiveLgrRange) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "excessiveLgrRange"); - EXPECT_EQ( - err.at("error_message").as_string(), "Ledger range exceeds 1000."); + EXPECT_EQ(err.at("error_message").as_string(), "Ledger range exceeds 1000."); }); } @@ -77,8 +75,7 @@ TEST_F(RPCTxTest, InvalidLgrRange) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "invalidLgrRange"); - EXPECT_EQ( - err.at("error_message").as_string(), "Ledger range is invalid."); + EXPECT_EQ(err.at("error_message").as_string(), "Ledger range is invalid."); }); } @@ -101,8 +98,7 @@ TEST_F(RPCTxTest, TxnNotFound) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "txnNotFound"); - EXPECT_EQ( - err.at("error_message").as_string(), "Transaction not found."); + EXPECT_EQ(err.at("error_message").as_string(), "Transaction not found."); }); } @@ -129,8 +125,7 @@ TEST_F(RPCTxTest, TxnNotFoundInGivenRangeSearchAllFalse) auto const err = RPC::makeError(output.error()); EXPECT_EQ(err.at("error").as_string(), "txnNotFound"); - EXPECT_EQ( 
-            err.at("error_message").as_string(), "Transaction not found.");
+        EXPECT_EQ(err.at("error_message").as_string(), "Transaction not found.");
         EXPECT_EQ(err.at("searched_all").as_bool(), false);
     });
 }
 
@@ -158,8 +153,7 @@ TEST_F(RPCTxTest, TxnNotFoundInGivenRangeSearchAllTrue)
 
         auto const err = RPC::makeError(output.error());
         EXPECT_EQ(err.at("error").as_string(), "txnNotFound");
-        EXPECT_EQ(
-            err.at("error_message").as_string(), "Transaction not found.");
+        EXPECT_EQ(err.at("error_message").as_string(), "Transaction not found.");
         EXPECT_EQ(err.at("searched_all").as_bool(), true);
     });
 }
 
@@ -203,17 +197,12 @@ TEST_F(RPCTxTest, DefaultParameter)
     })";
     auto const rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get());
     TransactionAndMetadata tx;
-    tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300)
-                      .getSerializer()
-                      .peekData();
-    tx.transaction = CreateCreateOfferTransactionObject(
-                         ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300)
-                         .getSerializer()
-                         .peekData();
+    tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300).getSerializer().peekData();
+    tx.transaction =
+        CreateCreateOfferTransactionObject(ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300).getSerializer().peekData();
     tx.date = 123456;
     tx.ledgerSequence = 100;
-    ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _))
-        .WillByDefault(Return(tx));
+    ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)).WillByDefault(Return(tx));
     EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1);
     runSpawn([this](auto& yield) {
         auto const handler = AnyHandler{TxHandler{mockBackendPtr}};
@@ -240,17 +229,12 @@ TEST_F(RPCTxTest, ReturnBinary)
     })";
     auto const rawBackendPtr = static_cast<MockBackend*>(mockBackendPtr.get());
     TransactionAndMetadata tx;
-    tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300)
-                      .getSerializer()
-                      .peekData();
-    tx.transaction = CreateCreateOfferTransactionObject(
-                         ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300)
-                         .getSerializer()
-                         .peekData();
+    tx.metadata = CreateMetaDataForCreateOffer(CURRENCY, ACCOUNT, 100, 200, 300).getSerializer().peekData();
+    tx.transaction =
+        CreateCreateOfferTransactionObject(ACCOUNT, 2, 100, CURRENCY, ACCOUNT2, 200, 300).getSerializer().peekData();
     tx.date = 123456;
     tx.ledgerSequence = 100;
-    ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _))
-        .WillByDefault(Return(tx));
+    ON_CALL(*rawBackendPtr, fetchTransaction(ripple::uint256{TXNID}, _)).WillByDefault(Return(tx));
     EXPECT_CALL(*rawBackendPtr, fetchTransaction).Times(1);
     runSpawn([this](auto& yield) {
         auto const handler = AnyHandler{TxHandler{mockBackendPtr}};
diff --git a/unittests/rpc/handlers/impl/FakesAndMocks.h b/unittests/rpc/handlers/impl/FakesAndMocks.h
index 8418f7e0..b2f0d2c4 100644
--- a/unittests/rpc/handlers/impl/FakesAndMocks.h
+++ b/unittests/rpc/handlers/impl/FakesAndMocks.h
@@ -59,10 +59,7 @@ tag_invoke(boost::json::value_to_tag, boost::json::value const& jv)
 
 // must be implemented as per rpc/common/Concepts.h
 inline void
-tag_invoke(
-    boost::json::value_from_tag,
-    boost::json::value& jv,
-    TestOutput const& output)
+tag_invoke(boost::json::value_from_tag, boost::json::value& jv, TestOutput const& output)
 {
     jv = {{"computed", output.computed}};
 }
@@ -80,12 +77,10 @@ public:
     {
         using namespace RPCng::validation;
 
-        // clang-format off
-        static const RPCng::RpcSpec rpcSpec = {
+        static const auto rpcSpec = RPCng::RpcSpec{
             {"hello", Required{}, Type{}, EqualTo{"world"}},
-            {"limit", Type{}, Between{0, 100}}  // optional field
{"limit", Type{}, Between{0, 100}}, // optional field }; - // clang-format on return rpcSpec; } @@ -93,8 +88,7 @@ public: Result process(Input input) const { - return Output{ - input.hello + '_' + std::to_string(input.limit.value_or(0))}; + return Output{input.hello + '_' + std::to_string(input.limit.value_or(0))}; } }; @@ -111,12 +105,10 @@ public: { using namespace RPCng::validation; - // clang-format off - static const RPCng::RpcSpec rpcSpec = { + static const auto rpcSpec = RPCng::RpcSpec{ {"hello", Required{}, Type{}, EqualTo{"world"}}, - {"limit", Type{}, Between{0, 100}} // optional field + {"limit", Type{}, Between{0, 100}}, // optional field }; - // clang-format on return rpcSpec; } @@ -124,8 +116,7 @@ public: Result process(Input input, RPCng::Context const& ctx) const { - return Output{ - input.hello + '_' + std::to_string(input.limit.value_or(0))}; + return Output{input.hello + '_' + std::to_string(input.limit.value_or(0))}; } }; @@ -155,12 +146,10 @@ public: { using namespace RPCng::validation; - // clang-format off - static const RPCng::RpcSpec rpcSpec = { + static const auto rpcSpec = RPCng::RpcSpec{ {"hello", Required{}, Type{}, EqualTo{"world"}}, - {"limit", Type{}, Between{0u, 100u}} // optional field + {"limit", Type{}, Between{0u, 100u}}, // optional field }; - // clang-format on return rpcSpec; } @@ -191,10 +180,7 @@ tag_invoke(boost::json::value_to_tag, boost::json::value const& jv) // must be implemented as per rpc/common/Concepts.h inline void -tag_invoke( - boost::json::value_from_tag, - boost::json::value& jv, - InOutFake const& output) +tag_invoke(boost::json::value_from_tag, boost::json::value& jv, InOutFake const& output) { jv = {{"something", output.something}}; } diff --git a/unittests/util/Fixtures.h b/unittests/util/Fixtures.h index 8b0de05f..db7252f7 100644 --- a/unittests/util/Fixtures.h +++ b/unittests/util/Fixtures.h @@ -61,8 +61,7 @@ protected: static std::once_flag once_; std::call_once(once_, [] { boost::log::add_common_attributes(); - boost::log::register_simple_formatter_factory( - "Severity"); + boost::log::register_simple_formatter_factory("Severity"); }); namespace src = boost::log::sources; @@ -72,10 +71,8 @@ protected: auto core = boost::log::core::get(); core->remove_all_sinks(); - boost::log::add_console_log( - stream_, keywords::format = "%Channel%:%Severity% %Message%"); - auto min_severity = expr::channel_severity_filter( - clio::log_channel, clio::log_severity); + boost::log::add_console_log(stream_, keywords::format = "%Channel%:%Severity% %Message%"); + auto min_severity = expr::channel_severity_filter(clio::log_channel, clio::log_severity); min_severity["General"] = clio::Severity::DBG; min_severity["Trace"] = clio::Severity::TRC; core->set_filter(min_severity); diff --git a/unittests/util/MockBackend.h b/unittests/util/MockBackend.h index 11b7d3ef..d8ba0da7 100644 --- a/unittests/util/MockBackend.h +++ b/unittests/util/MockBackend.h @@ -57,8 +57,7 @@ public: MOCK_METHOD( std::vector, fetchTransactions, - (std::vector const& hashes, - boost::asio::yield_context& yield), + (std::vector const& hashes, boost::asio::yield_context& yield), (const, override)); MOCK_METHOD( @@ -86,9 +85,7 @@ public: MOCK_METHOD( std::optional, fetchNFT, - (ripple::uint256 const& tokenID, - std::uint32_t const ledgerSequence, - boost::asio::yield_context& yieldd), + (ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yieldd), (const, override)); MOCK_METHOD( @@ -104,17 +101,13 @@ public: MOCK_METHOD( std::vector, 
         doFetchLedgerObjects,
-        (std::vector<ripple::uint256> const& key,
-         std::uint32_t const sequence,
-         boost::asio::yield_context& yield),
+        (std::vector<ripple::uint256> const& key, std::uint32_t const sequence, boost::asio::yield_context& yield),
         (const, override));
 
     MOCK_METHOD(
         std::optional,
         doFetchLedgerObject,
-        (ripple::uint256 const& key,
-         std::uint32_t const sequence,
-         boost::asio::yield_context& yield),
+        (ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield),
         (const, override));
 
     MOCK_METHOD(
@@ -126,9 +119,7 @@ public:
     MOCK_METHOD(
         std::optional,
         doFetchSuccessorKey,
-        (ripple::uint256 key,
-         std::uint32_t const ledgerSequence,
-         boost::asio::yield_context& yield),
+        (ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield),
         (const, override));
 
     MOCK_METHOD(
@@ -137,17 +128,9 @@ public:
         (boost::asio::yield_context & yield),
         (const, override));
 
-    MOCK_METHOD(
-        void,
-        writeLedger,
-        (ripple::LedgerInfo const& ledgerInfo, std::string&& ledgerHeader),
-        (override));
+    MOCK_METHOD(void, writeLedger, (ripple::LedgerInfo const& ledgerInfo, std::string&& ledgerHeader), (override));
 
-    MOCK_METHOD(
-        void,
-        writeLedgerObject,
-        (std::string && key, std::uint32_t const seq, std::string&& blob),
-        (override));
+    MOCK_METHOD(void, writeLedgerObject, (std::string && key, std::uint32_t const seq, std::string&& blob), (override));
 
     MOCK_METHOD(
         void,
@@ -161,17 +144,9 @@ public:
 
     MOCK_METHOD(void, writeNFTs, (std::vector && blob), (override));
 
-    MOCK_METHOD(
-        void,
-        writeAccountTransactions,
-        (std::vector && blob),
-        (override));
+    MOCK_METHOD(void, writeAccountTransactions, (std::vector && blob), (override));
 
-    MOCK_METHOD(
-        void,
-        writeNFTTransactions,
-        (std::vector && blob),
-        (override));
+    MOCK_METHOD(void, writeNFTTransactions, (std::vector && blob), (override));
 
     MOCK_METHOD(
         void,
diff --git a/unittests/util/TestObject.cpp b/unittests/util/TestObject.cpp
index 455532c7..e75b3637 100644
--- a/unittests/util/TestObject.cpp
+++ b/unittests/util/TestObject.cpp
@@ -56,15 +56,9 @@ CreateFeeSettingLedgerObject(
 }
 
 ripple::Blob
-CreateFeeSettingBlob(
-    uint64_t base,
-    uint32_t reserveInc,
-    uint32_t reserveBase,
-    uint32_t refFeeUnit,
-    uint32_t flag)
+CreateFeeSettingBlob(uint64_t base, uint32_t reserveInc, uint32_t reserveBase, uint32_t refFeeUnit, uint32_t flag)
 {
-    auto lo = CreateFeeSettingLedgerObject(
-        base, reserveInc, reserveBase, refFeeUnit, flag);
+    auto lo = CreateFeeSettingLedgerObject(base, reserveInc, reserveBase, refFeeUnit, flag);
     return lo.getSerializer().peekData();
 }
 
@@ -78,13 +72,11 @@ CreatePaymentTransactionObject(
 {
     ripple::STObject obj(ripple::sfTransaction);
     obj.setFieldU16(ripple::sfTransactionType, ripple::ttPAYMENT);
-    auto account =
-        ripple::parseBase58<ripple::AccountID>(std::string(accountId1));
+    auto account = ripple::parseBase58<ripple::AccountID>(std::string(accountId1));
     obj.setAccountID(ripple::sfAccount, account.value());
     obj.setFieldAmount(ripple::sfAmount, ripple::STAmount(amount, false));
     obj.setFieldAmount(ripple::sfFee, ripple::STAmount(fee, false));
-    auto account2 =
-        ripple::parseBase58<ripple::AccountID>(std::string(accountId2));
+    auto account2 = ripple::parseBase58<ripple::AccountID>(std::string(accountId2));
     obj.setAccountID(ripple::sfDestination, account2.value());
     obj.setFieldU32(ripple::sfSequence, seq);
     const char* key = "test";
@@ -101,16 +93,12 @@ CreatePaymentTransactionMetaObject(
     int finalBalance2)
 {
     ripple::STObject finalFields(ripple::sfFinalFields);
-    finalFields.setAccountID(
-        ripple::sfAccount, GetAccountIDWithString(accountId1));
-    finalFields.setFieldAmount(
-        ripple::sfBalance, ripple::STAmount(finalBalance1));
+    finalFields.setAccountID(ripple::sfAccount, GetAccountIDWithString(accountId1));
+    finalFields.setFieldAmount(ripple::sfBalance, ripple::STAmount(finalBalance1));
 
     ripple::STObject finalFields2(ripple::sfFinalFields);
-    finalFields2.setAccountID(
-        ripple::sfAccount, GetAccountIDWithString(accountId2));
-    finalFields2.setFieldAmount(
-        ripple::sfBalance, ripple::STAmount(finalBalance2));
+    finalFields2.setAccountID(ripple::sfAccount, GetAccountIDWithString(accountId2));
+    finalFields2.setFieldAmount(ripple::sfBalance, ripple::STAmount(finalBalance2));
 
     ripple::STObject metaObj(ripple::sfTransactionMetaData);
     ripple::STArray metaArray{2};
@@ -142,14 +130,11 @@ CreateAccountRootObject(
     ripple::STObject accountRoot(ripple::sfAccount);
     accountRoot.setFieldU16(ripple::sfLedgerEntryType, ripple::ltACCOUNT_ROOT);
     accountRoot.setFieldU32(ripple::sfFlags, flag);
-    accountRoot.setAccountID(
-        ripple::sfAccount, GetAccountIDWithString(accountId));
+    accountRoot.setAccountID(ripple::sfAccount, GetAccountIDWithString(accountId));
     accountRoot.setFieldU32(ripple::sfSequence, seq);
-    accountRoot.setFieldAmount(
-        ripple::sfBalance, ripple::STAmount(balance, false));
+    accountRoot.setFieldAmount(ripple::sfBalance, ripple::STAmount(balance, false));
     accountRoot.setFieldU32(ripple::sfOwnerCount, ownerCount);
-    accountRoot.setFieldH256(
-        ripple::sfPreviousTxnID, ripple::uint256{previousTxnID});
+    accountRoot.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{previousTxnID});
     accountRoot.setFieldU32(ripple::sfPreviousTxnLgrSeq, previousTxnSeq);
     accountRoot.setFieldU32(ripple::sfTransferRate, transferRate);
     return accountRoot;
@@ -167,18 +152,15 @@ CreateCreateOfferTransactionObject(
 {
     ripple::STObject obj(ripple::sfTransaction);
     obj.setFieldU16(ripple::sfTransactionType, ripple::ttOFFER_CREATE);
-    auto account =
-        ripple::parseBase58<ripple::AccountID>(std::string(accountId));
+    auto account = ripple::parseBase58<ripple::AccountID>(std::string(accountId));
     obj.setAccountID(ripple::sfAccount, account.value());
     auto amount = ripple::STAmount(fee, false);
     obj.setFieldAmount(ripple::sfFee, amount);
     obj.setFieldU32(ripple::sfSequence, seq);
     // add amount
     ripple::Issue issue1(
-        ripple::Currency{currency},
-        ripple::parseBase58<ripple::AccountID>(std::string(issuer)).value());
-    obj.setFieldAmount(
-        ripple::sfTakerGets, ripple::STAmount(issue1, takerGets));
+        ripple::Currency{currency}, ripple::parseBase58<ripple::AccountID>(std::string(issuer)).value());
+    obj.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(issue1, takerGets));
     obj.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(takerPays, false));
     auto key = "test";
@@ -194,11 +176,9 @@ GetIssue(std::string_view currency, std::string_view issuerId)
     if (currency.size() == 3)
         return ripple::Issue(
             ripple::to_currency(std::string(currency)),
-            ripple::parseBase58<ripple::AccountID>(std::string(issuerId))
-                .value());
+            ripple::parseBase58<ripple::AccountID>(std::string(issuerId)).value());
     return ripple::Issue(
-        ripple::Currency{currency},
-        ripple::parseBase58<ripple::AccountID>(std::string(issuerId)).value());
+        ripple::Currency{currency}, ripple::parseBase58<ripple::AccountID>(std::string(issuerId)).value());
 }
 
 ripple::STObject
@@ -213,15 +193,11 @@ CreateMetaDataForBookChange(
 {
     ripple::STObject finalFields(ripple::sfFinalFields);
     ripple::Issue issue1 = GetIssue(currency, issueId);
-    finalFields.setFieldAmount(
-        ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays));
-    finalFields.setFieldAmount(
-        ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false));
+    finalFields.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays));
+    finalFields.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false));
     ripple::STObject previousFields(ripple::sfPreviousFields);
-    previousFields.setFieldAmount(
-        ripple::sfTakerPays, ripple::STAmount(issue1, perviousTakerPays));
-    previousFields.setFieldAmount(
-        ripple::sfTakerGets, ripple::STAmount(perviousTakerGets, false));
+    previousFields.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(issue1, perviousTakerPays));
+    previousFields.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(perviousTakerGets, false));
     ripple::STObject metaObj(ripple::sfTransactionMetaData);
     ripple::STArray metaArray{1};
     ripple::STObject node(ripple::sfModifiedNode);
@@ -245,10 +221,8 @@ CreateMetaDataForCreateOffer(
 {
     ripple::STObject finalFields(ripple::sfNewFields);
     ripple::Issue issue1 = GetIssue(currency, issueId);
-    finalFields.setFieldAmount(
-        ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays));
-    finalFields.setFieldAmount(
-        ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false));
+    finalFields.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays));
+    finalFields.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false));
     ripple::STObject metaObj(ripple::sfTransactionMetaData);
     ripple::STArray metaArray{1};
     ripple::STObject node(ripple::sfCreatedNode);
@@ -271,10 +245,8 @@ CreateMetaDataForCancelOffer(
 {
     ripple::STObject finalFields(ripple::sfFinalFields);
     ripple::Issue issue1 = GetIssue(currency, issueId);
-    finalFields.setFieldAmount(
-        ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays));
-    finalFields.setFieldAmount(
-        ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false));
+    finalFields.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays));
+    finalFields.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false));
     ripple::STObject metaObj(ripple::sfTransactionMetaData);
     ripple::STArray metaArray{1};
     ripple::STObject node(ripple::sfDeletedNode);
@@ -288,9 +260,7 @@ CreateMetaDataForCancelOffer(
 }
 
 ripple::STObject
-CreateOwnerDirLedgerObject(
-    std::vector<ripple::uint256> indexes,
-    std::string_view rootIndex)
+CreateOwnerDirLedgerObject(std::vector<ripple::uint256> indexes, std::string_view rootIndex)
 {
     ripple::STObject ownerDir(ripple::sfLedgerEntry);
     ownerDir.setFieldU16(ripple::sfLedgerEntryType, ripple::ltDIR_NODE);
@@ -318,8 +288,7 @@ CreatePaymentChannelLedgerObject(
     channel.setFieldAmount(ripple::sfBalance, ripple::STAmount(balance, false));
     channel.setFieldU32(ripple::sfSettleDelay, settleDelay);
     channel.setFieldU64(ripple::sfOwnerNode, 0);
-    channel.setFieldH256(
-        ripple::sfPreviousTxnID, ripple::uint256{previousTxnId});
+    channel.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{previousTxnId});
     channel.setFieldU32(ripple::sfPreviousTxnLgrSeq, previousTxnSeq);
     channel.setFieldU32(ripple::sfFlags, 0);
     uint8_t key[33] = {0};
@@ -346,15 +315,9 @@ CreateRippleStateLedgerObject(
     auto line = ripple::STObject(ripple::sfLedgerEntry);
     line.setFieldU16(ripple::sfLedgerEntryType, ripple::ltRIPPLE_STATE);
     line.setFieldU32(ripple::sfFlags, flag);
-    line.setFieldAmount(
-        ripple::sfBalance,
-        ripple::STAmount(GetIssue(currency, issuerId), balance));
-    line.setFieldAmount(
-        ripple::sfHighLimit,
-        ripple::STAmount(GetIssue(currency, highNodeAccountId), highLimit));
-    line.setFieldAmount(
-        ripple::sfLowLimit,
-        ripple::STAmount(GetIssue(currency, lowNodeAccountId), lowLimit));
+    line.setFieldAmount(ripple::sfBalance, ripple::STAmount(GetIssue(currency, issuerId), balance));
+    line.setFieldAmount(ripple::sfHighLimit, ripple::STAmount(GetIssue(currency, highNodeAccountId), highLimit));
+    line.setFieldAmount(ripple::sfLowLimit, ripple::STAmount(GetIssue(currency, lowNodeAccountId), lowLimit));
     line.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{previousTxnId});
     line.setFieldU32(ripple::sfPreviousTxnLgrSeq, previousTxnSeq);
     return line;
@@ -377,11 +340,9 @@ CreateOfferLedgerObject(
     offer.setFieldU32(ripple::sfSequence, 0);
     offer.setFieldU32(ripple::sfFlags, 0);
     ripple::Issue issue1 = GetIssue(getsCurrency, getsIssueId);
-    offer.setFieldAmount(
-        ripple::sfTakerGets, ripple::STAmount(issue1, takerGets));
+    offer.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(issue1, takerGets));
     ripple::Issue issue2 = GetIssue(paysCurrency, paysIssueId);
-    offer.setFieldAmount(
-        ripple::sfTakerPays, ripple::STAmount(issue2, takerPays));
+    offer.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(issue2, takerPays));
     offer.setFieldH256(ripple::sfBookDirectory, ripple::uint256{});
     offer.setFieldU64(ripple::sfBookNode, 0);
     offer.setFieldU64(ripple::sfOwnerNode, 0);
@@ -438,17 +399,12 @@ CreateCheckLedgerObject(std::string_view account, std::string_view dest)
 }
 
 ripple::STObject
-CreateDepositPreauthLedgerObject(
-    std::string_view account,
-    std::string_view auth)
+CreateDepositPreauthLedgerObject(std::string_view account, std::string_view auth)
 {
     ripple::STObject depositPreauth(ripple::sfLedgerEntry);
-    depositPreauth.setFieldU16(
-        ripple::sfLedgerEntryType, ripple::ltDEPOSIT_PREAUTH);
-    depositPreauth.setAccountID(
-        ripple::sfAccount, GetAccountIDWithString(account));
-    depositPreauth.setAccountID(
-        ripple::sfAuthorize, GetAccountIDWithString(auth));
+    depositPreauth.setFieldU16(ripple::sfLedgerEntryType, ripple::ltDEPOSIT_PREAUTH);
+    depositPreauth.setAccountID(ripple::sfAccount, GetAccountIDWithString(account));
+    depositPreauth.setAccountID(ripple::sfAuthorize, GetAccountIDWithString(auth));
     depositPreauth.setFieldU32(ripple::sfFlags, 0);
     depositPreauth.setFieldU64(ripple::sfOwnerNode, 0);
     depositPreauth.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{});
@@ -457,19 +413,9 @@ CreateDepositPreauthLedgerObject(
 }
 
 Backend::NFT
-CreateNFT(
-    std::string_view tokenID,
-    std::string_view account,
-    ripple::LedgerIndex seq,
-    ripple::Blob uri,
-    bool isBurned)
+CreateNFT(std::string_view tokenID, std::string_view account, ripple::LedgerIndex seq, ripple::Blob uri, bool isBurned)
 {
-    return Backend::NFT{
-        ripple::uint256(tokenID),
-        seq,
-        GetAccountIDWithString(account),
-        uri,
-        isBurned};
+    return Backend::NFT{ripple::uint256(tokenID), seq, GetAccountIDWithString(account), uri, isBurned};
 }
 
 ripple::STObject
@@ -519,8 +465,7 @@ CreateSignerLists(std::vector> const& signers)
     for (auto const& signer : signers)
     {
         auto entry = ripple::STObject(ripple::sfSignerEntry);
-        entry.setAccountID(
-            ripple::sfAccount, GetAccountIDWithString(signer.first));
+        entry.setAccountID(ripple::sfAccount, GetAccountIDWithString(signer.first));
         entry.setFieldU16(ripple::sfSignerWeight, signer.second);
         quorum += signer.second;
         list.push_back(entry);
diff --git a/unittests/util/TestObject.h b/unittests/util/TestObject.h
index 6c7661e3..364a03da 100644
--- a/unittests/util/TestObject.h
+++ b/unittests/util/TestObject.h
@@ -52,12 +52,7 @@ CreateFeeSettingLedgerObject(
  * Create a FeeSetting ledger object and return its blob
  */
 [[nodiscard]] ripple::Blob
-CreateFeeSettingBlob(
-    uint64_t base,
-    uint32_t reserveInc,
-    uint32_t reserveBase,
-    uint32_t refFeeUnit,
-    uint32_t flag);
+CreateFeeSettingBlob(uint64_t base, uint32_t reserveInc, uint32_t reserveBase, uint32_t refFeeUnit, uint32_t flag);
 
 /*
  * Create a payment transaction object
@@ -150,9 +145,7 @@ CreateMetaDataForCancelOffer(
  * Create a owner dir ledger object
  */
 [[nodiscard]] ripple::STObject
-CreateOwnerDirLedgerObject(
-    std::vector<ripple::uint256> indexes,
-    std::string_view rootIndex);
+CreateOwnerDirLedgerObject(std::vector<ripple::uint256> indexes, std::string_view rootIndex);
 
 /*
  * Create a payment channel ledger object
@@ -202,9 +195,7 @@ CreateEscrowLedgerObject(std::string_view account, std::string_view dest);
 CreateCheckLedgerObject(std::string_view account, std::string_view dest);
 
 [[nodiscard]] ripple::STObject
-CreateDepositPreauthLedgerObject(
-    std::string_view account,
-    std::string_view auth);
+CreateDepositPreauthLedgerObject(std::string_view account, std::string_view auth);
 
 [[nodiscard]] Backend::NFT
 CreateNFT(