Add clang tidy (#864)

Fixes #863
Author: Sergey Kuznetsov
Date: 2023-10-03 10:43:54 +01:00
Committed by: GitHub
Parent: 69f5025a29
Commit: 4b53bef1f5
198 changed files with 2168 additions and 1288 deletions

.clang-tidy (new file, 118 lines)

@@ -0,0 +1,118 @@
---
Checks: '-*,
bugprone-argument-comment,
bugprone-assert-side-effect,
bugprone-bad-signal-to-kill-thread,
bugprone-bool-pointer-implicit-conversion,
bugprone-copy-constructor-init,
bugprone-dangling-handle,
bugprone-dynamic-static-initializers,
bugprone-fold-init-type,
bugprone-forward-declaration-namespace,
bugprone-inaccurate-erase,
bugprone-incorrect-roundings,
bugprone-infinite-loop,
bugprone-integer-division,
bugprone-lambda-function-name,
bugprone-macro-parentheses,
bugprone-macro-repeated-side-effects,
bugprone-misplaced-operator-in-strlen-in-alloc,
bugprone-misplaced-pointer-arithmetic-in-alloc,
bugprone-misplaced-widening-cast,
bugprone-move-forwarding-reference,
bugprone-multiple-statement-macro,
bugprone-no-escape,
bugprone-parent-virtual-call,
bugprone-posix-return,
bugprone-redundant-branch-condition,
bugprone-shared-ptr-array-mismatch,
bugprone-signal-handler,
bugprone-signed-char-misuse,
bugprone-sizeof-container,
bugprone-sizeof-expression,
bugprone-spuriously-wake-up-functions,
bugprone-standalone-empty,
bugprone-string-constructor,
bugprone-string-integer-assignment,
bugprone-string-literal-with-embedded-nul,
bugprone-stringview-nullptr,
bugprone-suspicious-enum-usage,
bugprone-suspicious-include,
bugprone-suspicious-memory-comparison,
bugprone-suspicious-memset-usage,
bugprone-suspicious-missing-comma,
bugprone-suspicious-realloc-usage,
bugprone-suspicious-semicolon,
bugprone-suspicious-string-compare,
bugprone-swapped-arguments,
bugprone-terminating-continue,
bugprone-throw-keyword-missing,
bugprone-too-small-loop-variable,
bugprone-undefined-memory-manipulation,
bugprone-undelegated-constructor,
bugprone-unhandled-exception-at-new,
bugprone-unhandled-self-assignment,
bugprone-unused-raii,
bugprone-unused-return-value,
bugprone-use-after-move,
bugprone-virtual-near-miss,
cppcoreguidelines-init-variables,
cppcoreguidelines-prefer-member-initializer,
cppcoreguidelines-pro-type-member-init,
cppcoreguidelines-pro-type-static-cast-downcast,
cppcoreguidelines-virtual-class-destructor,
llvm-namespace-comment,
misc-const-correctness,
misc-definitions-in-headers,
misc-misplaced-const,
misc-redundant-expression,
misc-static-assert,
misc-throw-by-value-catch-by-reference,
misc-unused-alias-decls,
misc-unused-using-decls,
modernize-concat-nested-namespaces,
modernize-deprecated-headers,
modernize-make-shared,
modernize-make-unique,
modernize-pass-by-value,
modernize-use-emplace,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-override,
modernize-use-using,
performance-faster-string-find,
performance-for-range-copy,
performance-implicit-conversion-in-loop,
performance-inefficient-vector-operation,
performance-move-const-arg,
performance-move-constructor-init,
performance-no-automatic-move,
performance-trivially-destructible,
readability-avoid-const-params-in-decls,
readability-braces-around-statements,
readability-const-return-type,
readability-container-contains,
readability-container-size-empty,
readability-convert-member-functions-to-static,
readability-duplicate-include,
readability-else-after-return,
readability-implicit-bool-conversion,
readability-inconsistent-declaration-parameter-name,
readability-make-member-function-const,
readability-misleading-indentation,
readability-non-const-parameter,
readability-redundant-declaration,
readability-redundant-member-init,
readability-redundant-string-init,
readability-simplify-boolean-expr,
readability-static-accessed-through-instance,
readability-static-definition-in-anonymous-namespace,
readability-suspicious-call-argument
'
CheckOptions:
readability-braces-around-statements.ShortStatementLines: 2
HeaderFilterRegex: '^.*/(src|unitests)/.*\.(h|hpp)$'
WarningsAsErrors: '*'
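For orientation, here is a small hypothetical C++ snippet (not taken from the repository) showing the shape of code that a few of the enabled checks ask for — readability-container-size-empty, readability-else-after-return and readability-implicit-bool-conversion — which is the pattern most of the mechanical edits below follow:
```cpp
#include <string>
#include <vector>

// Hypothetical function, for illustration only.
std::string
classify(std::vector<int> const& values)
{
    // readability-container-size-empty: prefer `values.empty()` over `values.size() == 0`
    if (values.empty())
        return "empty";

    // readability-else-after-return: no `else` after a branch that returns
    if (values.front() < 0)
        return "negative";

    // readability-implicit-bool-conversion: compare against 0 rather than using the int as a bool
    if (values.front() != 0)
        return "positive";
    return "zero";
}
```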


@@ -27,10 +27,11 @@ runs:
shell: bash
env:
BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
LINT: "${{ runner.os == 'Linux' && 'True' || 'False' }}"
run: |
mkdir -p build
cd build
threads_num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
conan install .. -of . -b $BUILD_OPTION -s build_type=Release -o clio:tests=True --profile ${{ inputs.conan_profile }}
conan install .. -of . -b $BUILD_OPTION -s build_type=Release -o clio:tests=True -o clio:lint=$LINT --profile ${{ inputs.conan_profile }}
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -G Ninja
cmake --build . --parallel $threads_num


@@ -82,10 +82,15 @@ jobs:
with:
fetch-depth: 0
- name: Add llvm repo
run: |
echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main' >> /etc/apt/sources.list
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
- name: Install packages
run: |
apt update -qq
apt install -y jq
apt install -y jq clang-tidy-16
- name: Install ccache
run: |

CMake/ClangTidy.cmake (new file, 31 lines)

@@ -0,0 +1,31 @@
if (lint)
# Find clang-tidy binary
if (DEFINED ENV{CLIO_CLANG_TIDY_BIN})
set (_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
if ((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
message (FATAL_ERROR "$ENV{CLIO_CLANG_TIDY_BIN} no such file. Check CLIO_CLANG_TIDY_BIN env variable")
endif ()
message (STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
else ()
find_program (_CLANG_TIDY_BIN NAMES "clang-tidy-16" "clang-tidy" REQUIRED)
endif ()
if (NOT _CLANG_TIDY_BIN)
message (FATAL_ERROR
"clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy.")
endif ()
# Support for https://github.com/matus-chochlik/ctcache
find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if (CLANG_TIDY_CACHE_PATH)
set (_CLANG_TIDY_CMD
"${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
CACHE STRING "A combined command to run clang-tidy with caching wrapper")
else ()
set(_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
endif ()
set (CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
message (STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
endif ()


@@ -19,13 +19,7 @@ set(COMPILER_FLAGS
-Wunused
)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
list(APPEND COMPILER_FLAGS
-Wshadow # gcc is too aggressive with shadowing https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
)
endif ()
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT lint)
list(APPEND COMPILER_FLAGS
-Wduplicated-branches
-Wduplicated-cond
@@ -34,6 +28,12 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
)
endif ()
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
list(APPEND COMPILER_FLAGS
-Wshadow # gcc is too aggressive with shadowing https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
)
endif ()
# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for the flags description
target_compile_options (clio PUBLIC ${COMPILER_FLAGS})


@@ -9,6 +9,7 @@ option (tests "Build tests" FALSE)
option (docs "Generate doxygen docs" FALSE)
option (coverage "Build test coverage report" FALSE)
option (packaging "Create distribution packages" FALSE)
option (lint "Run clang-tidy checks during compilation" FALSE)
# ========================================================================== #
set (san "" CACHE STRING "Add sanitizer instrumentation")
set (CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
@@ -18,6 +19,7 @@ set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
# Include required modules
include (CMake/Ccache.cmake)
include (CheckCXXCompilerFlag)
include (CMake/ClangTidy.cmake)
if (verbose)
set (CMAKE_VERBOSE_MAKEFILE TRUE)


@@ -237,6 +237,21 @@ Clio will fallback to hardcoded defaults when not specified in the config file o
of the minimum and maximum supported versions hardcoded in `src/rpc/common/APIVersion.h`.
> **Note:** See `example-config.json` for more details.
## Using clang-tidy for static analysis
The minimum required clang-tidy version is 16.0.
Clang-tidy can be run by CMake while building the project.
To enable this, pass the option `-o lint=True` to the `conan install` command:
```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```
By default, CMake tries to find clang-tidy automatically on your system.
To force CMake to use a specific binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the clang-tidy binary.
E.g.:
```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@16/bin/clang-tidy
```
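Individual findings can also be silenced inline with a `NOLINT` comment naming the check, as this change does once in `Statement::bind` (`// NOLINT(misc-const-correctness)`). A hypothetical sketch of that pattern:
```cpp
#include <cstddef>

// Hypothetical helper, for illustration only: the counter is mutated inside the
// fold expression, so the misc-const-correctness finding is silenced for that line.
template <typename... Args>
std::size_t
countArgs(Args&&... args)
{
    std::size_t idx = 0; // NOLINT(misc-const-correctness)
    (((void)args, ++idx), ...);
    return idx;
}
```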
## Developing against `rippled` in standalone mode
If you wish to develop against a `rippled` instance running in standalone


@@ -16,6 +16,7 @@ class Clio(ConanFile):
'docs': [True, False], # doxygen API docs; create custom target 'docs'
'packaging': [True, False], # create distribution packages
'coverage': [True, False], # build for test coverage report; create custom target `clio_tests-ccov`
'lint': [True, False], # run clang-tidy checks during compilation
}
requires = [
@@ -33,6 +34,7 @@ class Clio(ConanFile):
'tests': False,
'packaging': False,
'coverage': False,
'lint': False,
'docs': False,
'xrpl/*:tests': False,
@@ -73,6 +75,7 @@ class Clio(ConanFile):
tc.variables['verbose'] = self.options.verbose
tc.variables['tests'] = self.options.tests
tc.variables['coverage'] = self.options.coverage
tc.variables['lint'] = self.options.lint
tc.variables['docs'] = self.options.docs
tc.variables['packaging'] = self.options.packaging
tc.generate()


@@ -34,10 +34,10 @@ namespace data {
* @param config The clio config to use
* @return A shared_ptr<BackendInterface> with the selected implementation
*/
std::shared_ptr<BackendInterface>
inline std::shared_ptr<BackendInterface>
make_Backend(util::Config const& config)
{
static util::Logger log{"Backend"};
static util::Logger const log{"Backend"};
LOG(log.info()) << "Constructing BackendInterface";
auto const readOnly = config.valueOr("read_only", false);


@@ -67,16 +67,18 @@ BackendInterface::fetchLedgerObject(
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
return *obj;
}
else
{
LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj)
{
LOG(gLog.trace()) << "Missed cache and missed in db";
else
LOG(gLog.trace()) << "Missed cache but found in db";
return dbObj;
}
else
{
LOG(gLog.trace()) << "Missed cache but found in db";
}
return dbObj;
}
std::vector<Blob>
@@ -92,18 +94,22 @@ BackendInterface::fetchLedgerObjects(
{
auto obj = cache_.get(keys[i], sequence);
if (obj)
{
results[i] = *obj;
}
else
{
misses.push_back(keys[i]);
}
}
LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
if (misses.size())
if (!misses.empty())
{
auto objs = doFetchLedgerObjects(misses, sequence, yield);
for (size_t i = 0, j = 0; i < results.size(); ++i)
{
if (results[i].size() == 0)
if (results[i].empty())
{
results[i] = objs[j];
++j;
@@ -122,9 +128,13 @@ BackendInterface::fetchSuccessorKey(
{
auto succ = cache_.getSuccessor(key, ledgerSequence);
if (succ)
{
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
}
else
{
LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
}
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}
@@ -181,11 +191,12 @@ BackendInterface::fetchBookOffers(
while (keys.size() < limit)
{
++numPages;
ripple::STLedgerEntry sle{ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
ripple::STLedgerEntry const sle{
ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
auto indexes = sle.getFieldV256(ripple::sfIndexes);
keys.insert(keys.end(), indexes.begin(), indexes.end());
auto next = sle.getFieldU64(ripple::sfIndexNext);
if (!next)
if (next == 0u)
{
LOG(gLog.trace()) << "Next is empty. breaking";
break;
@@ -231,19 +242,23 @@ BackendInterface::hardFetchLedgerRange() const
std::optional<LedgerRange>
BackendInterface::fetchLedgerRange() const
{
std::shared_lock lck(rngMtx_);
std::shared_lock const lck(rngMtx_);
return range;
}
void
BackendInterface::updateRange(uint32_t newMax)
{
std::scoped_lock lck(rngMtx_);
std::scoped_lock const lck(rngMtx_);
assert(!range || newMax >= range->maxSequence);
if (!range)
{
range = {newMax, newMax};
}
else
{
range->maxSequence = newMax;
}
}
LedgerPage
@@ -260,20 +275,26 @@ BackendInterface::fetchLedgerPage(
bool reachedEnd = false;
while (keys.size() < limit && !reachedEnd)
{
ripple::uint256 const& curCursor = keys.size() ? keys.back() : cursor ? *cursor : firstKey;
ripple::uint256 const& curCursor = !keys.empty() ? keys.back() : (cursor ? *cursor : firstKey);
std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
auto succ = fetchSuccessorKey(curCursor, seq, yield);
if (!succ)
{
reachedEnd = true;
}
else
keys.push_back(std::move(*succ));
{
keys.push_back(*succ);
}
}
auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < objects.size(); ++i)
{
if (objects[i].size())
page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
if (!objects[i].empty())
{
page.objects.push_back({keys[i], std::move(objects[i])});
}
else if (!outOfOrder)
{
LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
@@ -286,7 +307,7 @@ BackendInterface::fetchLedgerPage(
LOG(gLog.error()) << msg.str();
}
}
if (keys.size() && !reachedEnd)
if (!keys.empty() && !reachedEnd)
page.cursor = keys.back();
return page;
@@ -307,7 +328,7 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
}
ripple::SerialIter it(bytes->data(), bytes->size());
ripple::SLE sle{it, key};
ripple::SLE const sle{it, key};
if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
fees.base = sle.getFieldU64(ripple::sfBaseFee);


@@ -48,6 +48,7 @@ public:
}
};
static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;
/**
* @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely.
*
@@ -58,9 +59,9 @@ public:
*/
template <class FnType>
auto
retryOnTimeout(FnType func, size_t waitMs = 500)
retryOnTimeout(FnType func, size_t waitMs = DEFAULT_WAIT_BETWEEN_RETRY)
{
static util::Logger log{"Backend"};
static util::Logger const log{"Backend"};
while (true)
{
@@ -161,7 +162,7 @@ public:
* @return The ripple::LedgerHeader if found; nullopt otherwise
*/
virtual std::optional<ripple::LedgerHeader>
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const = 0;
fetchLedgerBySequence(std::uint32_t sequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches a specific ledger by hash.
@@ -206,7 +207,7 @@ public:
* @return ripple::Fees if fees are found; nullopt otherwise
*/
std::optional<ripple::Fees>
fetchFees(std::uint32_t const seq, boost::asio::yield_context yield) const;
fetchFees(std::uint32_t seq, boost::asio::yield_context yield) const;
/**
* @brief Fetches a specific transaction.
@@ -241,7 +242,7 @@ public:
virtual TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
std::uint32_t limit,
bool forward,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context yield) const = 0;
@@ -254,7 +255,7 @@ public:
* @return Results as a vector of TransactionAndMetadata
*/
virtual std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
fetchAllTransactionsInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches all transaction hashes from a specific ledger.
@@ -264,7 +265,7 @@ public:
* @return Hashes as ripple::uint256 in a vector
*/
virtual std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
fetchAllTransactionHashesInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches a specific NFT.
@@ -275,8 +276,7 @@ public:
* @return NFT object on success; nullopt otherwise
*/
virtual std::optional<NFT>
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const = 0;
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches all transactions for a specific NFT.
@@ -291,8 +291,8 @@ public:
virtual TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::uint32_t limit,
bool forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context yield) const = 0;
@@ -308,7 +308,7 @@ public:
* @return The object as a Blob on success; nullopt otherwise
*/
std::optional<Blob>
fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield) const;
fetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;
/**
* @brief Fetches all ledger objects by their keys.
@@ -324,7 +324,7 @@ public:
std::vector<Blob>
fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
std::uint32_t sequence,
boost::asio::yield_context yield) const;
/**
@@ -336,8 +336,7 @@ public:
* @return The object as a Blob on success; nullopt otherwise
*/
virtual std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
const = 0;
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const = 0;
/**
* @brief The database-specific implementation for fetching ledger objects.
@@ -350,7 +349,7 @@ public:
virtual std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
std::uint32_t sequence,
boost::asio::yield_context yield) const = 0;
/**
@@ -361,7 +360,7 @@ public:
* @return A vector of LedgerObject representing the diff
*/
virtual std::vector<LedgerObject>
fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
fetchLedgerDiff(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches a page of ledger objects, ordered by key/index.
@@ -376,8 +375,8 @@ public:
LedgerPage
fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::uint32_t ledgerSequence,
std::uint32_t limit,
bool outOfOrder,
boost::asio::yield_context yield) const;
@@ -390,8 +389,7 @@ public:
* @return The successor on success; nullopt otherwise
*/
std::optional<LedgerObject>
fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const;
fetchSuccessorObject(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;
/**
* @brief Fetches the successor key.
@@ -405,7 +403,7 @@ public:
* @return The successor key on success; nullopt otherwise
*/
std::optional<ripple::uint256>
fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const;
fetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;
/**
* @brief Database-specific implementation of fetching the successor key
@@ -416,8 +414,7 @@ public:
* @return The successor on success; nullopt otherwise
*/
virtual std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const = 0;
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches book offers.
@@ -431,8 +428,8 @@ public:
BookOffersPage
fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::uint32_t ledgerSequence,
std::uint32_t limit,
boost::asio::yield_context yield) const;
/**
@@ -478,7 +475,7 @@ public:
* @param blob The data to write
*/
virtual void
writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob);
writeLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob);
/**
* @brief Writes a new transaction.
@@ -492,8 +489,8 @@ public:
virtual void
writeTransaction(
std::string&& hash,
std::uint32_t const seq,
std::uint32_t const date,
std::uint32_t seq,
std::uint32_t date,
std::string&& transaction,
std::string&& metadata) = 0;
@@ -529,7 +526,7 @@ public:
* @param successor The successor data to write
*/
virtual void
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0;
writeSuccessor(std::string&& key, std::uint32_t seq, std::string&& successor) = 0;
/**
* @brief Starts a write transaction with the DB. No-op for cassandra.
@@ -548,7 +545,7 @@ public:
* @return true on success; false otherwise
*/
bool
finishWrites(std::uint32_t const ledgerSequence);
finishWrites(std::uint32_t ledgerSequence);
/**
* @return true if database is overwhelmed; false otherwise
@@ -558,7 +555,7 @@ public:
private:
virtual void
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0;
doWriteLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob) = 0;
virtual bool
doFinishWrites() = 0;
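Many of the declaration changes above follow readability-avoid-const-params-in-decls: top-level `const` on a by-value parameter is not part of the function's interface, so it is dropped from declarations. A minimal hypothetical illustration (names made up, not from the codebase):
```cpp
#include <cstdint>
#include <optional>
#include <string>

struct Fetcher
{
    // Flagged form of the declaration:
    //     std::optional<std::string> fetch(std::uint32_t const sequence) const;
    // Top-level const on a by-value parameter is invisible to callers, so the check
    // wants it dropped from declarations; the out-of-line definition may still use it.
    std::optional<std::string>
    fetch(std::uint32_t sequence) const;
};

inline std::optional<std::string>
Fetcher::fetch(std::uint32_t const sequence) const
{
    return std::to_string(sequence);
}
```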


@@ -112,10 +112,10 @@ public:
if (!rng)
return {{}, {}};
Statement statement = [this, forward, &account]() {
Statement const statement = [this, forward, &account]() {
if (forward)
return schema_->selectAccountTxForward.bind(account);
else
return schema_->selectAccountTx.bind(account);
}();
@@ -288,7 +288,8 @@ public:
std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLedgerRange); res)
auto const res = executor_.read(yield, schema_->selectLedgerRange);
if (res)
{
auto const& results = res.value();
if (not results.hasRows())
@@ -305,9 +306,13 @@ public:
for (auto [seq] : extract<uint32_t>(results))
{
if (idx == 0)
{
range.maxSequence = range.minSequence = seq;
}
else if (idx == 1)
{
range.maxSequence = seq;
}
++idx;
}
@@ -319,10 +324,7 @@ public:
<< range.maxSequence;
return range;
}
else
{
LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
}
return std::nullopt;
}
@@ -417,10 +419,10 @@ public:
if (!rng)
return {{}, {}};
Statement statement = [this, forward, &tokenID]() {
Statement const statement = [this, forward, &tokenID]() {
if (forward)
return schema_->selectNFTTxForward.bind(tokenID);
else
return schema_->selectNFTTx.bind(tokenID);
}();
@@ -517,11 +519,9 @@ public:
auto [transaction, meta, seq, date] = *maybeValue;
return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
}
else
{
LOG(log_.debug()) << "Could not fetch transaction - no rows";
}
}
else
{
LOG(log_.error()) << "Could not fetch transaction: " << res.error();
@@ -542,11 +542,9 @@ public:
return std::nullopt;
return *result;
}
else
{
LOG(log_.debug()) << "Could not fetch successor - no rows";
}
}
else
{
LOG(log_.error()) << "Could not fetch successor: " << res.error();
@@ -558,7 +556,7 @@ public:
std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
{
if (hashes.size() == 0)
if (hashes.empty())
return {};
auto const numHashes = hashes.size();
@@ -583,7 +581,7 @@ public:
[](auto const& res) -> TransactionAndMetadata {
if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
return *maybeRow;
else
return {};
});
});
@@ -600,7 +598,7 @@ public:
std::uint32_t const sequence,
boost::asio::yield_context yield) const override
{
if (keys.size() == 0)
if (keys.empty())
return {};
auto const numKeys = keys.size();
@@ -623,7 +621,7 @@ public:
std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
if (auto const maybeValue = res.template get<Blob>(); maybeValue)
return *maybeValue;
else
return {};
});
@@ -715,7 +713,7 @@ public:
std::back_inserter(statements),
[this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::move(account),
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash);
});


@@ -36,8 +36,8 @@
struct AccountTransactionsData
{
boost::container::flat_set<ripple::AccountID> accounts;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
std::uint32_t ledgerSequence{};
std::uint32_t transactionIndex{};
ripple::uint256 txHash;
AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
@@ -149,8 +149,11 @@ template <class T>
inline bool
isOffer(T const& object)
{
short offer_bytes = (object[1] << 8) | object[2];
return offer_bytes == 0x006f;
static constexpr short OFFER_OFFSET = 0x006f;
static constexpr short SHIFT = 8;
short offer_bytes = (object[1] << SHIFT) | object[2];
return offer_bytes == OFFER_OFFSET;
}
/**
@@ -179,8 +182,9 @@ template <class T>
inline bool
isDirNode(T const& object)
{
short spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == 0x0064;
static constexpr short DIR_NODE_SPACE_KEY = 0x0064;
short const spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == DIR_NODE_SPACE_KEY;
}
/**
@@ -212,7 +216,7 @@ inline ripple::uint256
getBook(T const& offer)
{
ripple::SerialIter it{offer.data(), offer.size()};
ripple::SLE sle{it, {}};
ripple::SLE const sle{it, {}};
ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);
return book;
@@ -228,10 +232,12 @@ template <class T>
inline ripple::uint256
getBookBase(T const& key)
{
static constexpr size_t KEY_SIZE = 24;
assert(key.size() == ripple::uint256::size());
ripple::uint256 ret;
for (size_t i = 0; i < 24; ++i)
for (size_t i = 0; i < KEY_SIZE; ++i)
ret.data()[i] = key.data()[i];
return ret;
@@ -246,7 +252,7 @@ getBookBase(T const& key)
inline std::string
uint256ToString(ripple::uint256 const& input)
{
return {reinterpret_cast<const char*>(input.data()), input.size()};
return {reinterpret_cast<const char*>(input.data()), ripple::uint256::size()};
}
/** @brief The ripple epoch start timestamp. Midnight on 1st January 2000. */


@@ -24,7 +24,7 @@ namespace data {
uint32_t
LedgerCache::latestLedgerSequence() const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
return latestSeq_;
}
@@ -35,7 +35,7 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
return;
{
std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
if (seq > latestSeq_)
{
assert(seq == latestSeq_ + 1 || latestSeq_ == 0);
@@ -43,9 +43,9 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
}
for (auto const& obj : objs)
{
if (obj.blob.size())
if (!obj.blob.empty())
{
if (isBackground && deletes_.count(obj.key))
if (isBackground && deletes_.contains(obj.key))
continue;
auto& e = map_[obj.key];
@@ -69,7 +69,7 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
successorReqCounter_++;
if (seq != latestSeq_)
return {};
@@ -85,7 +85,7 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
if (seq != latestSeq_)
return {};
auto e = map_.lower_bound(key);
@@ -98,7 +98,7 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
std::optional<Blob>
LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
if (seq > latestSeq_)
return {};
objectReqCounter_++;
@@ -124,7 +124,7 @@ LedgerCache::setFull()
return;
full_ = true;
std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
deletes_.clear();
}
@@ -137,14 +137,14 @@ LedgerCache::isFull() const
size_t
LedgerCache::size() const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
return map_.size();
}
float
LedgerCache::getObjectHitRate() const
{
if (!objectReqCounter_)
if (objectReqCounter_ == 0u)
return 1;
return static_cast<float>(objectHitCounter_) / objectReqCounter_;
}
@@ -152,7 +152,7 @@ LedgerCache::getObjectHitRate() const
float
LedgerCache::getSuccessorHitRate() const
{
if (!successorReqCounter_)
if (successorReqCounter_ == 0u)
return 1;
return static_cast<float>(successorHitCounter_) / successorReqCounter_;
}


@@ -68,7 +68,7 @@ public:
* @param isBackground Should be set to true when writing old data from a background thread
*/
void
update(std::vector<LedgerObject> const& blobs, uint32_t seq, bool isBackground = false);
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false);
/**
* @brief Fetch a cached object by its key and sequence number.


@@ -24,6 +24,7 @@
#include <optional>
#include <string>
#include <utility>
#include <vector>
namespace data {
@@ -74,12 +75,8 @@ struct TransactionAndMetadata
std::uint32_t date = 0;
TransactionAndMetadata() = default;
TransactionAndMetadata(
Blob const& transaction,
Blob const& metadata,
std::uint32_t ledgerSequence,
std::uint32_t date)
: transaction{transaction}, metadata{metadata}, ledgerSequence{ledgerSequence}, date{date}
TransactionAndMetadata(Blob transaction, Blob metadata, std::uint32_t ledgerSequence, std::uint32_t date)
: transaction{std::move(transaction)}, metadata{std::move(metadata)}, ledgerSequence{ledgerSequence}, date{date}
{
}
@@ -118,11 +115,6 @@ struct TransactionsCursor
{
}
TransactionsCursor(TransactionsCursor const&) = default;
TransactionsCursor&
operator=(TransactionsCursor const&) = default;
bool
operator==(TransactionsCursor const& other) const = default;
@@ -148,18 +140,18 @@ struct TransactionsAndCursor
struct NFT
{
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t ledgerSequence{};
ripple::AccountID owner;
Blob uri;
bool isBurned;
bool isBurned{};
NFT() = default;
NFT(ripple::uint256 const& tokenID,
std::uint32_t ledgerSequence,
ripple::AccountID const& owner,
Blob const& uri,
Blob uri,
bool isBurned)
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{uri}, isBurned{isBurned}
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{std::move(uri)}, isBurned{isBurned}
{
}
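The constructor changes above follow modernize-pass-by-value (with performance-move-constructor-init also enabled): sink parameters are taken by value and moved into the members. A minimal hypothetical sketch of the pattern, with `Blob` standing in for the project's byte-vector alias:
```cpp
#include <string>
#include <utility>
#include <vector>

using Blob = std::vector<unsigned char>; // assumed stand-in for data::Blob

// Hypothetical record type, for illustration only.
struct Record
{
    // Before: Record(Blob const& d, std::string const& n) : data{d}, name{n} {}
    // After: by-value sink parameters are moved into the members, so temporaries are
    // moved and lvalues cost exactly one copy at the call site.
    Record(Blob data, std::string name) : data{std::move(data)}, name{std::move(name)}
    {
    }

    Blob data;
    std::string name;
};
```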


@@ -22,6 +22,7 @@
#include <cassandra.h>
#include <string>
#include <utility>
namespace data::cassandra {
@@ -31,11 +32,11 @@ namespace data::cassandra {
class CassandraError
{
std::string message_;
uint32_t code_;
uint32_t code_{};
public:
CassandraError() = default; // default constructible required by Expected
CassandraError(std::string message, uint32_t code) : message_{message}, code_{code}
CassandraError(std::string message, uint32_t code) : message_{std::move(message)}, code_{code}
{
}
@@ -91,11 +92,9 @@ public:
bool
isTimeout() const
{
if (code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
return code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
code_ == CASS_ERROR_SERVER_UNAVAILABLE or code_ == CASS_ERROR_SERVER_OVERLOADED or
code_ == CASS_ERROR_SERVER_READ_TIMEOUT)
return true;
return false;
code_ == CASS_ERROR_SERVER_READ_TIMEOUT;
}
/**


@@ -88,8 +88,9 @@ std::vector<Handle::FutureType>
Handle::asyncExecuteEach(std::vector<Statement> const& statements) const
{
std::vector<Handle::FutureType> futures;
futures.reserve(statements.size());
for (auto const& statement : statements)
futures.push_back(cass_session_execute(session_, statement));
futures.emplace_back(cass_session_execute(session_, statement));
return futures;
}
@@ -98,7 +99,7 @@ Handle::executeEach(std::vector<Statement> const& statements) const
{
for (auto futures = asyncExecuteEach(statements); auto const& future : futures)
{
if (auto const rc = future.await(); not rc)
if (auto rc = future.await(); not rc)
return rc;
}
@@ -145,10 +146,11 @@ Handle::asyncExecute(std::vector<Statement> const& statements, std::function<voi
Handle::PreparedStatementType
Handle::prepare(std::string_view query) const
{
Handle::FutureType future = cass_session_prepare(session_, query.data());
if (auto const rc = future.await(); rc)
Handle::FutureType const future = cass_session_prepare(session_, query.data());
auto const rc = future.await();
if (rc)
return cass_future_get_prepared(future);
else
throw std::runtime_error(rc.error().message());
}


@@ -20,6 +20,7 @@
#include <data/cassandra/SettingsProvider.h>
#include <data/cassandra/impl/Cluster.h>
#include <data/cassandra/impl/Statement.h>
#include <util/Constants.h>
#include <util/config/Config.h>
#include <boost/json.hpp>
@@ -35,11 +36,13 @@ inline Settings::ContactPoints
tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::value const& value)
{
if (not value.is_object())
{
throw std::runtime_error(
"Feed entire Cassandra section to parse "
"Settings::ContactPoints instead");
}
util::Config obj{value};
util::Config const obj{value};
Settings::ContactPoints out;
out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
@@ -123,11 +126,11 @@ SettingsProvider::parseSettings() const
auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
if (connectTimeoutSecond)
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * 1000};
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * util::MILLISECONDS_PER_SECOND};
auto const requestTimeoutSecond = config_.maybeValue<uint32_t>("request_timeout");
if (requestTimeoutSecond)
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * 1000};
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * util::MILLISECONDS_PER_SECOND};
settings.certificate = parseOptionalCertificate();
settings.username = config_.maybeValue<std::string>("username");


@@ -98,20 +98,24 @@ private:
auto handler = [this, &handle, self](auto&& res) mutable {
if (res)
{
onComplete_(std::move(res));
onComplete_(std::forward<decltype(res)>(res));
}
else
{
if (retryPolicy_.shouldRetry(res.error()))
{
retryPolicy_.retry([self, &handle]() { self->execute(handle); });
}
else
onComplete_(std::move(res)); // report error
{
onComplete_(std::forward<decltype(res)>(res)); // report error
}
}
self = nullptr; // explicitly decrement refcount
};
std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
future_.emplace(handle.asyncExecute(data_, std::move(handler)));
}
};


@@ -26,8 +26,8 @@
#include <vector>
namespace {
static constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
};
constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
} // namespace
namespace data::cassandra::detail {
@@ -38,8 +38,10 @@ Batch::Batch(std::vector<Statement> const& statements)
cass_batch_set_is_idempotent(*this, cass_true);
for (auto const& statement : statements)
{
if (auto const res = add(statement); not res)
throw std::runtime_error("Failed to add statement to batch: " + res.error());
}
}
MaybeError


@@ -28,7 +28,7 @@
#include <vector>
namespace {
static constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
template <class... Ts>
struct overloadSet : Ts...
@@ -102,8 +102,10 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
using std::to_string;
auto throwErrorIfNeeded = [](CassError rc, std::string const& label, std::string const& value) {
if (rc != CASS_OK)
{
throw std::runtime_error(
fmt::format("Cassandra: Error setting {} [{}]: {}", label, value, cass_error_desc(rc)));
}
};
{
@@ -136,7 +138,7 @@ Cluster::setupCertificate(Settings const& settings)
return;
LOG(log_.debug()) << "Configure SSL context";
SslContext context = SslContext(*settings.certificate);
SslContext const context = SslContext(*settings.certificate);
cass_cluster_set_ssl(*this, context);
}


@@ -40,6 +40,9 @@ namespace data::cassandra::detail {
*/
struct Settings
{
static constexpr std::size_t DEFAULT_CONNECTION_TIMEOUT = 10000;
static constexpr uint32_t DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
static constexpr uint32_t DEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
/**
* @brief Represents the configuration of contact points for cassandra.
*/
@@ -61,7 +64,7 @@ struct Settings
bool enableLog = false;
/** @brief Connect timeout specified in milliseconds */
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{10000};
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{DEFAULT_CONNECTION_TIMEOUT};
/** @brief Request timeout specified in milliseconds */
std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
@@ -73,10 +76,10 @@ struct Settings
uint32_t threads = std::thread::hardware_concurrency();
/** @brief The maximum number of outstanding write requests at any given moment */
uint32_t maxWriteRequestsOutstanding = 10'000u;
uint32_t maxWriteRequestsOutstanding = DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;
/** @brief The maximum number of outstanding read requests at any given moment */
uint32_t maxReadRequestsOutstanding = 100'000u;
uint32_t maxReadRequestsOutstanding = DEFAULT_MAX_READ_REQUESTS_OUTSTANDING;
/** @brief The number of connection per host to always have active */
uint32_t coreConnectionsPerHost = 1u;


@@ -131,17 +131,16 @@ public:
{
while (true)
{
if (auto res = handle_.get().execute(statement); res)
auto res = handle_.get().execute(statement);
if (res)
{
return res;
}
else
{
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
}
}
/**
* @brief Blocking query execution used for writing data.
@@ -242,7 +241,7 @@ public:
future.emplace(handle_.get().asyncExecute(statements, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::move(res)]() mutable { sself->complete(std::move(res)); });
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); });
}));
};
@@ -254,13 +253,11 @@ public:
{
return res;
}
else
{
LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();
throwErrorIfNeeded(res.error());
}
}
}
/**
* @brief Coroutine-based query execution used for reading data.
@@ -287,7 +284,7 @@ public:
future.emplace(handle_.get().asyncExecute(statement, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::move(res)]() mutable { sself->complete(std::move(res)); });
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); });
}));
};
@@ -296,16 +293,12 @@ public:
--numReadRequestsOutstanding_;
if (res)
{
return res;
}
else
{
LOG(log_.error()) << "Failed read in coroutine: " << res.error();
throwErrorIfNeeded(res.error());
}
}
}
/**
* @brief Coroutine-based query execution used for reading data.
@@ -336,8 +329,10 @@ public:
// when all async operations complete unblock the result
if (--numOutstanding == 0)
{
boost::asio::post(
boost::asio::get_associated_executor(*sself), [sself]() mutable { sself->complete(); });
}
};
std::transform(
@@ -400,18 +395,18 @@ private:
assert(false);
throw std::runtime_error("decrementing num outstanding below 0");
}
size_t cur = (--numWriteRequestsOutstanding_);
size_t const cur = (--numWriteRequestsOutstanding_);
{
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard lck(throttleMutex_);
std::lock_guard const lck(throttleMutex_);
throttleCv_.notify_one();
}
if (cur == 0)
{
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard lck(syncMutex_);
std::lock_guard const lck(syncMutex_);
syncCv_.notify_one();
}
}


@@ -25,7 +25,7 @@
#include <vector>
namespace {
static constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
} // namespace
namespace data::cassandra::detail {
@@ -40,8 +40,8 @@ Future::await() const
if (auto const rc = cass_future_error_code(*this); rc)
{
auto errMsg = [this](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(*this, &message, &len);
return label + ": " + std::string{message, len};
}(cass_error_desc(rc));
@@ -56,17 +56,15 @@ Future::get() const
if (auto const rc = cass_future_error_code(*this); rc)
{
auto const errMsg = [this](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(*this, &message, &len);
return label + ": " + std::string{message, len};
}("future::get()");
return Error{CassandraError{errMsg, rc}};
}
else
{
return Result{cass_future_get_result(*this)};
}
}
void
@@ -80,8 +78,8 @@ invokeHelper(CassFuture* ptr, void* cbPtr)
if (auto const rc = cass_future_error_code(ptr); rc)
{
auto const errMsg = [&ptr](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(ptr, &message, &len);
return label + ": " + std::string{message, len};
}("invokeHelper");


@@ -38,7 +38,7 @@ struct Future : public ManagedObject<CassFuture>
};
void
invokeHelper(CassFuture* ptr, void* self);
invokeHelper(CassFuture* ptr, void* cbPtr);
class FutureWithCallback : public Future
{


@@ -36,7 +36,6 @@ public:
if (rawPtr == nullptr)
throw std::runtime_error("Could not create DB object - got nullptr");
}
ManagedObject(ManagedObject&&) = default;
operator Managed*() const
{


@@ -20,8 +20,8 @@
#include <data/cassandra/impl/Result.h>
namespace {
static constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
static constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace
namespace data::cassandra::detail {
@@ -43,7 +43,7 @@ Result::hasRows() const
}
/* implicit */ ResultIterator::ResultIterator(CassIterator* ptr)
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr)}
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr) != 0u}
{
}
@@ -56,7 +56,7 @@ ResultIterator::fromResult(Result const& result)
[[maybe_unused]] bool
ResultIterator::moveForward()
{
hasMore_ = cass_iterator_next(*this);
hasMore_ = (cass_iterator_next(*this) != 0u);
return hasMore_;
}


@@ -57,24 +57,24 @@ extractColumn(CassRow const* row, std::size_t idx)
if constexpr (std::is_same_v<DecayedType, ripple::uint256>)
{
cass_byte_t const* buf;
std::size_t bufSize;
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract ripple::uint256");
output = ripple::uint256::fromVoid(buf);
}
else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>)
{
cass_byte_t const* buf;
std::size_t bufSize;
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract ripple::AccountID");
output = ripple::AccountID::fromVoid(buf);
}
else if constexpr (std::is_same_v<DecayedType, UCharVectorType>)
{
cass_byte_t const* buf;
std::size_t bufSize;
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract vector<unsigned char>");
output = UCharVectorType{buf, buf + bufSize};
@@ -86,23 +86,23 @@ extractColumn(CassRow const* row, std::size_t idx)
}
else if constexpr (std::is_convertible_v<DecayedType, std::string>)
{
char const* value;
std::size_t len;
char const* value = nullptr;
std::size_t len = 0;
auto const rc = cass_value_get_string(cass_row_get_column(row, idx), &value, &len);
throwErrorIfNeeded(rc, "Extract string");
output = std::string{value, len};
}
else if constexpr (std::is_same_v<DecayedType, bool>)
{
cass_bool_t flag;
cass_bool_t flag = cass_bool_t::cass_false;
auto const rc = cass_value_get_bool(cass_row_get_column(row, idx), &flag);
throwErrorIfNeeded(rc, "Extract bool");
output = flag ? true : false;
output = flag != cass_bool_t::cass_false;
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
int64_t out;
int64_t out = 0;
auto const rc = cass_value_get_int64(cass_row_get_column(row, idx), &out);
throwErrorIfNeeded(rc, "Extract int64");
output = static_cast<DecayedType>(out);


@@ -84,7 +84,7 @@ public:
/**
* @brief Calculates the wait time before attempting another retry
*/
std::chrono::milliseconds
static std::chrono::milliseconds
calculateDelay(uint32_t attempt)
{
return std::chrono::milliseconds{lround(std::pow(2, std::min(10u, attempt)))};


@@ -20,7 +20,7 @@
#include <data/cassandra/impl/SslContext.h>
namespace {
static constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
} // namespace
namespace data::cassandra::detail {


@@ -64,8 +64,6 @@ public:
cass_statement_set_is_idempotent(*this, cass_true);
}
Statement(Statement&&) = default;
/**
* @brief Binds the given arguments to the statement.
*
@@ -75,7 +73,7 @@ public:
void
bind(Args&&... args) const
{
std::size_t idx = 0;
std::size_t idx = 0; // NOLINT(misc-const-correctness)
(this->bindAt<Args>(idx++, std::forward<Args>(args)), ...);
}
@@ -126,7 +124,7 @@ public:
}
else if constexpr (std::is_same_v<DecayedType, UintTupleType>)
{
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::move(value)});
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::forward<Type>(value)});
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32>");
}
else if constexpr (std::is_same_v<DecayedType, bool>)


@@ -20,8 +20,8 @@
#include <data/cassandra/impl/Tuple.h>
namespace {
static constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
static constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace
namespace data::cassandra::detail {


@@ -131,7 +131,7 @@ private:
// clio only uses bigint (int64_t) so we convert any incoming type
if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
int64_t out;
int64_t out = 0;
auto const rc = cass_value_get_int64(cass_iterator_get_value(*this), &out);
throwErrorIfNeeded(rc, "Extract int64 from tuple");
output = static_cast<DecayedType>(out);


@@ -62,7 +62,7 @@ public:
void
push(uint32_t idx)
{
std::lock_guard lck(m_);
std::lock_guard const lck(m_);
if (!max_ || idx > *max_)
max_ = idx;
cv_.notify_all();
@@ -96,9 +96,13 @@ public:
std::unique_lock lck(m_);
auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
if (maxWaitMs)
{
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
}
else
{
cv_.wait(lck, pred);
}
return pred();
}
};
@@ -190,7 +194,7 @@ public:
std::optional<T>
tryPop()
{
std::scoped_lock lck(m_);
std::scoped_lock const lck(m_);
if (queue_.empty())
return {};
@@ -212,7 +216,7 @@ getMarkers(size_t numMarkers)
{
assert(numMarkers <= 256);
unsigned char incr = 256 / numMarkers;
unsigned char const incr = 256 / numMarkers;
std::vector<ripple::uint256> markers;
markers.reserve(numMarkers);


@@ -18,9 +18,12 @@
//==============================================================================
#include <etl/ETLService.h>
#include <util/Constants.h>
#include <ripple/protocol/LedgerHeader.h>
#include <utility>
namespace etl {
// Database must be populated when this starts
std::optional<uint32_t>
@@ -44,8 +47,10 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
auto pipe = DataPipeType{numExtractors, startSequence};
for (auto i = 0u; i < numExtractors; ++i)
{
extractors.push_back(std::make_unique<ExtractorType>(
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_));
}
auto transformer =
TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, startSequence, state_};
@@ -58,8 +63,9 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
auto const end = std::chrono::system_clock::now();
auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
static constexpr auto NANOSECONDS_PER_SECOND = 1'000'000'000.0;
LOG(log_.debug()) << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
<< ((end - begin).count()) / 1000000000.0;
<< ((end - begin).count()) / NANOSECONDS_PER_SECOND;
state_.isWriting = false;
@@ -154,7 +160,7 @@ ETLService::publishNextSequence(uint32_t nextSequence)
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
}
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::MILLISECONDS_PER_SECOND))
{
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";
@@ -166,7 +172,7 @@ ETLService::publishNextSequence(uint32_t nextSequence)
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);
bool const success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);
if (!success)
{
@@ -199,14 +205,13 @@ ETLService::monitorReadOnly()
if (!rng)
{
if (auto net = networkValidatedLedgers_->getMostRecent())
{
return *net;
else
}
return std::nullopt;
}
else
{
return rng->maxSequence;
}
}();
if (!latestSequenceOpt.has_value())
@@ -230,7 +235,7 @@ ETLService::monitorReadOnly()
{
// if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs first.
// Even if we don't hear from rippled, if ledgers are being written to the db, we publish them.
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, 1000);
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::MILLISECONDS_PER_SECOND);
}
}
}
@@ -251,9 +256,13 @@ ETLService::doWork()
beast::setCurrentThreadName("ETLService worker");
if (state_.isReadOnly)
{
monitorReadOnly();
}
else
{
monitor();
}
});
}
@@ -266,7 +275,7 @@ ETLService::ETLService(
std::shared_ptr<NetworkValidatedLedgersType> ledgers)
: backend_(backend)
, loadBalancer_(balancer)
, networkValidatedLedgers_(ledgers)
, networkValidatedLedgers_(std::move(ledgers))
, cacheLoader_(config, ioc, backend, backend->cache())
, ledgerFetcher_(backend, balancer)
, ledgerLoader_(backend, balancer, ledgerFetcher_, state_)


@@ -46,7 +46,7 @@ struct NFTTransactionsData;
struct NFTsData;
namespace feed {
class SubscriptionManager;
}
} // namespace feed
/**
* @brief This namespace contains everything to do with the ETL and ETL sources.
@@ -252,7 +252,7 @@ private:
* @return true if stopping; false otherwise
*/
bool
isStopping()
isStopping() const
{
return state_.isStopping;
}
@@ -265,7 +265,7 @@ private:
* @return the number of markers
*/
std::uint32_t
getNumMarkers()
getNumMarkers() const
{
return numMarkers_;
}


@@ -73,10 +73,15 @@ LoadBalancer::LoadBalancer(
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
{
static constexpr std::uint32_t MAX_DOWNLOAD = 256;
if (auto value = config.maybeValue<uint32_t>("num_markers"); value)
downloadRanges_ = std::clamp(*value, 1u, 256u);
{
downloadRanges_ = std::clamp(*value, 1u, MAX_DOWNLOAD);
}
else if (backend->fetchLedgerRange())
{
downloadRanges_ = 4;
}
for (auto const& entry : config.array("etl_sources"))
{
@@ -101,10 +106,14 @@ LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);
if (!res)
{
LOG(log_.error()) << "Failed to download initial ledger."
<< " Sequence = " << sequence << " source = " << source->toString();
}
else
{
response = std::move(data);
}
return res;
},
@@ -116,7 +125,7 @@ LoadBalancer::OptionalGetLedgerResponseType
LoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
GetLedgerResponseType response;
bool success = execute(
bool const success = execute(
[&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](auto& source) {
auto [status, data] = source->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
response = std::move(data);
@@ -126,18 +135,17 @@ LoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObje
<< " from source = " << source->toString();
return true;
}
else
{
LOG(log.warn()) << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
return false;
}
},
ledgerSequence);
if (success)
{
return response;
else
}
return {};
}
@@ -209,19 +217,17 @@ LoadBalancer::execute(Func f, uint32_t ledgerSequence)
This || true is only needed when loading full history standalone */
if (source->hasLedger(ledgerSequence))
{
bool res = f(source);
bool const res = f(source);
if (res)
{
LOG(log_.debug()) << "Successfully executed func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
break;
}
else
{
LOG(log_.warn()) << "Failed to execute func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
}
else
{
LOG(log_.warn()) << "Ledger not present at source = " << source->toString()


@@ -55,9 +55,12 @@ public:
using OptionalGetLedgerResponseType = std::optional<GetLedgerResponseType>;
private:
static constexpr std::uint32_t DEFAULT_DOWNLOAD_RANGES = 16;
util::Logger log_{"ETL"};
std::vector<std::unique_ptr<Source>> sources_;
std::uint32_t downloadRanges_ = 16; /*< The number of markers to use when downloading initial ledger */
std::uint32_t downloadRanges_ =
DEFAULT_DOWNLOAD_RANGES; /*< The number of markers to use when downloading initial ledger */
public:
/**


@@ -106,8 +106,10 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
// There should always be a difference so the returned finalIDs
// iterator should never be end(). But better safe than sorry.
if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner)
{
throw std::runtime_error(
fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID())));
}
return {
{NFTTransactionsData(*diff.first, txMeta, sttx.getTransactionID())},
@@ -147,8 +149,10 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
}
else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
{
prevNFTs =
node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
}
if (!prevNFTs)
continue;
@@ -158,6 +162,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != prevNFTs->end())
{
return std::make_pair(
txs,
NFTsData(
@@ -166,6 +171,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
txMeta,
true));
}
}
std::stringstream msg;
msg << " - could not determine owner at burntime for tx " << sttx.getTransactionID();
@@ -235,9 +241,11 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
ripple::STArray const& nfts = [&node] {
if (node.getFName() == ripple::sfCreatedNode)
{
return node.peekAtField(ripple::sfNewFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
}
return node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
@@ -247,10 +255,12 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != nfts.end())
{
return {
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
NFTsData(tokenID, nodeOwner, txMeta, false)};
}
}
std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();

View File

@@ -46,6 +46,6 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
* @return The NFT data as a vector
*/
std::vector<NFTsData>
getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob);
getNFTDataFromObj(std::uint32_t seq, std::string const& key, std::string const& blob);
} // namespace etl

View File

@@ -147,7 +147,7 @@ ProbingSource::make_SSLHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
if (currentSrc_)
return SourceHooks::Action::STOP;
@@ -161,7 +161,7 @@ ProbingSource::make_SSLHooks() noexcept
},
// onDisconnected
[this](auto /* ec */) {
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
if (currentSrc_)
{
currentSrc_ = nullptr;
@@ -176,7 +176,7 @@ ProbingSource::make_PlainHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
if (currentSrc_)
return SourceHooks::Action::STOP;
@@ -190,7 +190,7 @@ ProbingSource::make_PlainHooks() noexcept
},
// onDisconnected
[this](auto /* ec */) {
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
if (currentSrc_)
{
currentSrc_ = nullptr;
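Marking the lock guards `const` is the misc-const-correctness fix: a `std::lock_guard` is never reassigned after construction, so the check asks for `const`, which changes nothing about when the mutex is locked or released. A small sketch under that assumption:

#include <mutex>

class Counter
{
    mutable std::mutex mtx_;
    int value_ = 0;

public:
    int
    get() const
    {
        std::lock_guard const lock(mtx_);  // const guard: still locks/unlocks as usual
        return value_;
    }

    void
    bump()
    {
        std::lock_guard const lock(mtx_);
        ++value_;
    }
};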

View File

@@ -75,7 +75,7 @@ public:
LoadBalancer& balancer,
boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12});
~ProbingSource() = default;
~ProbingSource() override = default;
void
run() override;
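Adding `override` to the defaulted destructor is the modernize-use-override fix: when the base class destructor is virtual, spelling `override` lets the compiler verify that relationship. A standalone sketch (the type names here are made up, not the Clio classes):

struct SourceBase
{
    virtual ~SourceBase() = default;
    virtual void run() = 0;
};

struct ProbingLikeSource : SourceBase
{
    ~ProbingLikeSource() override = default;  // fails to compile if ~SourceBase() stops being virtual
    void
    run() override
    {
    }
};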

View File

@@ -38,11 +38,11 @@
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <grpcpp/grpcpp.h>
#include <utility>
class ProbingSource;
namespace feed {
class SubscriptionManager;
}
} // namespace feed
// TODO: we use Source so that we can store a vector of Sources
// but we also use CRTP for implementation of the common logic - this is a bit strange because CRTP as used here is
@@ -51,6 +51,7 @@ class SubscriptionManager;
// things into the base class instead.
namespace etl {
class ProbingSource;
/**
* @brief Base class for all ETL sources.
@@ -206,7 +207,7 @@ class SourceImpl : public Source
LoadBalancer& balancer_;
etl::detail::ForwardCache forwardCache_;
boost::uuids::uuid uuid_;
boost::uuids::uuid uuid_{};
protected:
std::string ip_;
@@ -245,15 +246,15 @@ public:
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
LoadBalancer& balancer,
SourceHooks hooks)
: networkValidatedLedgers_(validatedLedgers)
, backend_(backend)
, subscriptions_(subscriptions)
: networkValidatedLedgers_(std::move(validatedLedgers))
, backend_(std::move(backend))
, subscriptions_(std::move(subscriptions))
, balancer_(balancer)
, forwardCache_(config, ioc, *this)
, strand_(boost::asio::make_strand(ioc))
, timer_(strand_)
, resolver_(strand_)
, hooks_(hooks)
, hooks_(std::move(hooks))
{
static boost::uuids::random_generator uuidGenerator;
uuid_ = uuidGenerator();
@@ -266,7 +267,7 @@ public:
grpcPort_ = *value;
try
{
boost::asio::ip::tcp::endpoint endpoint{boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
boost::asio::ip::tcp::endpoint const endpoint{boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
std::stringstream ss;
ss << endpoint;
grpc::ChannelArguments chArgs;
@@ -282,7 +283,7 @@ public:
}
}
~SourceImpl()
~SourceImpl() override
{
derived().close(false);
}
@@ -316,7 +317,7 @@ public:
}
namespace beast = boost::beast;
namespace http = beast::http;
namespace http = boost::beast::http;
namespace websocket = beast::websocket;
namespace net = boost::asio;
using tcp = boost::asio::ip::tcp;
@@ -324,7 +325,7 @@ public:
try
{
auto executor = boost::asio::get_associated_executor(yield);
boost::beast::error_code ec;
beast::error_code ec;
tcp::resolver resolver{executor};
auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(executor);
@@ -384,14 +385,14 @@ public:
bool
hasLedger(uint32_t sequence) const override
{
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
for (auto& pair : validatedLedgers_)
{
if (sequence >= pair.first && sequence <= pair.second)
{
return true;
}
else if (sequence < pair.first)
if (sequence < pair.first)
{
// validatedLedgers_ is a sorted list of disjoint ranges
// if the sequence comes before this range, the sequence will
@@ -420,7 +421,7 @@ public:
request.set_get_object_neighbors(getObjectNeighbors);
request.set_user("ETL");
grpc::Status status = stub_->GetLedger(&context, request, &response);
grpc::Status const status = stub_->GetLedger(&context, request, &response);
if (status.ok() && !response.is_unlimited())
{
@@ -452,9 +453,11 @@ public:
auto last = getLastMsgTime();
if (last.time_since_epoch().count() != 0)
{
res["last_msg_age_seconds"] = std::to_string(
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - getLastMsgTime())
.count());
}
return res;
}
@@ -466,7 +469,7 @@ public:
return {{}, false};
grpc::CompletionQueue cq;
void* tag;
void* tag = nullptr;
bool ok = false;
std::vector<etl::detail::AsyncCallData> calls;
auto markers = getMarkers(numMarkers);
@@ -488,7 +491,7 @@ public:
size_t numFinished = 0;
bool abort = false;
size_t incr = 500000;
size_t const incr = 500000;
size_t progress = incr;
std::vector<std::string> edgeKeys;
@@ -502,8 +505,7 @@ public:
LOG(log_.error()) << "loadInitialLedger - ok is false";
return {{}, false}; // handle cancelled
}
else
{
LOG(log_.trace()) << "Marker prefix = " << ptr->getMarkerPrefix();
auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
@@ -513,9 +515,9 @@ public:
LOG(log_.debug()) << "Finished a marker. "
<< "Current number of finished = " << numFinished;
std::string lastKey = ptr->getLastKey();
std::string const lastKey = ptr->getLastKey();
if (lastKey.size())
if (!lastKey.empty())
edgeKeys.push_back(ptr->getLastKey());
}
@@ -528,7 +530,6 @@ public:
progress += incr;
}
}
}
LOG(log_.info()) << "Finished loadInitialLedger. cache size = " << backend_->cache().size();
return {std::move(edgeKeys), !abort};
@@ -577,7 +578,9 @@ public:
}
else
{
boost::beast::get_lowest_layer(derived().ws()).expires_after(std::chrono::seconds(30));
static constexpr std::size_t LOWEST_LAYER_TIMEOUT_SECONDS = 30;
boost::beast::get_lowest_layer(derived().ws())
.expires_after(std::chrono::seconds(LOWEST_LAYER_TIMEOUT_SECONDS));
boost::beast::get_lowest_layer(derived().ws()).async_connect(results, [this](auto ec, auto ep) {
derived().onConnect(ec, ep);
});
@@ -602,7 +605,7 @@ public:
}
else
{
boost::json::object jv{
boost::json::object const jv{
{"command", "subscribe"},
{"streams", {"ledger", "manifests", "validations", "transactions_proposed"}},
};
@@ -632,10 +635,14 @@ public:
onWrite(boost::beast::error_code ec, [[maybe_unused]] size_t size)
{
if (ec)
{
reconnect(ec);
}
else
{
derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
}
}
/**
* @brief Callback for data available to read.
@@ -757,6 +764,7 @@ protected:
void
reconnect(boost::beast::error_code ec)
{
static constexpr std::size_t BUFFER_SIZE = 128;
if (paused_)
return;
@@ -776,7 +784,7 @@ protected:
boost::lexical_cast<std::string>(ERR_GET_REASON(ec.value())) + ") ";
// ERR_PACK /* crypto/err/err.h */
char buf[128];
char buf[BUFFER_SIZE];
::ERR_error_string_n(ec.value(), buf, sizeof(buf));
err += buf;
@@ -793,11 +801,11 @@ protected:
}
// exponentially increasing timeouts, with a max of 30 seconds
size_t waitTime = std::min(pow(2, numFailures_), 30.0);
size_t const waitTime = std::min(pow(2, numFailures_), 30.0);
numFailures_++;
timer_.expires_after(boost::asio::chrono::seconds(waitTime));
timer_.async_wait([this](auto ec) {
bool startAgain = (ec != boost::asio::error::operation_aborted);
bool const startAgain = (ec != boost::asio::error::operation_aborted);
derived().close(startAgain);
});
}
@@ -806,14 +814,14 @@ private:
void
setLastMsgTime()
{
std::lock_guard lck(lastMsgTimeMtx_);
std::lock_guard const lck(lastMsgTimeMtx_);
lastMsgTime_ = std::chrono::system_clock::now();
}
std::chrono::system_clock::time_point
getLastMsgTime() const
{
std::lock_guard lck(lastMsgTimeMtx_);
std::lock_guard const lck(lastMsgTimeMtx_);
return lastMsgTime_;
}
@@ -831,21 +839,21 @@ private:
if (minAndMax.size() == 1)
{
uint32_t sequence = std::stoll(minAndMax[0]);
pairs.push_back(std::make_pair(sequence, sequence));
uint32_t const sequence = std::stoll(minAndMax[0]);
pairs.emplace_back(sequence, sequence);
}
else
{
assert(minAndMax.size() == 2);
uint32_t min = std::stoll(minAndMax[0]);
uint32_t max = std::stoll(minAndMax[1]);
pairs.push_back(std::make_pair(min, max));
uint32_t const min = std::stoll(minAndMax[0]);
uint32_t const max = std::stoll(minAndMax[1]);
pairs.emplace_back(min, max);
}
}
std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { return left.first < right.first; });
// we only hold the lock here, to avoid blocking during string processing
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
validatedLedgers_ = std::move(pairs);
validatedLedgersRaw_ = range;
}
@@ -853,7 +861,7 @@ private:
std::string
getValidatedRange() const
{
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
return validatedLedgersRaw_;
}
};
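The constructor changes above - taking the `shared_ptr` and hook parameters by value and `std::move`-ing them into members - follow the modernize-pass-by-value guidance: a by-value sink parameter is moved exactly once instead of triggering an extra copy or reference-count bump. A minimal sketch, not the real SourceImpl:

#include <memory>
#include <string>
#include <utility>

struct Backend
{
    std::string name;
};

class Consumer
{
    std::shared_ptr<Backend> backend_;
    std::string label_;

public:
    // Sink parameters are taken by value and moved into the members,
    // so callers passing temporaries pay no extra copy.
    Consumer(std::shared_ptr<Backend> backend, std::string label)
        : backend_(std::move(backend)), label_(std::move(label))
    {
    }
};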

View File

@@ -35,7 +35,7 @@ struct AmendmentBlockAction
void
operator()()
{
static util::Logger log{"ETL"};
static util::Logger const log{"ETL"};
LOG(log.fatal())
<< "Can't process new ledgers: The current ETL source is not compatible with the version of the "
"libxrpl Clio is currently using. Please upgrade Clio to a newer version.";

View File

@@ -19,6 +19,7 @@
#pragma once
#include <data/BackendInterface.h>
#include <etl/NFTHelpers.h>
#include <util/log/Logger.h>
@@ -48,14 +49,14 @@ public:
request_.mutable_ledger()->set_sequence(seq);
if (marker.isNonZero())
{
request_.set_marker(marker.data(), marker.size());
request_.set_marker(marker.data(), ripple::uint256::size());
}
request_.set_user("ETL");
nextPrefix_ = 0x00;
if (nextMarker)
nextPrefix_ = nextMarker->data()[0];
unsigned char prefix = marker.data()[0];
unsigned char const prefix = marker.data()[0];
LOG(log_.debug()) << "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
<< " . prefix = " << ripple::strHex(std::string(1, prefix))
@@ -102,18 +103,18 @@ public:
bool more = true;
// if no marker returned, we are done
if (cur_->marker().size() == 0)
if (cur_->marker().empty())
more = false;
// if returned marker is greater than our end, we are done
unsigned char prefix = cur_->marker()[0];
unsigned char const prefix = cur_->marker()[0];
if (nextPrefix_ != 0x00 && prefix >= nextPrefix_)
more = false;
// if we are not done, make the next async call
if (more)
{
request_.set_marker(std::move(cur_->marker()));
request_.set_marker(cur_->marker());
call(stub, cq);
}
@@ -136,7 +137,7 @@ public:
{obj.mutable_data()->begin(), obj.mutable_data()->end()}});
if (!cacheOnly)
{
if (lastKey_.size())
if (!lastKey_.empty())
backend.writeSuccessor(std::move(lastKey_), request_.ledger().sequence(), std::string{obj.key()});
lastKey_ = obj.key();
backend.writeNFTs(getNFTDataFromObj(request_.ledger().sequence(), obj.key(), obj.data()));
@@ -166,9 +167,10 @@ public:
std::string
getMarkerPrefix()
{
if (next_->marker().size() == 0)
if (next_->marker().empty())
{
return "";
else
}
return ripple::strHex(std::string{next_->marker().data()[0]});
}
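Several hunks in this file swap `size() == 0` (or a truthy `size()`) for `empty()`, which is the readability-container-size-empty fix; `empty()` states the intent directly and is guaranteed O(1) for every standard container. Sketch:

#include <string>
#include <vector>

bool
hasPendingKeys(std::vector<std::string> const& keys)
{
    // Preferred over `keys.size() != 0` or `keys.size() > 0`.
    return !keys.empty();
}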

View File

@@ -42,6 +42,10 @@ namespace etl::detail {
template <typename CacheType>
class CacheLoader
{
static constexpr size_t DEFAULT_NUM_CACHE_DIFFS = 32;
static constexpr size_t DEFAULT_NUM_CACHE_MARKERS = 48;
static constexpr size_t DEFAULT_CACHE_PAGE_FETCH_SIZE = 512;
enum class LoadStyle { ASYNC, SYNC, NOT_AT_ALL };
util::Logger log_{"ETL"};
@@ -52,18 +56,18 @@ class CacheLoader
LoadStyle cacheLoadStyle_ = LoadStyle::ASYNC;
// number of diffs to use to generate cursors to traverse the ledger in parallel during initial cache download
size_t numCacheDiffs_ = 32;
size_t numCacheDiffs_ = DEFAULT_NUM_CACHE_DIFFS;
// number of markers to use at one time to traverse the ledger in parallel during initial cache download
size_t numCacheMarkers_ = 48;
size_t numCacheMarkers_ = DEFAULT_NUM_CACHE_MARKERS;
// number of ledger objects to fetch concurrently per marker during cache download
size_t cachePageFetchSize_ = 512;
size_t cachePageFetchSize_ = DEFAULT_CACHE_PAGE_FETCH_SIZE;
struct ClioPeer
{
std::string ip;
int port;
int port{};
};
std::vector<ClioPeer> clioPeers_;
@@ -107,7 +111,7 @@ public:
clioPeers_.push_back({ip, port});
}
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
unsigned const seed = std::chrono::system_clock::now().time_since_epoch().count();
std::shuffle(std::begin(clioPeers_), std::end(clioPeers_), std::default_random_engine(seed));
}
}
@@ -142,7 +146,7 @@ public:
return;
}
if (clioPeers_.size() > 0)
if (!clioPeers_.empty())
{
boost::asio::spawn(ioContext_.get(), [this, seq](boost::asio::yield_context yield) {
for (auto const& peer : clioPeers_)
@@ -157,16 +161,15 @@ public:
});
return;
}
else
{
loadCacheFromDb(seq);
}
// If loading synchronously, poll cache until full
static constexpr size_t SLEEP_TIME_SECONDS = 10;
while (cacheLoadStyle_ == LoadStyle::SYNC && not cache_.get().isFull())
{
LOG(log_.debug()) << "Cache not full. Cache size = " << cache_.get().size() << ". Sleeping ...";
std::this_thread::sleep_for(std::chrono::seconds(10));
std::this_thread::sleep_for(std::chrono::seconds(SLEEP_TIME_SECONDS));
if (cache_.get().isFull())
LOG(log_.info()) << "Cache is full. Cache size = " << cache_.get().size();
}
@@ -188,13 +191,12 @@ private:
{
LOG(log_.info()) << "Loading cache from peer. ip = " << ip << " . port = " << port;
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from
namespace net = boost::asio; // from
using tcp = boost::asio::ip::tcp; // from
try
{
boost::beast::error_code ec;
beast::error_code ec;
// These objects perform our I/O
tcp::resolver resolver{ioContext_.get()};
@@ -221,13 +223,14 @@ private:
std::optional<boost::json::value> marker;
LOG(log_.trace()) << "Sending request";
static constexpr int LIMIT = 2048;
auto getRequest = [&](auto marker) {
boost::json::object request = {
{"command", "ledger_data"},
{"ledger_index", ledgerIndex},
{"binary", true},
{"out_of_order", true},
{"limit", 2048}};
{"limit", LIMIT}};
if (marker)
request["marker"] = *marker;
@@ -270,8 +273,9 @@ private:
auto const& err = response.at("error");
if (err.is_string() && err.as_string() == "lgrNotFound")
{
static constexpr size_t MAX_ATTEMPTS = 5;
++numAttempts;
if (numAttempts >= 5)
if (numAttempts >= MAX_ATTEMPTS)
{
LOG(log_.error()) << " ledger not found at peer after 5 attempts. "
"peer = "
@@ -295,9 +299,13 @@ private:
return false;
}
if (response.contains("marker"))
{
marker = response.at("marker");
}
else
{
marker = {};
}
auto const& state = response.at("state").as_array();
@@ -356,16 +364,20 @@ private:
diff.erase(std::unique(diff.begin(), diff.end(), [](auto a, auto b) { return a.key == b.key; }), diff.end());
cursors.push_back({});
cursors.emplace_back();
for (auto const& obj : diff)
if (obj.blob.size())
cursors.push_back({obj.key});
cursors.push_back({});
{
if (!obj.blob.empty())
cursors.emplace_back(obj.key);
}
cursors.emplace_back();
std::stringstream cursorStr;
for (auto const& c : cursors)
{
if (c)
cursorStr << ripple::strHex(*c) << ", ";
}
LOG(log_.info()) << "Loading cache. num cursors = " << cursors.size() - 1;
LOG(log_.trace()) << "cursors = " << cursorStr.str();

View File

@@ -28,6 +28,7 @@
#include <chrono>
#include <mutex>
#include <thread>
#include <utility>
namespace etl::detail {
@@ -57,7 +58,7 @@ public:
std::optional<uint32_t> finishSequence,
SystemState const& state)
: pipe_(std::ref(pipe))
, networkValidatedLedgers_{networkValidatedLedgers}
, networkValidatedLedgers_{std::move(networkValidatedLedgers)}
, ledgerFetcher_{std::ref(ledgerFetcher)}
, startSequence_{startSequence}
, finishSequence_{finishSequence}

View File

@@ -37,14 +37,14 @@ ForwardCache::freshen()
{
boost::asio::spawn(
strand_, [this, numOutstanding, command = cacheEntry.first](boost::asio::yield_context yield) {
boost::json::object request = {{"command", command}};
boost::json::object const request = {{"command", command}};
auto resp = source_.requestFromRippled(request, {}, yield);
if (!resp || resp->contains("error"))
resp = {};
{
std::scoped_lock lk(mtx_);
std::scoped_lock const lk(mtx_);
latestForwarded_[command] = resp;
}
});
@@ -54,7 +54,7 @@ ForwardCache::freshen()
void
ForwardCache::clear()
{
std::scoped_lock lk(mtx_);
std::scoped_lock const lk(mtx_);
for (auto& cacheEntry : latestForwarded_)
latestForwarded_[cacheEntry.first] = {};
}
@@ -64,16 +64,20 @@ ForwardCache::get(boost::json::object const& request) const
{
std::optional<std::string> command = {};
if (request.contains("command") && !request.contains("method") && request.at("command").is_string())
{
command = request.at("command").as_string().c_str();
}
else if (request.contains("method") && !request.contains("command") && request.at("method").is_string())
{
command = request.at("method").as_string().c_str();
}
if (!command)
return {};
if (rpc::specifiesCurrentOrClosedLedger(request))
return {};
std::shared_lock lk(mtx_);
std::shared_lock const lk(mtx_);
if (!latestForwarded_.contains(*command))
return {};

View File

@@ -31,7 +31,9 @@
#include <mutex>
#include <unordered_map>
namespace etl {
class Source;
} // namespace etl
namespace etl::detail {
@@ -41,6 +43,7 @@ namespace etl::detail {
class ForwardCache
{
using ResponseType = std::optional<boost::json::object>;
static constexpr std::uint32_t DEFAULT_DURATION = 10;
util::Logger log_{"ETL"};
@@ -48,7 +51,7 @@ class ForwardCache
std::unordered_map<std::string, ResponseType> latestForwarded_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
etl::Source const& source_;
std::uint32_t duration_ = 10;
std::uint32_t duration_ = DEFAULT_DURATION;
void
clear();
@@ -76,7 +79,7 @@ public:
freshen();
std::optional<boost::json::object>
get(boost::json::object const& command) const;
get(boost::json::object const& request) const;
};
} // namespace etl::detail

View File

@@ -27,6 +27,7 @@
#include <grpcpp/grpcpp.h>
#include <optional>
#include <utility>
namespace etl::detail {
@@ -50,7 +51,7 @@ public:
* @brief Create an instance of the fetcher
*/
LedgerFetcher(std::shared_ptr<BackendInterface> backend, std::shared_ptr<LoadBalancerType> balancer)
: backend_(backend), loadBalancer_(balancer)
: backend_(std::move(backend)), loadBalancer_(std::move(balancer))
{
}

View File

@@ -30,6 +30,7 @@
#include <ripple/beast/core/CurrentThreadName.h>
#include <memory>
#include <utility>
/**
* @brief Account transactions, NFT transactions and NFT data bundled together.
@@ -71,7 +72,10 @@ public:
std::shared_ptr<LoadBalancerType> balancer,
LedgerFetcherType& fetcher,
SystemState const& state)
: backend_{backend}, loadBalancer_{balancer}, fetcher_{std::ref(fetcher)}, state_{std::cref(state)}
: backend_{std::move(backend)}
, loadBalancer_{std::move(balancer)}
, fetcher_{std::ref(fetcher)}
, state_{std::cref(state)}
{
}
@@ -96,7 +100,7 @@ public:
std::string* raw = txn.mutable_transaction_blob();
ripple::SerialIter it{raw->data(), raw->size()};
ripple::STTx sttx{it};
ripple::STTx const sttx{it};
LOG(log_.trace()) << "Inserting transaction = " << sttx.getTransactionID();
@@ -108,7 +112,8 @@ public:
result.nfTokensData.push_back(*maybeNFT);
result.accountTxData.emplace_back(txMeta, sttx.getTransactionID());
std::string keyStr{reinterpret_cast<const char*>(sttx.getTransactionID().data()), 32};
static constexpr std::size_t KEY_SIZE = 32;
std::string keyStr{reinterpret_cast<const char*>(sttx.getTransactionID().data()), KEY_SIZE};
backend_->writeTransaction(
std::move(keyStr),
ledger.seq,
@@ -225,8 +230,9 @@ public:
++numWrites;
}
prev = std::move(cur->key);
if (numWrites % 100000 == 0 && numWrites != 0)
prev = cur->key;
static constexpr std::size_t LOG_INTERVAL = 100000;
if (numWrites % LOG_INTERVAL == 0 && numWrites != 0)
LOG(log_.info()) << "Wrote " << numWrites << " book successors";
}

View File

@@ -21,6 +21,7 @@
#include <data/BackendInterface.h>
#include <etl/SystemState.h>
#include <feed/SubscriptionManager.h>
#include <util/LedgerUtils.h>
#include <util/Profiler.h>
#include <util/log/Logger.h>
@@ -28,6 +29,7 @@
#include <ripple/protocol/LedgerHeader.h>
#include <chrono>
#include <utility>
namespace etl::detail {
@@ -72,8 +74,8 @@ public:
std::shared_ptr<feed::SubscriptionManager> subscriptions,
SystemState const& state)
: publishStrand_{boost::asio::make_strand(ioc)}
, backend_{backend}
, subscriptions_{subscriptions}
, backend_{std::move(backend)}
, subscriptions_{std::move(subscriptions)}
, state_{std::cref(state)}
{
}
@@ -111,8 +113,7 @@ public:
++numAttempts;
continue;
}
else
{
auto lgr = data::synchronousAndRetryOnTimeout(
[&](auto yield) { return backend_->fetchLedgerBySequence(ledgerSequence, yield); });
@@ -121,7 +122,6 @@ public:
return true;
}
}
return false;
}
@@ -142,7 +142,7 @@ public:
{
LOG(log_.info()) << "Updating cache";
std::vector<data::LedgerObject> diff = data::synchronousAndRetryOnTimeout(
std::vector<data::LedgerObject> const diff = data::synchronousAndRetryOnTimeout(
[&](auto yield) { return backend_->fetchLedgerDiff(lgrInfo.seq, yield); });
backend_->cache().update(diff, lgrInfo.seq); // todo: inject cache to update, don't use backend cache
@@ -154,19 +154,20 @@ public:
// if the ledger closed over 10 minutes ago, assume we are still catching up and don't publish
// TODO: this probably should be a strategy
if (age < 600)
static constexpr std::uint32_t MAX_LEDGER_AGE_SECONDS = 600;
if (age < MAX_LEDGER_AGE_SECONDS)
{
std::optional<ripple::Fees> fees = data::synchronousAndRetryOnTimeout(
[&](auto yield) { return backend_->fetchFees(lgrInfo.seq, yield); });
std::vector<data::TransactionAndMetadata> transactions = data::synchronousAndRetryOnTimeout(
std::vector<data::TransactionAndMetadata> const transactions = data::synchronousAndRetryOnTimeout(
[&](auto yield) { return backend_->fetchAllTransactionsInLedger(lgrInfo.seq, yield); });
auto ledgerRange = backend_->fetchLedgerRange();
assert(ledgerRange);
assert(fees);
std::string range =
std::string const range =
std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence);
subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size());
@@ -203,7 +204,7 @@ public:
std::chrono::time_point<std::chrono::system_clock>
getLastPublish() const
{
std::shared_lock lck(publishTimeMtx_);
std::shared_lock const lck(publishTimeMtx_);
return lastPublish_;
}
@@ -213,7 +214,7 @@ public:
std::uint32_t
lastCloseAgeSeconds() const
{
std::shared_lock lck(closeTimeMtx_);
std::shared_lock const lck(closeTimeMtx_);
auto now = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch())
.count();
auto closeTime = lastCloseTime_.time_since_epoch().count();
@@ -225,7 +226,7 @@ public:
std::optional<uint32_t>
getLastPublishedSequence() const
{
std::scoped_lock lck(lastPublishedSeqMtx_);
std::scoped_lock const lck(lastPublishedSeqMtx_);
return lastPublishedSequence_;
}
@@ -233,21 +234,21 @@ private:
void
setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
{
std::scoped_lock lck(closeTimeMtx_);
std::scoped_lock const lck(closeTimeMtx_);
lastCloseTime_ = lastCloseTime;
}
void
setLastPublishTime()
{
std::scoped_lock lck(publishTimeMtx_);
std::scoped_lock const lck(publishTimeMtx_);
lastPublish_ = std::chrono::system_clock::now();
}
void
setLastPublishedSequence(std::optional<uint32_t> lastPublishedSequence)
{
std::scoped_lock lck(lastPublishedSeqMtx_);
std::scoped_lock const lck(lastPublishedSeqMtx_);
lastPublishedSequence_ = lastPublishedSequence;
}
};

View File

@@ -34,6 +34,7 @@
#include <chrono>
#include <memory>
#include <thread>
#include <utility>
namespace etl::detail {
@@ -87,7 +88,7 @@ public:
uint32_t startSequence,
SystemState& state)
: pipe_{std::ref(pipe)}
, backend_{backend}
, backend_{std::move(backend)}
, loader_{std::ref(loader)}
, publisher_{std::ref(publisher)}
, amendmentBlockHandler_{std::ref(amendmentBlockHandler)}
@@ -299,7 +300,7 @@ private:
for (auto const& obj : cacheUpdates)
{
if (modified.count(obj.key))
if (modified.contains(obj.key))
continue;
auto lb = backend_->cache().getPredecessor(obj.key, lgrInfo.seq);
@@ -310,7 +311,7 @@ private:
if (!ub)
ub = {data::lastKey, {}};
if (obj.blob.size() == 0)
if (obj.blob.empty())
{
LOG(log_.debug()) << "writing successor for deleted object " << ripple::strHex(obj.key) << " - "
<< ripple::strHex(lb->key) << " - " << ripple::strHex(ub->key);
@@ -378,10 +379,10 @@ private:
if (obj.mod_type() != RawLedgerObjectType::MODIFIED)
{
std::string* predPtr = obj.mutable_predecessor();
if (!predPtr->size())
if (predPtr->empty())
*predPtr = uint256ToString(data::firstKey);
std::string* succPtr = obj.mutable_successor();
if (!succPtr->size())
if (succPtr->empty())
*succPtr = uint256ToString(data::lastKey);
if (obj.mod_type() == RawLedgerObjectType::DELETED)

View File

@@ -78,7 +78,7 @@ SubscriptionManager::subLedger(boost::asio::yield_context yield, SessionPtrType
fees = backend_->fetchFees(lgrInfo->seq, yield);
assert(fees);
std::string range = std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence);
std::string const range = std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence);
auto pubMsg = getLedgerPubMessage(*lgrInfo, *fees, range, 0);
pubMsg.erase("txn_count");
@@ -216,20 +216,27 @@ SubscriptionManager::pubTransaction(data::TransactionAndMetadata const& blobs, r
// We need a field that contains the TakerGets and TakerPays
// parameters.
if (node.getFName() == ripple::sfModifiedNode)
{
field = &ripple::sfPreviousFields;
}
else if (node.getFName() == ripple::sfCreatedNode)
{
field = &ripple::sfNewFields;
}
else if (node.getFName() == ripple::sfDeletedNode)
{
field = &ripple::sfFinalFields;
}
if (field)
if (field != nullptr)
{
auto data = dynamic_cast<const ripple::STObject*>(node.peekAtPField(*field));
if (data && data->isFieldPresent(ripple::sfTakerPays) && data->isFieldPresent(ripple::sfTakerGets))
if ((data != nullptr) && data->isFieldPresent(ripple::sfTakerPays) &&
data->isFieldPresent(ripple::sfTakerGets))
{
// determine the OrderBook
ripple::Book book{
ripple::Book const book{
data->getFieldAmount(ripple::sfTakerGets).issue(),
data->getFieldAmount(ripple::sfTakerPays).issue()};
if (alreadySent.find(book) == alreadySent.end())
@@ -335,7 +342,7 @@ void
SubscriptionManager::subscribeHelper(SessionPtrType const& session, Subscription& subs, CleanupFunction&& func)
{
subs.subscribe(session);
std::scoped_lock lk(cleanupMtx_);
std::scoped_lock const lk(cleanupMtx_);
cleanupFuncs_[session].push_back(std::move(func));
}
@@ -348,18 +355,18 @@ SubscriptionManager::subscribeHelper(
CleanupFunction&& func)
{
subs.subscribe(session, k);
std::scoped_lock lk(cleanupMtx_);
std::scoped_lock const lk(cleanupMtx_);
cleanupFuncs_[session].push_back(std::move(func));
}
void
SubscriptionManager::cleanup(SessionPtrType session)
{
std::scoped_lock lk(cleanupMtx_);
std::scoped_lock const lk(cleanupMtx_);
if (!cleanupFuncs_.contains(session))
return;
for (auto f : cleanupFuncs_[session])
for (auto const& f : cleanupFuncs_[session])
{
f(session);
}
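Turning `if (field)` and `if (data && ...)` into `field != nullptr` and `data != nullptr` above is the readability-implicit-bool-conversion fix for pointers: the comparison makes it explicit that a pointer, not a bool, is being tested. Sketch with a hypothetical lookup helper:

#include <map>
#include <string>

// Illustrative lookup: returns a pointer into the map or nullptr.
std::string const*
find(std::map<int, std::string> const& m, int key)
{
    auto const it = m.find(key);
    return it != m.end() ? &it->second : nullptr;
}

bool
hasEntry(std::map<int, std::string> const& m, int key)
{
    return find(m, key) != nullptr;  // explicit, instead of `if (find(m, key))`
}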

View File

@@ -79,13 +79,13 @@ parseCli(int argc, char* argv[])
po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
po::notify(parsed);
if (parsed.count("version"))
if (parsed.count("version") != 0u)
{
std::cout << Build::getClioFullVersionString() << '\n';
std::exit(EXIT_SUCCESS);
}
if (parsed.count("help"))
if (parsed.count("help") != 0u)
{
std::cout << "Clio server " << Build::getClioFullVersionString() << "\n\n" << description;
std::exit(EXIT_SUCCESS);
@@ -109,7 +109,7 @@ parseCerts(Config const& config)
auto certFilename = config.value<std::string>("ssl_cert_file");
auto keyFilename = config.value<std::string>("ssl_key_file");
std::ifstream readCert(certFilename, std::ios::in | std::ios::binary);
std::ifstream const readCert(certFilename, std::ios::in | std::ios::binary);
if (!readCert)
return {};

View File

@@ -36,7 +36,7 @@ struct Amendments
* @param name The name of the amendment
* @return The corresponding amendment Id
*/
static ripple::uint256 const
static ripple::uint256
GetAmendmentId(std::string_view const name)
{
return ripple::sha512Half(ripple::Slice(name.data(), name.size()));

View File

@@ -188,8 +188,8 @@ private:
handleAffectedNode(node);
}
std::optional<uint32_t>
shouldCancelOffer(std::shared_ptr<ripple::STTx const> const& tx) const
static std::optional<uint32_t>
shouldCancelOffer(std::shared_ptr<ripple::STTx const> const& tx)
{
switch (tx->getFieldU16(ripple::sfTransactionType))
{
@@ -242,7 +242,7 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, BookChange const
* @param lgrInfo The ledger header
* @param transactions The vector of transactions with their metadata
*/
[[nodiscard]] boost::json::object const
[[nodiscard]] boost::json::object
computeBookChanges(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions);
} // namespace rpc

View File

@@ -26,7 +26,7 @@ namespace rpc {
void
Counters::rpcFailed(std::string const& method)
{
std::scoped_lock lk(mutex_);
std::scoped_lock const lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.started;
++counters.failed;
@@ -35,7 +35,7 @@ Counters::rpcFailed(std::string const& method)
void
Counters::rpcErrored(std::string const& method)
{
std::scoped_lock lk(mutex_);
std::scoped_lock const lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.started;
++counters.errored;
@@ -44,7 +44,7 @@ Counters::rpcErrored(std::string const& method)
void
Counters::rpcComplete(std::string const& method, std::chrono::microseconds const& rpcDuration)
{
std::scoped_lock lk(mutex_);
std::scoped_lock const lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.started;
++counters.finished;
@@ -54,7 +54,7 @@ Counters::rpcComplete(std::string const& method, std::chrono::microseconds const
void
Counters::rpcForwarded(std::string const& method)
{
std::scoped_lock lk(mutex_);
std::scoped_lock const lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.forwarded;
}
@@ -62,7 +62,7 @@ Counters::rpcForwarded(std::string const& method)
void
Counters::rpcFailedToForward(std::string const& method)
{
std::scoped_lock lk(mutex_);
std::scoped_lock const lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.failedForward;
}
@@ -106,7 +106,7 @@ Counters::uptime() const
boost::json::object
Counters::report() const
{
std::scoped_lock lk(mutex_);
std::scoped_lock const lk(mutex_);
auto obj = boost::json::object{};
obj[JS(rpc)] = boost::json::object{};

View File

@@ -146,8 +146,10 @@ makeError(Status const& status)
status.code);
if (status.extraInfo)
{
for (auto& [key, value] : status.extraInfo.value())
res[key] = value;
}
return res;
}

View File

@@ -27,6 +27,7 @@
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
namespace rpc {
@@ -73,8 +74,8 @@ using CombinedError = std::variant<RippledError, ClioError>;
struct Status
{
CombinedError code = RippledError::rpcSUCCESS;
std::string error = "";
std::string message = "";
std::string error;
std::string message;
std::optional<boost::json::object> extraInfo;
Status() = default;
@@ -83,15 +84,16 @@ struct Status
// HACK. Some rippled handlers explicitly specify errors.
// This means that we have to be able to duplicate this functionality.
explicit Status(std::string const& message) : code(ripple::rpcUNKNOWN), message(message)
explicit Status(std::string message) : code(ripple::rpcUNKNOWN), message(std::move(message))
{
}
Status(CombinedError code, std::string message) : code(code), message(message)
Status(CombinedError code, std::string message) : code(code), message(std::move(message))
{
}
Status(CombinedError code, std::string error, std::string message) : code(code), error(error), message(message)
Status(CombinedError code, std::string error, std::string message)
: code(code), error(std::move(error)), message(std::move(message))
{
}
@@ -156,7 +158,7 @@ class InvalidParamsError : public std::exception
std::string msg;
public:
explicit InvalidParamsError(std::string const& msg) : msg(msg)
explicit InvalidParamsError(std::string msg) : msg(std::move(msg))
{
}
@@ -173,7 +175,7 @@ class AccountNotFoundError : public std::exception
std::string account;
public:
explicit AccountNotFoundError(std::string const& acct) : account(acct)
explicit AccountNotFoundError(std::string acct) : account(std::move(acct))
{
}

View File

@@ -37,9 +37,13 @@ make_WsContext(
{
boost::json::value commandValue = nullptr;
if (!request.contains("command") && request.contains("method"))
{
commandValue = request.at("method");
}
else if (request.contains("command") && !request.contains("method"))
{
commandValue = request.at("command");
}
if (!commandValue.is_string())
return Error{{ClioError::rpcCOMMAND_IS_MISSING, "Method/Command is not specified or is not a string."}};
@@ -48,7 +52,7 @@ make_WsContext(
if (!apiVersion)
return Error{{ClioError::rpcINVALID_API_VERSION, apiVersion.error()}};
string command = commandValue.as_string().c_str();
string const command = commandValue.as_string().c_str();
return web::Context(yc, command, *apiVersion, request, session, tagFactory, range, clientIp);
}
@@ -70,7 +74,7 @@ make_HttpContext(
if (request.at("method").as_string().empty())
return Error{{ClioError::rpcCOMMAND_IS_EMPTY}};
string command = request.at("method").as_string().c_str();
string const command = request.at("method").as_string().c_str();
if (command == "subscribe" || command == "unsubscribe")
return Error{{RippledError::rpcBAD_SYNTAX, "Subscribe and unsubscribe are only allowed for websocket."}};

View File

@@ -46,7 +46,7 @@
// forward declarations
namespace feed {
class SubscriptionManager;
}
} // namespace feed
namespace etl {
class LoadBalancer;
class ETLService;
@@ -149,13 +149,12 @@ public:
LOG(perfLog_.debug()) << ctx.tag() << " finish executing rpc `" << ctx.method << '`';
if (v)
return v->as_object();
else
{
return v->as_object();
}
notifyErrored(ctx.method);
return Status{v.error()};
}
}
catch (data::DatabaseTimeout const& t)
{
LOG(log_.error()) << "Database timeout";

View File

@@ -93,7 +93,9 @@ getDeliveredAmount(
// then its absence indicates that the amount delivered is listed in the
// Amount field. DeliveredAmount went live January 24, 2014.
// 446000000 is in Feb 2014, well after DeliveredAmount went live
if (ledgerSequence >= 4594095 || date > 446000000)
static constexpr std::uint32_t FIRST_LEDGER_WITH_DELIVERED_AMOUNT = 4594095;
static constexpr std::uint32_t DELIVERED_AMOUNT_LIVE_DATE = 446000000;
if (ledgerSequence >= FIRST_LEDGER_WITH_DELIVERED_AMOUNT || date > DELIVERED_AMOUNT_LIVE_DATE)
{
return txn->getFieldAmount(ripple::sfAmount);
}
@@ -133,13 +135,18 @@ accountFromStringStrict(std::string const& account)
std::optional<ripple::AccountID> result;
if (publicKey)
{
result = ripple::calcAccountID(*publicKey);
}
else
{
result = ripple::parseBase58<ripple::AccountID>(account);
}
if (result)
{
return result.value();
else
}
return {};
}
@@ -177,7 +184,7 @@ deserializeTxPlusMeta(data::TransactionAndMetadata const& blobs, std::uint32_t s
{
auto [tx, meta] = deserializeTxPlusMeta(blobs);
std::shared_ptr<ripple::TxMeta> m = std::make_shared<ripple::TxMeta>(tx->getTransactionID(), seq, *meta);
std::shared_ptr<ripple::TxMeta> const m = std::make_shared<ripple::TxMeta>(tx->getTransactionID(), seq, *meta);
return {tx, m};
}
@@ -224,9 +231,13 @@ insertDeliveredAmount(
if (canHaveDeliveredAmount(txn, meta))
{
if (auto amt = getDeliveredAmount(txn, meta, meta->getLgrSeq(), date))
{
metaJson["delivered_amount"] = toBoostJson(amt->getJson(ripple::JsonOptions::include_date));
}
else
{
metaJson["delivered_amount"] = "unavailable";
}
return true;
}
return false;
@@ -330,10 +341,14 @@ ledgerInfoFromRequest(std::shared_ptr<data::BackendInterface const> const& backe
{
boost::json::string const& stringIndex = indexValue.as_string();
if (stringIndex == "validated")
{
ledgerSequence = ctx.range.maxSequence;
}
else
{
ledgerSequence = parseStringAsUInt(stringIndex.c_str());
}
}
else if (indexValue.is_int64())
ledgerSequence = indexValue.as_int64();
}
@@ -368,7 +383,7 @@ getLedgerInfoFromHashOrSeq(
{
// invoke uint256's constructor to parse the hex string, instead of
// copying buffer
ripple::uint256 ledgerHash256{std::string_view(*ledgerHash)};
ripple::uint256 const ledgerHash256{std::string_view(*ledgerHash)};
lgrInfo = backend.fetchLedgerByHash(ledgerHash256, yield);
if (!lgrInfo || lgrInfo->seq > maxSeq)
return err;
@@ -411,8 +426,10 @@ getStartHint(ripple::SLE const& sle, ripple::AccountID const& accountID)
if (sle.getType() == ripple::ltRIPPLE_STATE)
{
if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == accountID)
{
return sle.getFieldU64(ripple::sfLowNode);
else if (sle.getFieldAmount(ripple::sfHighLimit).getIssuer() == accountID)
}
if (sle.getFieldAmount(ripple::sfHighLimit).getIssuer() == accountID)
return sle.getFieldU64(ripple::sfHighNode);
}
@@ -443,16 +460,17 @@ traverseNFTObjects(
return Status{RippledError::rpcINVALID_PARAMS, "Invalid marker."};
// no marker, start from the last page
ripple::uint256 currentPage = nextPage == beast::zero ? lastNFTPage.key : nextPage;
ripple::uint256 const currentPage = nextPage == beast::zero ? lastNFTPage.key : nextPage;
// read the current page
auto page = backend.fetchLedgerObject(currentPage, sequence, yield);
if (!page)
{
if (nextPage == beast::zero) // no nft objects in lastNFTPage
if (nextPage == beast::zero)
{ // no nft objects in lastNFTPage
return AccountCursor{beast::zero, 0};
else // marker is in the right range, but still invalid
} // marker is in the right range, but still invalid
return Status{RippledError::rpcINVALID_PARAMS, "Invalid marker."};
}
@@ -553,7 +571,8 @@ traverseOwnedNodes(
// Only reserve 2048 nodes when fetching all owned ledger objects. If there
// are more, then keys will allocate more memory, which is suboptimal, but
// should only occur occasionally.
keys.reserve(std::min(std::uint32_t{2048}, limit));
static constexpr std::uint32_t MIN_NODES = 2048;
keys.reserve(std::min(MIN_NODES, limit));
auto start = std::chrono::system_clock::now();
@@ -567,7 +586,7 @@ traverseOwnedNodes(
return Status(ripple::rpcINVALID_PARAMS, "Invalid marker.");
ripple::SerialIter hintDirIt{hintDir->data(), hintDir->size()};
ripple::SLE hintDirSle{hintDirIt, hintIndex.key};
ripple::SLE const hintDirSle{hintDirIt, hintIndex.key};
if (auto const& indexes = hintDirSle.getFieldV256(ripple::sfIndexes);
std::find(std::begin(indexes), std::end(indexes), hexMarker) == std::end(indexes))
@@ -586,7 +605,7 @@ traverseOwnedNodes(
return Status(ripple::rpcINVALID_PARAMS, "Owner directory not found.");
ripple::SerialIter ownedDirIt{ownerDir->data(), ownerDir->size()};
ripple::SLE ownedDirSle{ownedDirIt, currentIndex.key};
ripple::SLE const ownedDirSle{ownedDirIt, currentIndex.key};
for (auto const& key : ownedDirSle.getFieldV256(ripple::sfIndexes))
{
@@ -630,7 +649,7 @@ traverseOwnedNodes(
break;
ripple::SerialIter ownedDirIt{ownerDir->data(), ownerDir->size()};
ripple::SLE ownedDirSle{ownedDirIt, currentIndex.key};
ripple::SLE const ownedDirSle{ownedDirIt, currentIndex.key};
for (auto const& key : ownedDirSle.getFieldV256(ripple::sfIndexes))
{
@@ -703,8 +722,10 @@ parseRippleLibSeed(boost::json::value const& value)
auto const result = ripple::decodeBase58Token(value.as_string().c_str(), ripple::TokenType::None);
if (result.size() == 18 && static_cast<std::uint8_t>(result[0]) == std::uint8_t(0xE1) &&
static_cast<std::uint8_t>(result[1]) == std::uint8_t(0x4B))
static constexpr std::size_t SEED_SIZE = 18;
static constexpr std::array<std::uint8_t, 2> SEED_PREFIX = {0xE1, 0x4B};
if (result.size() == SEED_SIZE && static_cast<std::uint8_t>(result[0]) == SEED_PREFIX[0] &&
static_cast<std::uint8_t>(result[1]) == SEED_PREFIX[1])
return ripple::Seed(ripple::makeSlice(result.substr(2)));
return {};
@@ -720,9 +741,9 @@ keypairFromRequst(boost::json::object const& request)
static std::string const secretTypes[]{"passphrase", "secret", "seed", "seed_hex"};
// Identify which secret type is in use.
std::string secretType = "";
std::string secretType;
int count = 0;
for (auto t : secretTypes)
for (const auto& t : secretTypes)
{
if (request.contains(t))
{
@@ -750,7 +771,7 @@ keypairFromRequst(boost::json::object const& request)
if (!request.at("key_type").is_string())
return Status{RippledError::rpcINVALID_PARAMS, "keyTypeNotString"};
std::string key_type = request.at("key_type").as_string().c_str();
std::string const key_type = request.at("key_type").as_string().c_str();
keyType = ripple::keyTypeFromString(key_type);
if (!keyType)
@@ -788,17 +809,21 @@ keypairFromRequst(boost::json::object const& request)
if (!request.at(secretType).is_string())
return Status{RippledError::rpcINVALID_PARAMS, "secret value must be string"};
std::string key = request.at(secretType).as_string().c_str();
std::string const key = request.at(secretType).as_string().c_str();
if (secretType == "seed")
{
seed = ripple::parseBase58<ripple::Seed>(key);
}
else if (secretType == "passphrase")
{
seed = ripple::parseGenericSeed(key);
}
else if (secretType == "seed_hex")
{
ripple::uint128 s;
if (s.parseHex(key))
seed.emplace(ripple::Slice(s.data(), s.size()));
seed.emplace(ripple::Slice(s.data(), ripple::uint128::size()));
}
}
else
@@ -806,7 +831,7 @@ keypairFromRequst(boost::json::object const& request)
if (!request.at("secret").is_string())
return Status{RippledError::rpcINVALID_PARAMS, "field secret should be a string"};
std::string secret = request.at("secret").as_string().c_str();
std::string const secret = request.at("secret").as_string().c_str();
seed = ripple::parseGenericSeed(secret);
}
}
@@ -861,7 +886,7 @@ isGlobalFrozen(
return false;
ripple::SerialIter it{blob->data(), blob->size()};
ripple::SLE sle{it, key};
ripple::SLE const sle{it, key};
return sle.isFlag(ripple::lsfGlobalFreeze);
}
@@ -885,7 +910,7 @@ isFrozen(
return false;
ripple::SerialIter it{blob->data(), blob->size()};
ripple::SLE sle{it, key};
ripple::SLE const sle{it, key};
if (sle.isFlag(ripple::lsfGlobalFreeze))
return true;
@@ -899,7 +924,7 @@ isFrozen(
return false;
ripple::SerialIter issuerIt{blob->data(), blob->size()};
ripple::SLE issuerLine{issuerIt, key};
ripple::SLE const issuerLine{issuerIt, key};
auto frozen = (issuer > account) ? ripple::lsfHighFreeze : ripple::lsfLowFreeze;
@@ -924,7 +949,7 @@ xrpLiquid(
return beast::zero;
ripple::SerialIter it{blob->data(), blob->size()};
ripple::SLE sle{it, key};
ripple::SLE const sle{it, key};
std::uint32_t const ownerCount = sle.getFieldU32(ripple::sfOwnerCount);
@@ -951,10 +976,8 @@ accountFunds(
{
return amount;
}
else
{
return accountHolds(backend, sequence, id, amount.getCurrency(), amount.getIssuer(), true, yield);
}
}
ripple::STAmount
@@ -983,7 +1006,7 @@ accountHolds(
}
ripple::SerialIter it{blob->data(), blob->size()};
ripple::SLE sle{it, key};
ripple::SLE const sle{it, key};
if (zeroIfFrozen && isFrozen(backend, sequence, account, currency, issuer, yield))
{
@@ -1016,7 +1039,7 @@ transferRate(
if (blob)
{
ripple::SerialIter it{blob->data(), blob->size()};
ripple::SLE sle{it, key};
ripple::SLE const sle{it, key};
if (sle.isFieldPresent(ripple::sfTransferRate))
return ripple::Rate{sle.getFieldU32(ripple::sfTransferRate)};
@@ -1038,7 +1061,7 @@ postProcessOrderBook(
std::map<ripple::AccountID, ripple::STAmount> umBalance;
bool globalFreeze = isGlobalFrozen(backend, ledgerSequence, book.out.account, yield) ||
bool const globalFreeze = isGlobalFrozen(backend, ledgerSequence, book.out.account, yield) ||
isGlobalFrozen(backend, ledgerSequence, book.in.account, yield);
auto rate = transferRate(backend, ledgerSequence, book.out.account, yield);
@@ -1048,8 +1071,8 @@ postProcessOrderBook(
try
{
ripple::SerialIter it{obj.blob.data(), obj.blob.size()};
ripple::SLE offer{it, obj.key};
ripple::uint256 bookDir = offer.getFieldH256(ripple::sfBookDirectory);
ripple::SLE const offer{it, obj.key};
ripple::uint256 const bookDir = offer.getFieldH256(ripple::sfBookDirectory);
auto const uOfferOwnerID = offer.getAccountID(ripple::sfAccount);
auto const& saTakerGets = offer.getFieldAmount(ripple::sfTakerGets);
@@ -1094,7 +1117,7 @@ postProcessOrderBook(
ripple::STAmount saTakerGetsFunded;
ripple::STAmount saOwnerFundsLimit = saOwnerFunds;
ripple::Rate offerRate = ripple::parityRate;
ripple::STAmount dirRate = ripple::amountFromQuality(getQuality(bookDir));
ripple::STAmount const dirRate = ripple::amountFromQuality(getQuality(bookDir));
if (rate != ripple::parityRate
// Have a transfer fee.
@@ -1122,7 +1145,7 @@ postProcessOrderBook(
.getJson(ripple::JsonOptions::none));
}
ripple::STAmount saOwnerPays = (ripple::parityRate == offerRate)
ripple::STAmount const saOwnerPays = (ripple::parityRate == offerRate)
? saTakerGetsFunded
: std::min(saOwnerFunds, ripple::multiply(saTakerGetsFunded, offerRate));
@@ -1148,26 +1171,34 @@ std::variant<Status, ripple::Book>
parseBook(ripple::Currency pays, ripple::AccountID payIssuer, ripple::Currency gets, ripple::AccountID getIssuer)
{
if (isXRP(pays) && !isXRP(payIssuer))
{
return Status{
RippledError::rpcSRC_ISR_MALFORMED,
"Unneeded field 'taker_pays.issuer' for XRP currency "
"specification."};
}
if (!isXRP(pays) && isXRP(payIssuer))
{
return Status{
RippledError::rpcSRC_ISR_MALFORMED,
"Invalid field 'taker_pays.issuer', expected non-XRP "
"issuer."};
}
if (ripple::isXRP(gets) && !ripple::isXRP(getIssuer))
{
return Status{
RippledError::rpcDST_ISR_MALFORMED,
"Unneeded field 'taker_gets.issuer' for XRP currency "
"specification."};
}
if (!ripple::isXRP(gets) && ripple::isXRP(getIssuer))
{
return Status{
RippledError::rpcDST_ISR_MALFORMED, "Invalid field 'taker_gets.issuer', expected non-XRP issuer."};
}
if (pays == gets && payIssuer == getIssuer)
return Status{RippledError::rpcBAD_MARKET, "badMarket"};
@@ -1202,9 +1233,11 @@ parseBook(boost::json::object const& request)
return Status{RippledError::rpcDST_AMT_MALFORMED};
if (!taker_gets.at("currency").is_string())
{
return Status{
RippledError::rpcDST_AMT_MALFORMED,
};
}
ripple::Currency pay_currency;
if (!ripple::to_currency(pay_currency, taker_pays.at("currency").as_string().c_str()))
@@ -1232,16 +1265,20 @@ parseBook(boost::json::object const& request)
}
if (isXRP(pay_currency) && !isXRP(pay_issuer))
{
return Status{
RippledError::rpcSRC_ISR_MALFORMED,
"Unneeded field 'taker_pays.issuer' for XRP currency "
"specification."};
}
if (!isXRP(pay_currency) && isXRP(pay_issuer))
{
return Status{
RippledError::rpcSRC_ISR_MALFORMED,
"Invalid field 'taker_pays.issuer', expected non-XRP "
"issuer."};
}
if ((!isXRP(pay_currency)) && (!taker_pays.contains("issuer")))
return Status{RippledError::rpcSRC_ISR_MALFORMED, "Missing non-XRP issuer."};
@@ -1257,25 +1294,31 @@ parseBook(boost::json::object const& request)
return Status{RippledError::rpcDST_ISR_MALFORMED, "Invalid field 'taker_gets.issuer', bad issuer."};
if (get_issuer == ripple::noAccount())
{
return Status{
RippledError::rpcDST_ISR_MALFORMED,
"Invalid field 'taker_gets.issuer', bad issuer account "
"one."};
}
}
else
{
get_issuer = ripple::xrpAccount();
}
if (ripple::isXRP(get_currency) && !ripple::isXRP(get_issuer))
{
return Status{
RippledError::rpcDST_ISR_MALFORMED,
"Unneeded field 'taker_gets.issuer' for XRP currency "
"specification."};
}
if (!ripple::isXRP(get_currency) && ripple::isXRP(get_issuer))
{
return Status{
RippledError::rpcDST_ISR_MALFORMED, "Invalid field 'taker_gets.issuer', expected non-XRP issuer."};
}
if (pay_currency == get_currency && pay_issuer == get_issuer)
return Status{RippledError::rpcBAD_MARKET, "badMarket"};
@@ -1304,7 +1347,7 @@ specifiesCurrentOrClosedLedger(boost::json::object const& request)
auto indexValue = request.at("ledger_index");
if (indexValue.is_string())
{
std::string index = indexValue.as_string().c_str();
std::string const index = indexValue.as_string().c_str();
return index == "current" || index == "closed";
}
}
@@ -1337,7 +1380,7 @@ isAmendmentEnabled(
// the amendments should always be present in ledger
auto const& amendments = backend->fetchLedgerObject(ripple::keylet::amendments().key, seq, yield);
ripple::SLE amendmentsSLE{
ripple::SLE const amendmentsSLE{
ripple::SerialIter{amendments->data(), amendments->size()}, ripple::keylet::amendments().key};
auto const listAmendments = amendmentsSLE.getFieldV256(ripple::sfAmendments);

View File

@@ -63,7 +63,7 @@ std::pair<std::shared_ptr<ripple::STTx const>, std::shared_ptr<ripple::TxMeta co
deserializeTxPlusMeta(data::TransactionAndMetadata const& blobs, std::uint32_t seq);
std::pair<boost::json::object, boost::json::object>
toExpandedJson(data::TransactionAndMetadata const& blobs, NFTokenjson includeNFTIDs = NFTokenjson::DISABLE);
toExpandedJson(data::TransactionAndMetadata const& blobs, NFTokenjson nftEnabled = NFTokenjson::DISABLE);
bool
insertDeliveredAmount(
@@ -111,7 +111,7 @@ traverseOwnedNodes(
BackendInterface const& backend,
ripple::Keylet const& owner,
ripple::uint256 const& hexMarker,
std::uint32_t const startHint,
std::uint32_t startHint,
std::uint32_t sequence,
std::uint32_t limit,
boost::asio::yield_context yield,
@@ -210,7 +210,7 @@ std::variant<Status, ripple::Book>
parseBook(boost::json::object const& request);
std::variant<Status, ripple::AccountID>
parseTaker(boost::json::value const& request);
parseTaker(boost::json::value const& taker);
bool
specifiesCurrentOrClosedLedger(boost::json::object const& request);
@@ -231,7 +231,9 @@ logDuration(web::Context const& ctx, T const& dur)
{
using boost::json::serialize;
static util::Logger log{"RPC"};
static util::Logger const log{"RPC"};
static constexpr std::int64_t DURATION_ERROR_THRESHOLD_SECONDS = 10;
auto const millis = std::chrono::duration_cast<std::chrono::milliseconds>(dur).count();
auto const seconds = std::chrono::duration_cast<std::chrono::seconds>(dur).count();
auto const msg = fmt::format(
@@ -239,10 +241,14 @@ logDuration(web::Context const& ctx, T const& dur)
millis,
serialize(util::removeSecret(ctx.params)));
if (seconds > 10)
if (seconds > DURATION_ERROR_THRESHOLD_SECONDS)
{
LOG(log.error()) << ctx.tag() << msg;
}
else if (seconds > 1)
{
LOG(log.warn()) << ctx.tag() << msg;
}
else
LOG(log.info()) << ctx.tag() << msg;
}

View File

@@ -67,7 +67,7 @@ public:
static WorkQueue
make_WorkQueue(util::Config const& config)
{
static util::Logger log{"RPC"};
static util::Logger const log{"RPC"};
auto const serverConfig = config.section("server");
auto const numThreads = config.valueOr<uint32_t>("workers", std::thread::hardware_concurrency());
auto const maxQueueSize = serverConfig.valueOr<uint32_t>("max_queue_size", 0); // 0 is no limit

View File

@@ -62,8 +62,10 @@ ValidateArrayAt::verify(boost::json::value& value, std::string_view key) const
auto& res = arr.at(idx_);
for (auto const& spec : specs_)
{
if (auto const ret = spec.process(res); not ret)
return Error{ret.error()};
}
return {};
}

View File

@@ -26,6 +26,8 @@
#include <fmt/core.h>
#include <utility>
namespace rpc::meta {
/**
@@ -100,9 +102,9 @@ public:
*/
template <SomeRequirement... Requirements>
IfType(Requirements&&... requirements)
{
processor_ = [... r = std::forward<Requirements>(requirements)](
boost::json::value& j, std::string_view key) -> MaybeError {
: processor_(
[... r = std::forward<Requirements>(
requirements)](boost::json::value& j, std::string_view key) -> MaybeError {
std::optional<Status> firstFailure = std::nullopt;
// the check logic is the same as fieldspec
@@ -120,7 +122,8 @@ public:
return Error{firstFailure.value()};
return {};
};
})
{
}
/**
@@ -160,7 +163,7 @@ public:
* @brief Constructs a validator that calls the given validator `req` and returns a custom error `err` in case `req`
* fails.
*/
WithCustomError(SomeRequirement req, Status err) : requirement{std::move(req)}, error{err}
WithCustomError(SomeRequirement req, Status err) : requirement{std::move(req)}, error{std::move(err)}
{
}

View File

@@ -85,8 +85,8 @@ struct ToLower final
* @param key The key used to retrieve the modified value from the outer object
* @return Possibly an error
*/
[[nodiscard]] MaybeError
modify(boost::json::value& value, std::string_view key) const
[[nodiscard]] static MaybeError
modify(boost::json::value& value, std::string_view key)
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return {}; // ignore. field does not exist, let 'required' fail instead

View File

@@ -33,8 +33,10 @@ FieldSpec::process(boost::json::value& value) const
RpcSpec::process(boost::json::value& value) const
{
for (auto const& field : fields_)
{
if (auto ret = field.process(value); not ret)
return Error{ret.error()};
}
return {};
}

View File

@@ -85,7 +85,7 @@ struct RpcSpec final
RpcSpec(const RpcSpec& other, std::initializer_list<FieldSpec> additionalFields) : fields_{other.fields_}
{
for (auto& f : additionalFields)
fields_.push_back(std::move(f));
fields_.push_back(f);
}
/**

View File

@@ -28,13 +28,15 @@
#include <boost/json/value.hpp>
#include <boost/json/value_from.hpp>
namespace etl {
class LoadBalancer;
} // namespace etl
namespace web {
struct ConnectionBase;
}
} // namespace web
namespace feed {
class SubscriptionManager;
}
} // namespace feed
namespace rpc {
@@ -100,7 +102,7 @@ using Result = std::variant<Status, boost::json::object>;
struct AccountCursor
{
ripple::uint256 index;
std::uint32_t hint;
std::uint32_t hint{};
std::string
toString() const

View File

@@ -29,7 +29,7 @@
namespace rpc::validation {
[[nodiscard]] MaybeError
Required::verify(boost::json::value const& value, std::string_view key) const
Required::verify(boost::json::value const& value, std::string_view key)
{
if (not value.is_object() or not value.as_object().contains(key.data()))
return Error{Status{RippledError::rpcINVALID_PARAMS, "Required field '" + std::string{key} + "' missing"}};
@@ -49,7 +49,7 @@ CustomValidator::verify(boost::json::value const& value, std::string_view key) c
[[nodiscard]] bool
checkIsU32Numeric(std::string_view sv)
{
uint32_t unused;
uint32_t unused = 0;
auto [_, ec] = std::from_chars(sv.data(), sv.data() + sv.size(), unused);
return ec == std::errc();
@@ -145,12 +145,14 @@ CustomValidator IssuerValidator =
return Error{Status{RippledError::rpcINVALID_PARAMS, fmt::format("Invalid field '{}', bad issuer.", key)}};
if (issuer == ripple::noAccount())
{
return Error{Status{
RippledError::rpcINVALID_PARAMS,
fmt::format(
"Invalid field '{}', bad issuer account "
"one.",
key)}};
}
return MaybeError{};
}};
@@ -185,7 +187,7 @@ CustomValidator SubscribeAccountsValidator =
if (!value.is_array())
return Error{Status{RippledError::rpcINVALID_PARAMS, std::string(key) + "NotArray"}};
if (value.as_array().size() == 0)
if (value.as_array().empty())
return Error{Status{RippledError::rpcACT_MALFORMED, std::string(key) + " malformed."}};
for (auto const& v : value.as_array())
@@ -195,7 +197,7 @@ CustomValidator SubscribeAccountsValidator =
obj[keyItem] = v;
if (auto const err = AccountValidator.verify(obj, keyItem); !err)
if (auto err = AccountValidator.verify(obj, keyItem); !err)
return err;
}

View File

@@ -25,6 +25,8 @@
#include <fmt/core.h>
#include <utility>
namespace rpc::validation {
/**
@@ -83,8 +85,8 @@ template <typename Expected>
*/
struct Required final
{
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const;
[[nodiscard]] static MaybeError
verify(boost::json::value const& value, std::string_view key);
};
/**
@@ -129,10 +131,12 @@ public:
using boost::json::value_to;
auto const res = value_to<T>(value.as_object().at(key.data()));
if (value_ == res)
{
return Error{Status{
RippledError::rpcNOT_SUPPORTED,
fmt::format("Not supported field '{}'s value '{}'", std::string{key}, res)}};
}
}
return {};
}
};
@@ -151,8 +155,8 @@ public:
* @param key The key used to retrieve the tested value from the outer object
* @return `RippledError::rpcNOT_SUPPORTED` if the field is found; otherwise no error is returned
*/
[[nodiscard]] MaybeError
verify(boost::json::value const& value, std::string_view key) const
[[nodiscard]] static MaybeError
verify(boost::json::value const& value, std::string_view key)
{
if (value.is_object() and value.as_object().contains(key.data()))
return Error{Status{RippledError::rpcNOT_SUPPORTED, "Not supported field '" + std::string{key}}};
@@ -340,7 +344,7 @@ public:
*
* @param original The original value to store
*/
explicit EqualTo(Type original) : original_{original}
explicit EqualTo(Type original) : original_{std::move(original)}
{
}
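
Taking `Type original` by value and moving it into the member is the modernize-pass-by-value idiom: a caller passing an rvalue pays one move instead of a copy. A standalone sketch with a hypothetical Tag class:

#include <string>
#include <utility>

class Tag
{
public:
    explicit Tag(std::string name) : name_{std::move(name)}  // by-value parameter, moved into the member
    {
    }

private:
    std::string name_;
};

// Usage: Tag t{std::string{"validated"}};  // the rvalue argument is moved rather than copied twice
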

View File

@@ -32,8 +32,8 @@ public:
* @param ip The ip addr of the client
* @return true if authorized; false otherwise
*/
bool
isAdmin(std::string_view ip) const
static bool
isAdmin(std::string_view ip)
{
return ip == "127.0.0.1";
}
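
Several functions in this diff (Required::verify, isAdmin, the spec() accessors) gain `static` because they never touch instance state, which is what readability-convert-member-functions-to-static suggests. A minimal sketch with a hypothetical class:

#include <string_view>

class AdminCheck
{
public:
    // Uses no member data, so it can be static and called without an object.
    static bool
    isLoopback(std::string_view ip)
    {
        return ip == "127.0.0.1";
    }
};

// Usage: bool const ok = AdminCheck::isLoopback("127.0.0.1");
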

View File

@@ -80,10 +80,7 @@ public:
request.at("accounts").as_bool()));
};
if (checkAccountInfoForward() or checkLedgerForward())
return true;
return false;
return static_cast<bool>(checkAccountInfoForward() or checkLedgerForward());
}
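
The rewrite above collapses an `if (...) return true; return false;` pair into a single return; the static_cast<bool> presumably keeps the boolean conversion explicit for the readability checks. A simplified sketch with hypothetical parameters:

#include <optional>

bool shouldForward(std::optional<int> const& accountHint, bool ledgerRequested)
{
    // Was effectively: if (accountHint or ledgerRequested) return true; return false;
    return accountHint.has_value() or ledgerRequested;
}
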
Result
@@ -92,17 +89,16 @@ public:
auto toForward = ctx.params;
toForward["command"] = ctx.method;
if (auto const res = balancer_->forwardToRippled(toForward, ctx.clientIp, ctx.yield); not res)
auto const res = balancer_->forwardToRippled(toForward, ctx.clientIp, ctx.yield);
if (not res)
{
notifyFailedToForward(ctx.method);
return Status{RippledError::rpcFAILED_TO_FORWARD};
}
else
{
notifyForwarded(ctx.method);
return *res;
}
}
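
Hoisting the result into a named variable and dropping the `else` after the early return follows readability-else-after-return: once the failure path returns, the success path can continue at the outer level. A generic sketch of that shape (hypothetical names):

#include <optional>
#include <string>

std::string resolve(std::optional<std::string> const& response)
{
    if (not response)
    {
        return "failed_to_forward";  // failure path returns early
    }
    // no else needed: the success path continues here
    return *response;
}
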
bool
isProxied(std::string const& method) const

View File

@@ -34,10 +34,10 @@ class LoadBalancer;
} // namespace etl
namespace rpc {
class Counters;
}
} // namespace rpc
namespace feed {
class SubscriptionManager;
}
} // namespace feed
namespace rpc::detail {

View File

@@ -50,16 +50,19 @@ struct DefaultProcessor final
// real handler is given expected Input, not json
if (!ret)
{
return Error{ret.error()}; // forward Status
else
}
return value_from(ret.value());
}
else if constexpr (SomeHandlerWithoutInput<HandlerType>)
{
// no input to pass, ignore the value
if (auto const ret = handler.process(ctx); not ret)
auto const ret = handler.process(ctx);
if (not ret)
{
return Error{ret.error()}; // forward Status
else
}
return value_from(ret.value());
}
else

View File

@@ -23,7 +23,7 @@
namespace rpc {
void
AccountChannelsHandler::addChannel(std::vector<ChannelResponse>& jsonChannels, ripple::SLE const& channelSle) const
AccountChannelsHandler::addChannel(std::vector<ChannelResponse>& jsonChannels, ripple::SLE const& channelSle)
{
ChannelResponse channel;
channel.channelID = ripple::to_string(channelSle.key());
@@ -128,10 +128,14 @@ tag_invoke(boost::json::value_to_tag<AccountChannelsHandler::Input>, boost::json
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str());
}
}
return input;
}
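
The repeated ledger_index hunks in these handlers only add braces around single-statement branches (readability-braces-around-statements); the parsing logic itself is unchanged. A condensed, hypothetical version of that shape:

#include <boost/json.hpp>
#include <cstdint>
#include <optional>
#include <string>

struct Input  // reduced to the ledger_index field for illustration
{
    std::optional<uint32_t> ledgerIndex;
};

Input parseLedgerIndex(boost::json::object const& jsonObject)
{
    Input input;
    if (jsonObject.contains("ledger_index"))
    {
        if (!jsonObject.at("ledger_index").is_string())
        {
            input.ledgerIndex = jsonObject.at("ledger_index").as_int64();
        }
        else if (jsonObject.at("ledger_index").as_string() != "validated")
        {
            input.ledgerIndex = std::stoi(jsonObject.at("ledger_index").as_string().c_str());
        }
        // "validated" itself is left for the framework to resolve
    }
    return input;
}
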

View File

@@ -56,7 +56,7 @@ public:
std::string balance;
std::optional<std::string> publicKey;
std::optional<std::string> publicKeyHex;
uint32_t settleDelay;
uint32_t settleDelay{};
std::optional<uint32_t> expiration;
std::optional<uint32_t> cancelAfter;
std::optional<uint32_t> sourceTag;
@@ -68,10 +68,10 @@ public:
std::vector<ChannelResponse> channels;
std::string account;
std::string ledgerHash;
uint32_t ledgerIndex;
uint32_t ledgerIndex{};
// validated should be sent via framework
bool validated = true;
uint32_t limit;
uint32_t limit{};
std::optional<std::string> marker;
};
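
The many `uint32_t ledgerIndex{};`-style edits add default member initializers so these Output/Input structs never carry indeterminate integers (cppcoreguidelines-pro-type-member-init). A reduced sketch:

#include <cstdint>
#include <string>

struct Output  // hypothetical, trimmed down
{
    std::string ledgerHash;
    uint32_t ledgerIndex{};  // value-initialized to 0 instead of left indeterminate
    uint32_t limit{};
    bool validated = true;
};

// After `Output out;`, reading out.ledgerIndex is well-defined (0) even before any assignment.
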
@@ -92,8 +92,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(account), validation::Required{}, validation::AccountValidator},
@@ -114,8 +114,8 @@ public:
process(Input input, Context const& ctx) const;
private:
void
addChannel(std::vector<ChannelResponse>& jsonLines, ripple::SLE const& line) const;
static void
addChannel(std::vector<ChannelResponse>& jsonChannels, ripple::SLE const& channelSle);
friend void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output);

View File

@@ -106,10 +106,14 @@ tag_invoke(boost::json::value_to_tag<AccountCurrenciesHandler::Input>, boost::js
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str());
}
}
return input;
}

View File

@@ -44,7 +44,7 @@ public:
struct Output
{
std::string ledgerHash;
uint32_t ledgerIndex;
uint32_t ledgerIndex{};
std::set<std::string> receiveCurrencies;
std::set<std::string> sendCurrencies;
// validated should be sent via framework
@@ -65,8 +65,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(account), validation::Required{}, validation::AccountValidator},

View File

@@ -128,7 +128,7 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountInfoHandl
if (output.isClawbackEnabled)
{
lsFlags.push_back({"allowTrustLineClawback", ripple::lsfAllowTrustLineClawback});
lsFlags.emplace_back("allowTrustLineClawback", ripple::lsfAllowTrustLineClawback);
}
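
Switching push_back({...}) to emplace_back(...) is the modernize-use-emplace fix: the element is constructed in place instead of via a temporary. A standalone sketch with a hypothetical flag value (the real ripple::lsfAllowTrustLineClawback constant is not reproduced here):

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

int main()
{
    constexpr uint32_t allowClawbackFlag = 1u << 31;  // placeholder value

    std::vector<std::pair<std::string, uint32_t>> lsFlags;
    // push_back({"allowTrustLineClawback", allowClawbackFlag}) would build a temporary pair first;
    // emplace_back forwards the arguments and constructs the pair in place.
    lsFlags.emplace_back("allowTrustLineClawback", allowClawbackFlag);
    return 0;
}
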
boost::json::object acctFlags;
@@ -146,10 +146,14 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountInfoHandl
std::back_inserter(signers),
[](auto const& signerList) { return toJson(signerList); });
if (output.apiVersion == 1)
{
jv.as_object()[JS(account_data)].as_object()[JS(signer_lists)] = std::move(signers);
}
else
{
jv.as_object()[JS(signer_lists)] = signers;
}
}
}
AccountInfoHandler::Input
@@ -170,10 +174,14 @@ tag_invoke(boost::json::value_to_tag<AccountInfoHandler::Input>, boost::json::va
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str());
}
}
if (jsonObject.contains(JS(signer_lists)))
input.signerLists = boost::json::value_to<JsonBool>(jsonObject.at(JS(signer_lists)));

View File

@@ -86,8 +86,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpecV1 = RpcSpec{
{JS(account), validation::AccountValidator},

View File

@@ -26,7 +26,7 @@ AccountLinesHandler::addLine(
std::vector<LineResponse>& lines,
ripple::SLE const& lineSle,
ripple::AccountID const& account,
std::optional<ripple::AccountID> const& peerAccount) const
std::optional<ripple::AccountID> const& peerAccount)
{
auto const flags = lineSle.getFieldU32(ripple::sfFlags);
auto const lowLimit = lineSle.getFieldAmount(ripple::sfLowLimit);
@@ -52,12 +52,12 @@ AccountLinesHandler::addLine(
if (not viewLowest)
balance.negate();
bool const lineAuth = flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth);
bool const lineAuthPeer = flags & (not viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth);
bool const lineNoRipple = flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple);
bool const lineNoRipplePeer = flags & (not viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple);
bool const lineFreeze = flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze);
bool const lineFreezePeer = flags & (not viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze);
bool const lineAuth = (flags & (viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth)) != 0u;
bool const lineAuthPeer = (flags & (not viewLowest ? ripple::lsfLowAuth : ripple::lsfHighAuth)) != 0u;
bool const lineNoRipple = (flags & (viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple)) != 0u;
bool const lineNoRipplePeer = (flags & (not viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple)) != 0u;
bool const lineFreeze = (flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze)) != 0u;
bool const lineFreezePeer = (flags & (not viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze)) != 0u;
ripple::STAmount const& saBalance = balance;
ripple::STAmount const& saLimit = lineLimit;
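
Comparing the masked flags against 0u instead of relying on the integer-to-bool conversion matches readability-implicit-bool-conversion. A minimal sketch with a hypothetical mask standing in for the ripple::lsf* constants:

#include <cstdint>

constexpr uint32_t kLowAuthFlag = 0x00010000u;  // placeholder mask

bool isLowAuth(uint32_t flags)
{
    return (flags & kLowAuthFlag) != 0u;  // explicit comparison rather than implicit bool conversion
}
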
@@ -119,9 +119,13 @@ AccountLinesHandler::process(AccountLinesHandler::Input input, Context const& ct
if (input.ignoreDefault)
{
if (sle.getFieldAmount(ripple::sfLowLimit).getIssuer() == accountID)
ignore = !(sle.getFieldU32(ripple::sfFlags) & ripple::lsfLowReserve);
{
ignore = ((sle.getFieldU32(ripple::sfFlags) & ripple::lsfLowReserve) == 0u);
}
else
ignore = !(sle.getFieldU32(ripple::sfFlags) & ripple::lsfHighReserve);
{
ignore = ((sle.getFieldU32(ripple::sfFlags) & ripple::lsfHighReserve) == 0u);
}
}
if (not ignore)
@@ -174,10 +178,14 @@ tag_invoke(boost::json::value_to_tag<AccountLinesHandler::Input>, boost::json::v
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str());
}
}
return input;
}

View File

@@ -53,10 +53,10 @@ public:
std::string currency;
std::string limit;
std::string limitPeer;
uint32_t qualityIn;
uint32_t qualityOut;
bool noRipple;
bool noRipplePeer;
uint32_t qualityIn{};
uint32_t qualityOut{};
bool noRipple{};
bool noRipplePeer{};
std::optional<bool> authorized;
std::optional<bool> peerAuthorized;
std::optional<bool> freeze;
@@ -68,10 +68,10 @@ public:
std::string account;
std::vector<LineResponse> lines;
std::string ledgerHash;
uint32_t ledgerIndex;
uint32_t ledgerIndex{};
bool validated = true; // should be sent via framework
std::optional<std::string> marker;
uint32_t limit;
uint32_t limit{};
};
struct Input
@@ -92,8 +92,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(account),
@@ -117,12 +117,12 @@ public:
process(Input input, Context const& ctx) const;
private:
void
static void
addLine(
std::vector<LineResponse>& lines,
ripple::SLE const& lineSle,
ripple::AccountID const& account,
std::optional<ripple::AccountID> const& peerAccount) const;
std::optional<ripple::AccountID> const& peerAccount);
private:
friend void

View File

@@ -76,7 +76,7 @@ AccountNFTsHandler::process(AccountNFTsHandler::Input input, Context const& ctx)
obj[SFS(sfNFTokenTaxon)] = ripple::nft::toUInt32(ripple::nft::getTaxon(nftokenID));
obj[JS(nft_serial)] = ripple::nft::getSerial(nftokenID);
if (std::uint16_t xferFee = {ripple::nft::getTransferFee(nftokenID)})
if (std::uint16_t const xferFee = {ripple::nft::getTransferFee(nftokenID)})
obj[SFS(sfTransferFee)] = xferFee;
}
@@ -132,10 +132,14 @@ tag_invoke(boost::json::value_to_tag<AccountNFTsHandler::Input>, boost::json::va
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str());
}
}
if (jsonObject.contains(JS(limit)))
input.limit = jsonObject.at(JS(limit)).as_int64();

View File

@@ -68,8 +68,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(account), validation::Required{}, validation::AccountValidator},

View File

@@ -149,10 +149,14 @@ tag_invoke(boost::json::value_to_tag<AccountObjectsHandler::Input>, boost::json:
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str());
}
}
if (jsonObject.contains(JS(type)))
input.type = AccountObjectsHandler::TYPESMAP.at(jv.at(JS(type)).as_string().c_str());

View File

@@ -54,9 +54,9 @@ public:
{
std::string account;
std::string ledgerHash;
uint32_t ledgerIndex;
uint32_t ledgerIndex{};
std::optional<std::string> marker;
uint32_t limit;
uint32_t limit{};
std::vector<ripple::SLE> accountObjects;
bool validated = true;
};
@@ -79,8 +79,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(account), validation::Required{}, validation::AccountValidator},

View File

@@ -22,7 +22,7 @@
namespace rpc {
void
AccountOffersHandler::addOffer(std::vector<Offer>& offers, ripple::SLE const& offerSle) const
AccountOffersHandler::addOffer(std::vector<Offer>& offers, ripple::SLE const& offerSle)
{
auto offer = AccountOffersHandler::Offer();
offer.takerPays = offerSle.getFieldAmount(ripple::sfTakerPays);
@@ -115,13 +115,17 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountOffersHan
auto const convertAmount = [&](const char* field, ripple::STAmount const& amount) {
if (amount.native())
{
jsonObject[field] = amount.getText();
}
else
{
jsonObject[field] = {
{JS(currency), ripple::to_string(amount.getCurrency())},
{JS(issuer), ripple::to_string(amount.getIssuer())},
{JS(value), amount.getText()},
};
}
};
convertAmount(JS(taker_pays), offer.takerPays);
@@ -143,10 +147,14 @@ tag_invoke(boost::json::value_to_tag<AccountOffersHandler::Input>, boost::json::
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str());
}
}
if (jsonObject.contains(JS(limit)))
input.limit = jsonObject.at(JS(limit)).as_int64();

View File

@@ -44,8 +44,8 @@ public:
struct Offer
{
uint32_t flags;
uint32_t seq;
uint32_t flags{};
uint32_t seq{};
ripple::STAmount takerGets;
ripple::STAmount takerPays;
std::string quality;
@@ -56,7 +56,7 @@ public:
{
std::string account;
std::string ledgerHash;
uint32_t ledgerIndex;
uint32_t ledgerIndex{};
std::vector<Offer> offers;
std::optional<std::string> marker;
// validated should be sent via framework
@@ -79,8 +79,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(account), validation::Required{}, validation::AccountValidator},
@@ -99,8 +99,8 @@ public:
process(Input input, Context const& ctx) const;
private:
void
addOffer(std::vector<Offer>& offers, ripple::SLE const& offerSle) const;
static void
addOffer(std::vector<Offer>& offers, ripple::SLE const& offerSle);
friend void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output);

View File

@@ -128,10 +128,14 @@ AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) con
// if forward, start at minIndex - 1, because the SQL query is exclusive, we need to include the 0 transaction
// index of minIndex
if (input.forward)
{
cursor = {minIndex - 1, std::numeric_limits<int32_t>::max()};
}
else
{
cursor = {maxIndex, std::numeric_limits<int32_t>::max()};
}
}
auto const limit = input.limit.value_or(LIMIT_DEFAULT);
auto const accountID = accountFromStringStrict(input.account);
@@ -156,7 +160,7 @@ AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) con
response.marker = std::nullopt;
break;
}
else if (txnPlusMeta.ledgerSequence > maxIndex && !input.forward)
if (txnPlusMeta.ledgerSequence > maxIndex && !input.forward)
{
LOG(log_.debug()) << "Skipping over transactions from incomplete ledger";
continue;
@@ -250,13 +254,19 @@ tag_invoke(boost::json::value_to_tag<AccountTxHandler::Input>, boost::json::valu
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jsonObject.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jsonObject.at(JS(ledger_index)).as_string().c_str());
}
else
{
// could not get the latest validated ledger seq here, using this flag to indicate that
input.usingValidatedLedger = true;
}
}
if (jsonObject.contains(JS(binary)))
input.binary = boost::json::value_to<JsonBool>(jsonObject.at(JS(binary)));
@@ -268,9 +278,11 @@ tag_invoke(boost::json::value_to_tag<AccountTxHandler::Input>, boost::json::valu
input.limit = jsonObject.at(JS(limit)).as_int64();
if (jsonObject.contains(JS(marker)))
{
input.marker = AccountTxHandler::Marker{
jsonObject.at(JS(marker)).as_object().at(JS(ledger)).as_int64(),
jsonObject.at(JS(marker)).as_object().at(JS(seq)).as_int64()};
}
if (jsonObject.contains("tx_type"))
{

View File

@@ -57,8 +57,8 @@ public:
struct Output
{
std::string account;
uint32_t ledgerIndexMin;
uint32_t ledgerIndexMax;
uint32_t ledgerIndexMin{0};
uint32_t ledgerIndexMax{0};
std::optional<uint32_t> limit;
std::optional<Marker> marker;
// TODO: use a better type than json
@@ -90,8 +90,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpecForV1 = RpcSpec{
{JS(account), validation::Required{}, validation::AccountValidator},

View File

@@ -70,15 +70,19 @@ tag_invoke(boost::json::value_to_tag<BookChangesHandler::Input>, boost::json::va
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str());
}
}
return input;
}
[[nodiscard]] boost::json::object const
[[nodiscard]] boost::json::object
computeBookChanges(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions)
{
using boost::json::value_from;
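
Dropping the const from the by-value return type is the readability-const-return-type fix: a const-qualified temporary only prevents the caller from moving out of it and offers nothing in exchange. A trivial illustration:

#include <string>

std::string makeGreeting()  // previously the shape was: std::string const makeGreeting()
{
    return "hello";
}
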

View File

@@ -40,8 +40,8 @@ public:
struct Output
{
std::string ledgerHash;
uint32_t ledgerIndex;
uint32_t ledgerTime;
uint32_t ledgerIndex{};
uint32_t ledgerTime{};
std::vector<BookChange> bookChanges;
bool validated = true;
};
@@ -59,8 +59,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(ledger_hash), validation::Uint256HexStringValidator},

View File

@@ -84,10 +84,14 @@ tag_invoke(boost::json::value_to_tag<BookOffersHandler::Input>, boost::json::val
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str());
}
}
if (jsonObject.contains(JS(taker)))
input.taker = accountFromStringStrict(jv.at(JS(taker)).as_string().c_str());

View File

@@ -70,8 +70,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(taker_gets),

View File

@@ -63,7 +63,7 @@ DepositAuthorizedHandler::process(DepositAuthorizedHandler::Input input, Context
// Check destination for the DepositAuth flag.
// If that flag is not set then a deposit should be just fine.
if (sle.getFieldU32(ripple::sfFlags) & ripple::lsfDepositAuth)
if ((sle.getFieldU32(ripple::sfFlags) & ripple::lsfDepositAuth) != 0u)
{
// See if a preauthorization entry is in the ledger.
auto const depositPreauthKeylet = ripple::keylet::depositPreauth(*destinationAccountID, *sourceAccountID);
@@ -91,10 +91,14 @@ tag_invoke(boost::json::value_to_tag<DepositAuthorizedHandler::Input>, boost::js
if (jsonObject.contains(JS(ledger_index)))
{
if (!jsonObject.at(JS(ledger_index)).is_string())
{
input.ledgerIndex = jv.at(JS(ledger_index)).as_int64();
}
else if (jsonObject.at(JS(ledger_index)).as_string() != "validated")
{
input.ledgerIndex = std::stoi(jv.at(JS(ledger_index)).as_string().c_str());
}
}
return input;
}

View File

@@ -47,7 +47,7 @@ public:
std::string sourceAccount;
std::string destinationAccount;
std::string ledgerHash;
uint32_t ledgerIndex;
uint32_t ledgerIndex{};
// validated should be sent via framework
bool validated = true;
};
@@ -67,8 +67,8 @@ public:
{
}
RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion) const
static RpcSpecConstRef
spec([[maybe_unused]] uint32_t apiVersion)
{
static auto const rpcSpec = RpcSpec{
{JS(source_account), validation::Required{}, validation::AccountValidator},

Some files were not shown because too many files have changed in this diff.