Compare commits


10 Commits

Author SHA1 Message Date
Ed Hennis
35a07a4b81 Format to 80 chars 2026-02-17 12:48:35 -05:00
Ed Hennis
b854f09744 Format to 100 chars 2026-02-17 12:47:07 -05:00
Jingchen
36240116a5 refactor: Decouple app/tx from Application and Config (#6227)
This change decouples app/tx from `Application` and `Config` to clear the way for moving transactors to `libxrpl`.
2026-02-17 11:29:53 -05:00
Sergey Kuznetsov
958d8f3754 chore: Update clang-format to 21.1.8 (#6352) 2026-02-16 14:31:18 -05:00
Jingchen
ac0ad3627f refactor: Modularize HashRouter, Conditions, and OrderBookDB (#6226)
This change modularizes additional components by moving code to `libxrpl`.
2026-02-13 10:34:37 -05:00
nuxtreact
cd218346ff chore: Fix minor issues in comments (#6346) 2026-02-12 14:55:27 -05:00
Jingchen
5edd3566f7 refactor: Modularize the NetworkOPs interface (#6225)
This change moves the NetworkOPs interface into `libxrpl` while leaving its implementation in `xrpld`.
2026-02-12 13:15:03 -05:00
Pratik Mankawde
11e8d1f8a2 chore: Fix gcov lib coverage build failure on macOS (#6350)
For coverage builds, we try to link against the environment-specific `gcov` library. But macOS doesn't provide this library (and thus lacks the coverage tools to generate reports), so coverage builds on that platform were failing at the link step.

We don't actually need to force this linking explicitly, because `CodeCoverage.cmake` already has the correct detection logic (currently on lines 177-193), which is invoked when the `--coverage` flag is provided:
* AppleClang: Uses `xcrun -f llvm-cov` to set `GCOV_TOOL="llvm-cov gcov"`.
* Clang: Finds `llvm-cov` to set `GCOV_TOOL="llvm-cov gcov"`.
* GCC: Finds `gcov` to set `GCOV_TOOL="gcov"`.
The `GCOV_TOOL` is then passed to `gcovr` on line 416, so the correct tool is used for processing coverage data.

This change therefore removes the `gcov` suffix from lines 473 and 475 in the `CodeCoverage.cmake` file.
2026-02-12 06:11:26 -05:00
Jingchen
9f17d10348 refactor: Modularize RelationalDB (#6224)
The rdb module was not well designed; this change fixes that. The module previously had three classes:
1) The abstract class `RelationalDatabase`.
2) The abstract class `SQLiteDatabase`, which inherited from `RelationalDatabase` and added some pure virtual methods.
3) The concrete class `SQLiteDatabaseImp`, which inherited from `SQLiteDatabase` and implemented all methods.

The updated code simplifies this as follows (see the sketch below):
* `SQLiteDatabaseImp` has become `SQLiteDatabase`, and
* the former `SQLiteDatabase` has been merged into `RelationalDatabase`.
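A minimal C++ sketch of the hierarchy change described above, assuming hypothetical method names. Only the class names `RelationalDatabase`, `SQLiteDatabase`, and `SQLiteDatabaseImp` come from the commit message; the methods are placeholders, not the module's real API.

#include <cstdint>

// Before (illustrative): three classes, two of them abstract.
//   struct RelationalDatabase      { virtual ~RelationalDatabase() = default;
//                                    virtual std::uint32_t minLedgerSeq() = 0; };
//   struct SQLiteDatabase          : RelationalDatabase
//                                  { virtual void deleteBySeq(std::uint32_t) = 0; };
//   struct SQLiteDatabaseImp final : SQLiteDatabase { /* implements everything */ };

// After (illustrative): the former SQLiteDatabase's pure virtual methods are folded
// into RelationalDatabase, and the concrete class takes the SQLiteDatabase name.
struct RelationalDatabase
{
    virtual ~RelationalDatabase() = default;
    virtual std::uint32_t minLedgerSeq() = 0;         // hypothetical method
    virtual void deleteBySeq(std::uint32_t seq) = 0;  // hypothetical method
};

struct SQLiteDatabase final : RelationalDatabase
{
    std::uint32_t minLedgerSeq() override { return 0; }
    void deleteBySeq(std::uint32_t) override {}
};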
2026-02-11 16:22:01 +00:00
Jingchen
ef284692db refactor: Modularize WalletDB and Manifest (#6223)
This change modularizes `WalletDB` and `Manifest`. Note that the wallet DB has nothing to do with account wallets; it stores node configuration, which is why it depends on the manifest code.
2026-02-11 13:42:31 +00:00
1152 changed files with 69538 additions and 146431 deletions

View File

@@ -37,7 +37,7 @@ BinPackParameters: false
BreakBeforeBinaryOperators: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
ColumnLimit: 80
CommentPragmas: "^ IWYU pragma:"
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4

View File

@@ -4,14 +4,11 @@ Loop: test.jtx test.toplevel
Loop: test.jtx test.unit_test
test.unit_test == test.jtx
Loop: xrpld.app xrpld.core
xrpld.app > xrpld.core
Loop: xrpld.app xrpld.overlay
xrpld.overlay > xrpld.app
xrpld.overlay ~= xrpld.app
Loop: xrpld.app xrpld.peerfinder
xrpld.peerfinder ~= xrpld.app
xrpld.peerfinder == xrpld.app
Loop: xrpld.app xrpld.rpc
xrpld.rpc > xrpld.app

View File

@@ -1,4 +1,6 @@
libxrpl.basics > xrpl.basics
libxrpl.conditions > xrpl.basics
libxrpl.conditions > xrpl.conditions
libxrpl.core > xrpl.basics
libxrpl.core > xrpl.core
libxrpl.crypto > xrpl.basics
@@ -17,12 +19,15 @@ libxrpl.nodestore > xrpl.protocol
libxrpl.protocol > xrpl.basics
libxrpl.protocol > xrpl.json
libxrpl.protocol > xrpl.protocol
libxrpl.rdb > xrpl.basics
libxrpl.rdb > xrpl.rdb
libxrpl.resource > xrpl.basics
libxrpl.resource > xrpl.json
libxrpl.resource > xrpl.resource
libxrpl.server > xrpl.basics
libxrpl.server > xrpl.json
libxrpl.server > xrpl.protocol
libxrpl.server > xrpl.rdb
libxrpl.server > xrpl.server
libxrpl.shamap > xrpl.basics
libxrpl.shamap > xrpl.protocol
@@ -41,7 +46,9 @@ test.app > xrpl.json
test.app > xrpl.ledger
test.app > xrpl.nodestore
test.app > xrpl.protocol
test.app > xrpl.rdb
test.app > xrpl.resource
test.app > xrpl.server
test.basics > test.jtx
test.basics > test.unit_test
test.basics > xrpl.basics
@@ -51,7 +58,7 @@ test.basics > xrpl.json
test.basics > xrpl.protocol
test.beast > xrpl.basics
test.conditions > xrpl.basics
test.conditions > xrpld.conditions
test.conditions > xrpl.conditions
test.consensus > test.csf
test.consensus > test.toplevel
test.consensus > test.unit_test
@@ -67,6 +74,7 @@ test.core > xrpl.basics
test.core > xrpl.core
test.core > xrpld.core
test.core > xrpl.json
test.core > xrpl.rdb
test.core > xrpl.server
test.csf > xrpl.basics
test.csf > xrpld.consensus
@@ -75,6 +83,7 @@ test.csf > xrpl.protocol
test.json > test.jtx
test.json > xrpl.json
test.jtx > xrpl.basics
test.jtx > xrpl.core
test.jtx > xrpld.app
test.jtx > xrpld.core
test.jtx > xrpld.rpc
@@ -95,8 +104,8 @@ test.nodestore > test.jtx
test.nodestore > test.toplevel
test.nodestore > test.unit_test
test.nodestore > xrpl.basics
test.nodestore > xrpld.core
test.nodestore > xrpl.nodestore
test.nodestore > xrpl.rdb
test.overlay > test.jtx
test.overlay > test.toplevel
test.overlay > test.unit_test
@@ -131,6 +140,7 @@ test.rpc > xrpld.rpc
test.rpc > xrpl.json
test.rpc > xrpl.protocol
test.rpc > xrpl.resource
test.rpc > xrpl.server
test.server > test.jtx
test.server > test.toplevel
test.server > test.unit_test
@@ -151,40 +161,52 @@ test.unit_test > xrpl.basics
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.conditions > xrpl.basics
xrpl.conditions > xrpl.protocol
xrpl.core > xrpl.basics
xrpl.core > xrpl.json
xrpl.core > xrpl.ledger
xrpl.core > xrpl.protocol
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol
xrpl.ledger > xrpl.server
xrpl.net > xrpl.basics
xrpl.nodestore > xrpl.basics
xrpl.nodestore > xrpl.protocol
xrpl.protocol > xrpl.basics
xrpl.protocol > xrpl.json
xrpl.rdb > xrpl.basics
xrpl.rdb > xrpl.core
xrpl.rdb > xrpl.protocol
xrpl.resource > xrpl.basics
xrpl.resource > xrpl.json
xrpl.resource > xrpl.protocol
xrpl.server > xrpl.basics
xrpl.server > xrpl.core
xrpl.server > xrpl.json
xrpl.server > xrpl.protocol
xrpl.server > xrpl.rdb
xrpl.server > xrpl.resource
xrpl.server > xrpl.shamap
xrpl.shamap > xrpl.basics
xrpl.shamap > xrpl.nodestore
xrpl.shamap > xrpl.protocol
xrpld.app > test.unit_test
xrpld.app > xrpl.basics
xrpld.app > xrpl.conditions
xrpld.app > xrpl.core
xrpld.app > xrpld.conditions
xrpld.app > xrpld.consensus
xrpld.app > xrpld.core
xrpld.app > xrpl.json
xrpld.app > xrpl.ledger
xrpld.app > xrpl.net
xrpld.app > xrpl.nodestore
xrpld.app > xrpl.protocol
xrpld.app > xrpl.rdb
xrpld.app > xrpl.resource
xrpld.app > xrpl.server
xrpld.app > xrpl.shamap
xrpld.conditions > xrpl.basics
xrpld.conditions > xrpl.protocol
xrpld.consensus > xrpl.basics
xrpld.consensus > xrpl.json
xrpld.consensus > xrpl.protocol
@@ -193,17 +215,20 @@ xrpld.core > xrpl.core
xrpld.core > xrpl.json
xrpld.core > xrpl.net
xrpld.core > xrpl.protocol
xrpld.core > xrpl.rdb
xrpld.overlay > xrpl.basics
xrpld.overlay > xrpl.core
xrpld.overlay > xrpld.core
xrpld.overlay > xrpld.peerfinder
xrpld.overlay > xrpl.json
xrpld.overlay > xrpl.protocol
xrpld.overlay > xrpl.rdb
xrpld.overlay > xrpl.resource
xrpld.overlay > xrpl.server
xrpld.peerfinder > xrpl.basics
xrpld.peerfinder > xrpld.core
xrpld.peerfinder > xrpl.protocol
xrpld.peerfinder > xrpl.rdb
xrpld.perflog > xrpl.basics
xrpld.perflog > xrpl.core
xrpld.perflog > xrpld.rpc
@@ -216,6 +241,7 @@ xrpld.rpc > xrpl.ledger
xrpld.rpc > xrpl.net
xrpld.rpc > xrpl.nodestore
xrpld.rpc > xrpl.protocol
xrpld.rpc > xrpl.rdb
xrpld.rpc > xrpl.resource
xrpld.rpc > xrpl.server
xrpld.shamap > xrpl.shamap

.gitignore
View File

@@ -71,3 +71,6 @@ DerivedData
/.augment
/.claude
/CLAUDE.md
# clangd cache
/.cache

View File

@@ -20,7 +20,7 @@ repos:
args: [--assume-in-merge]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 7d85583be209cb547946c82fbe51f4bc5dd1d017 # frozen: v18.1.8
rev: 75ca4ad908dc4a99f57921f29b7e6c1521e10b26 # frozen: v21.1.8
hooks:
- id: clang-format
args: [--style=file]

View File

@@ -103,7 +103,6 @@ find_package(OpenSSL REQUIRED)
find_package(secp256k1 REQUIRED)
find_package(SOCI REQUIRED)
find_package(SQLite3 REQUIRED)
find_package(wasmi REQUIRED)
find_package(xxHash REQUIRED)
target_link_libraries(

View File

@@ -219,7 +219,7 @@ coherent rather than a set of _thou shalt not_ commandments.
## Formatting
All code must conform to `clang-format` version 18,
All code must conform to `clang-format` version 21,
according to the settings in [`.clang-format`](./.clang-format),
unless the result would be unreasonably difficult to read or maintain.
To demarcate lines that should be left as-is, surround them with comments like

View File

@@ -466,11 +466,6 @@ function (add_code_coverage_to_target name scope)
target_compile_options(${name} ${scope} $<$<COMPILE_LANGUAGE:CXX>:${COVERAGE_CXX_COMPILER_FLAGS}>
$<$<COMPILE_LANGUAGE:C>:${COVERAGE_C_COMPILER_FLAGS}>)
target_link_libraries(
${name}
${scope}
$<$<LINK_LANGUAGE:CXX>:${COVERAGE_CXX_LINKER_FLAGS}
gcov>
$<$<LINK_LANGUAGE:C>:${COVERAGE_C_LINKER_FLAGS}
gcov>)
target_link_libraries(${name} ${scope} $<$<LINK_LANGUAGE:CXX>:${COVERAGE_CXX_LINKER_FLAGS}>
$<$<LINK_LANGUAGE:C>:${COVERAGE_C_LINKER_FLAGS}>)
endfunction () # add_code_coverage_to_target

View File

@@ -45,7 +45,6 @@ target_link_libraries(
Xrpl::opts
Xrpl::syslibs
secp256k1::secp256k1
wasmi::wasmi
xrpl.libpb
xxHash::xxhash
$<$<BOOL:${voidstar}>:antithesis-sdk-cpp>)
@@ -85,9 +84,6 @@ add_module(xrpl net)
target_link_libraries(xrpl.libxrpl.net PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol
xrpl.libxrpl.resource)
add_module(xrpl server)
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
add_module(xrpl nodestore)
target_link_libraries(xrpl.libxrpl.nodestore PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol)
@@ -95,8 +91,25 @@ add_module(xrpl shamap)
target_link_libraries(xrpl.libxrpl.shamap PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.crypto xrpl.libxrpl.protocol
xrpl.libxrpl.nodestore)
add_module(xrpl rdb)
target_link_libraries(xrpl.libxrpl.rdb PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.core)
add_module(xrpl server)
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol xrpl.libxrpl.core xrpl.libxrpl.rdb
xrpl.libxrpl.resource)
add_module(xrpl conditions)
target_link_libraries(xrpl.libxrpl.conditions PUBLIC xrpl.libxrpl.server)
add_module(xrpl ledger)
target_link_libraries(xrpl.libxrpl.ledger PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol)
target_link_libraries(
xrpl.libxrpl.ledger
PUBLIC xrpl.libxrpl.basics
xrpl.libxrpl.json
xrpl.libxrpl.protocol
xrpl.libxrpl.rdb
xrpl.libxrpl.server
xrpl.libxrpl.conditions)
add_library(xrpl.libxrpl)
set_target_properties(xrpl.libxrpl PROPERTIES OUTPUT_NAME xrpl)
@@ -111,16 +124,18 @@ target_link_modules(
PUBLIC
basics
beast
conditions
core
crypto
json
ledger
net
nodestore
protocol
rdb
resource
server
nodestore
shamap
net
ledger)
shamap)
# All headers in libxrpl are in modules.
# Uncomment this stanza if you have not yet moved new headers into a module.

View File

@@ -20,9 +20,11 @@ install(TARGETS common
xrpl.libxrpl
xrpl.libxrpl.basics
xrpl.libxrpl.beast
xrpl.libxrpl.conditions
xrpl.libxrpl.core
xrpl.libxrpl.crypto
xrpl.libxrpl.json
xrpl.libxrpl.rdb
xrpl.libxrpl.ledger
xrpl.libxrpl.net
xrpl.libxrpl.nodestore

View File

@@ -3,7 +3,6 @@
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1765850149.987",
"wasmi/1.0.6#407c9db14601a8af1c7dd3b388f3e4cd%1768164779.349",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1765850149.926",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1765850149.46",
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1765850147.878",

View File

@@ -35,7 +35,6 @@ class Xrpl(ConanFile):
"openssl/3.5.5",
"secp256k1/0.7.1",
"soci/4.0.3",
"wasmi/1.0.6",
"zlib/1.3.1",
]
@@ -216,7 +215,6 @@ class Xrpl(ConanFile):
"soci::soci",
"secp256k1::secp256k1",
"sqlite3::sqlite",
"wasmi::wasmi",
"xxhash::xxhash",
"zlib::zlib",
]

View File

@@ -6,8 +6,6 @@ ignorePaths:
- docs/**/*.puml
- cmake/**
- LICENSE.md
- src/test/app/wasm_fixtures/**/*.wat
- src/test/app/wasm_fixtures/*.c
language: en
allowCompoundWords: true # TODO (#6334)
ignoreRandomStrings: true
@@ -61,7 +59,6 @@ words:
- Britto
- Btrfs
- canonicality
- cdylib
- changespq
- checkme
- choco
@@ -290,7 +287,6 @@ words:
- venv
- vfalco
- vinnie
- wasmi
- wextra
- wptr
- writeme

View File

@@ -17,8 +17,8 @@ guideline is to maintain the standards that are used in those libraries.
## Guidelines
If you want to do something contrary to these guidelines, understand
why you're doing it. Think, use common sense, and consider that this
your changes will probably need to be maintained long after you've
why you're doing it. Think, use common sense, and consider that these
changes will probably need to be maintained long after you've
moved on to other projects.
- Use white space and blank lines to guide the eye and keep your intent clear.

View File

@@ -12,6 +12,8 @@ namespace xrpl {
@throws runtime_error
*/
void
extractTarLz4(boost::filesystem::path const& src, boost::filesystem::path const& dst);
extractTarLz4(
boost::filesystem::path const& src,
boost::filesystem::path const& dst);
} // namespace xrpl

View File

@@ -13,7 +13,8 @@
namespace xrpl {
using IniFileSections = std::unordered_map<std::string, std::vector<std::string>>;
using IniFileSections =
std::unordered_map<std::string, std::vector<std::string>>;
//------------------------------------------------------------------------------
@@ -84,7 +85,8 @@ public:
if (lines_.empty())
return "";
if (lines_.size() > 1)
Throw<std::runtime_error>("A legacy value must have exactly one line. Section: " + name_);
Throw<std::runtime_error>(
"A legacy value must have exactly one line. Section: " + name_);
return lines_[0];
}
@@ -230,7 +232,10 @@ public:
The previous value, if any, is overwritten.
*/
void
overwrite(std::string const& section, std::string const& key, std::string const& value);
overwrite(
std::string const& section,
std::string const& key,
std::string const& value);
/** Remove all the key/value pairs from the section.
*/
@@ -268,7 +273,9 @@ public:
bool
had_trailing_comments() const
{
return std::any_of(map_.cbegin(), map_.cend(), [](auto s) { return s.second.had_trailing_comments(); });
return std::any_of(map_.cbegin(), map_.cend(), [](auto s) {
return s.second.had_trailing_comments();
});
}
protected:
@@ -307,7 +314,10 @@ set(T& target, std::string const& name, Section const& section)
*/
template <class T>
bool
set(T& target, T const& defaultValue, std::string const& name, Section const& section)
set(T& target,
T const& defaultValue,
std::string const& name,
Section const& section)
{
bool found_and_valid = set<T>(target, name, section);
if (!found_and_valid)
@@ -322,7 +332,9 @@ set(T& target, T const& defaultValue, std::string const& name, Section const& se
// NOTE This routine might be more clumsy than the previous two
template <class T = std::string>
T
get(Section const& section, std::string const& name, T const& defaultValue = T{})
get(Section const& section,
std::string const& name,
T const& defaultValue = T{})
{
try
{

View File

@@ -24,7 +24,8 @@ public:
Buffer() = default;
/** Create an uninitialized buffer with the given size. */
explicit Buffer(std::size_t size) : p_(size ? new std::uint8_t[size] : nullptr), size_(size)
explicit Buffer(std::size_t size)
: p_(size ? new std::uint8_t[size] : nullptr), size_(size)
{
}
@@ -60,7 +61,8 @@ public:
/** Move-construct.
The other buffer is reset.
*/
Buffer(Buffer&& other) noexcept : p_(std::move(other.p_)), size_(other.size_)
Buffer(Buffer&& other) noexcept
: p_(std::move(other.p_)), size_(other.size_)
{
other.size_ = 0;
}
@@ -91,7 +93,8 @@ public:
{
// Ensure the slice isn't a subset of the buffer.
XRPL_ASSERT(
s.size() == 0 || size_ == 0 || s.data() < p_.get() || s.data() >= p_.get() + size_,
s.size() == 0 || size_ == 0 || s.data() < p_.get() ||
s.data() >= p_.get() + size_,
"xrpl::Buffer::operator=(Slice) : input not a subset");
if (auto p = alloc(s.size()))

View File

@@ -35,7 +35,10 @@ lz4Compress(void const* in, std::size_t inSize, BufferFactory&& bf)
auto compressed = bf(outCapacity);
auto compressedSize = LZ4_compress_default(
reinterpret_cast<char const*>(in), reinterpret_cast<char*>(compressed), inSize, outCapacity);
reinterpret_cast<char const*>(in),
reinterpret_cast<char*>(compressed),
inSize,
outCapacity);
if (compressedSize == 0)
Throw<std::runtime_error>("lz4 compress: failed");
@@ -66,8 +69,10 @@ lz4Decompress(
Throw<std::runtime_error>("lz4Decompress: integer overflow (output)");
if (LZ4_decompress_safe(
reinterpret_cast<char const*>(in), reinterpret_cast<char*>(decompressed), inSize, decompressedSize) !=
decompressedSize)
reinterpret_cast<char const*>(in),
reinterpret_cast<char*>(decompressed),
inSize,
decompressedSize) != decompressedSize)
Throw<std::runtime_error>("lz4Decompress: failed");
return decompressedSize;
@@ -83,7 +88,11 @@ lz4Decompress(
*/
template <typename InputStream>
std::size_t
lz4Decompress(InputStream& in, std::size_t inSize, std::uint8_t* decompressed, std::size_t decompressedSize)
lz4Decompress(
InputStream& in,
std::size_t inSize,
std::uint8_t* decompressed,
std::size_t decompressedSize)
{
std::vector<std::uint8_t> compressed;
std::uint8_t const* chunk = nullptr;
@@ -106,7 +115,9 @@ lz4Decompress(InputStream& in, std::size_t inSize, std::uint8_t* decompressed, s
compressed.resize(inSize);
}
chunkSize = chunkSize < (inSize - copiedInSize) ? chunkSize : (inSize - copiedInSize);
chunkSize = chunkSize < (inSize - copiedInSize)
? chunkSize
: (inSize - copiedInSize);
std::copy(chunk, chunk + chunkSize, compressed.data() + copiedInSize);
@@ -123,7 +134,8 @@ lz4Decompress(InputStream& in, std::size_t inSize, std::uint8_t* decompressed, s
if (in.ByteCount() > (currentBytes + copiedInSize))
in.BackUp(in.ByteCount() - currentBytes - copiedInSize);
if ((copiedInSize == 0 && chunkSize < inSize) || (copiedInSize > 0 && copiedInSize != inSize))
if ((copiedInSize == 0 && chunkSize < inSize) ||
(copiedInSize > 0 && copiedInSize != inSize))
Throw<std::runtime_error>("lz4 decompress: insufficient input size");
return lz4Decompress(chunk, inSize, decompressed, decompressedSize);

View File

@@ -55,7 +55,9 @@ private:
if (m_value != value_type())
{
std::size_t elapsed = std::chrono::duration_cast<std::chrono::seconds>(now - m_when).count();
std::size_t elapsed =
std::chrono::duration_cast<std::chrono::seconds>(now - m_when)
.count();
// A span larger than four times the window decays the
// value to an insignificant amount so just reset it.

View File

@@ -107,20 +107,23 @@ Unexpected(E (&)[N]) -> Unexpected<E const*>;
// Definition of Expected. All of the machinery comes from boost::result.
template <class T, class E>
class [[nodiscard]] Expected : private boost::outcome_v2::result<T, E, detail::throw_policy>
class [[nodiscard]] Expected
: private boost::outcome_v2::result<T, E, detail::throw_policy>
{
using Base = boost::outcome_v2::result<T, E, detail::throw_policy>;
public:
template <typename U>
requires std::convertible_to<U, T>
constexpr Expected(U&& r) : Base(boost::outcome_v2::in_place_type_t<T>{}, std::forward<U>(r))
constexpr Expected(U&& r)
: Base(boost::outcome_v2::in_place_type_t<T>{}, std::forward<U>(r))
{
}
template <typename U>
requires std::convertible_to<U, E> && (!std::is_reference_v<U>)
constexpr Expected(Unexpected<U> e) : Base(boost::outcome_v2::in_place_type_t<E>{}, std::move(e.value()))
constexpr Expected(Unexpected<U> e)
: Base(boost::outcome_v2::in_place_type_t<E>{}, std::move(e.value()))
{
}
@@ -191,7 +194,8 @@ public:
// Specialization of Expected<void, E>. Allows returning either success
// (without a value) or the reason for the failure.
template <class E>
class [[nodiscard]] Expected<void, E> : private boost::outcome_v2::result<void, E, detail::throw_policy>
class [[nodiscard]] Expected<void, E>
: private boost::outcome_v2::result<void, E, detail::throw_policy>
{
using Base = boost::outcome_v2::result<void, E, detail::throw_policy>;

View File

@@ -14,6 +14,9 @@ getFileContents(
std::optional<std::size_t> maxSize = std::nullopt);
void
writeFileContents(boost::system::error_code& ec, boost::filesystem::path const& destPath, std::string const& contents);
writeFileContents(
boost::system::error_code& ec,
boost::filesystem::path const& destPath,
std::string const& contents);
} // namespace xrpl

View File

@@ -44,8 +44,8 @@ struct SharedIntrusiveAdoptNoIncrementTag
//
template <class T>
concept CAdoptTag =
std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> || std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;
concept CAdoptTag = std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> ||
std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;
//------------------------------------------------------------------------------
@@ -121,7 +121,9 @@ public:
controlled by the rhs param.
*/
template <class TT>
SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT> const& rhs);
SharedIntrusive(
StaticCastTagSharedIntrusive,
SharedIntrusive<TT> const& rhs);
/** Create a new SharedIntrusive by statically casting the pointer
controlled by the rhs param.
@@ -133,7 +135,9 @@ public:
controlled by the rhs param.
*/
template <class TT>
SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT> const& rhs);
SharedIntrusive(
DynamicCastTagSharedIntrusive,
SharedIntrusive<TT> const& rhs);
/** Create a new SharedIntrusive by dynamically casting the pointer
controlled by the rhs param.
@@ -299,7 +303,9 @@ class SharedWeakUnion
// Tagged pointer. Low bit determines if this is a strong or a weak
// pointer. The low bit must be masked to zero when converting back to a
// pointer. If the low bit is '1', this is a weak pointer.
static_assert(alignof(T) >= 2, "Bad alignment: Combo pointer requires low bit to be zero");
static_assert(
alignof(T) >= 2,
"Bad alignment: Combo pointer requires low bit to be zero");
public:
SharedWeakUnion() = default;
@@ -443,7 +449,9 @@ make_SharedIntrusive(Args&&... args)
auto p = new TT(std::forward<Args>(args)...);
static_assert(
noexcept(SharedIntrusive<TT>(std::declval<TT*>(), std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
noexcept(SharedIntrusive<TT>(
std::declval<TT*>(),
std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
"SharedIntrusive constructor should not throw or this can leak "
"memory");

View File

@@ -11,7 +11,9 @@ template <class T>
template <CAdoptTag TAdoptTag>
SharedIntrusive<T>::SharedIntrusive(T* p, TAdoptTag) noexcept : ptr_{p}
{
if constexpr (std::is_same_v<TAdoptTag, SharedIntrusiveAdoptIncrementStrongTag>)
if constexpr (std::is_same_v<
TAdoptTag,
SharedIntrusiveAdoptIncrementStrongTag>)
{
if (p)
p->addStrongRef();
@@ -43,14 +45,16 @@ SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT> const& rhs)
}
template <class T>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive&& rhs) : ptr_{rhs.unsafeExchange(nullptr)}
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive&& rhs)
: ptr_{rhs.unsafeExchange(nullptr)}
{
}
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT>&& rhs) : ptr_{rhs.unsafeExchange(nullptr)}
SharedIntrusive<T>::SharedIntrusive(SharedIntrusive<TT>&& rhs)
: ptr_{rhs.unsafeExchange(nullptr)}
{
}
template <class T>
@@ -107,7 +111,9 @@ requires std::convertible_to<TT*, T*>
SharedIntrusive<T>&
SharedIntrusive<T>::operator=(SharedIntrusive<TT>&& rhs)
{
static_assert(!std::is_same_v<T, TT>, "This overload should not be instantiated for T == TT");
static_assert(
!std::is_same_v<T, TT>,
"This overload should not be instantiated for T == TT");
unsafeReleaseAndStore(rhs.unsafeExchange(nullptr));
return *this;
@@ -132,7 +138,9 @@ template <CAdoptTag TAdoptTag>
void
SharedIntrusive<T>::adopt(T* p)
{
if constexpr (std::is_same_v<TAdoptTag, SharedIntrusiveAdoptIncrementStrongTag>)
if constexpr (std::is_same_v<
TAdoptTag,
SharedIntrusiveAdoptIncrementStrongTag>)
{
if (p)
p->addStrongRef();
@@ -148,7 +156,9 @@ SharedIntrusive<T>::~SharedIntrusive()
template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT> const& rhs)
SharedIntrusive<T>::SharedIntrusive(
StaticCastTagSharedIntrusive,
SharedIntrusive<TT> const& rhs)
: ptr_{[&] {
auto p = static_cast<T*>(rhs.unsafeGetRawPtr());
if (p)
@@ -160,14 +170,18 @@ SharedIntrusive<T>::SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusiv
template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(StaticCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs)
SharedIntrusive<T>::SharedIntrusive(
StaticCastTagSharedIntrusive,
SharedIntrusive<TT>&& rhs)
: ptr_{static_cast<T*>(rhs.unsafeExchange(nullptr))}
{
}
template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT> const& rhs)
SharedIntrusive<T>::SharedIntrusive(
DynamicCastTagSharedIntrusive,
SharedIntrusive<TT> const& rhs)
: ptr_{[&] {
auto p = dynamic_cast<T*>(rhs.unsafeGetRawPtr());
if (p)
@@ -179,7 +193,9 @@ SharedIntrusive<T>::SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusi
template <class T>
template <class TT>
SharedIntrusive<T>::SharedIntrusive(DynamicCastTagSharedIntrusive, SharedIntrusive<TT>&& rhs)
SharedIntrusive<T>::SharedIntrusive(
DynamicCastTagSharedIntrusive,
SharedIntrusive<TT>&& rhs)
{
// This can be simplified without the `exchange`, but the `exchange` is kept
// in anticipation of supporting atomic operations.
@@ -208,7 +224,8 @@ SharedIntrusive<T>::operator->() const noexcept
}
template <class T>
SharedIntrusive<T>::operator bool() const noexcept
SharedIntrusive<T>::
operator bool() const noexcept
{
return bool(unsafeGetRawPtr());
}
@@ -298,7 +315,8 @@ WeakIntrusive<T>::WeakIntrusive(WeakIntrusive&& rhs) : ptr_{rhs.ptr_}
}
template <class T>
WeakIntrusive<T>::WeakIntrusive(SharedIntrusive<T> const& rhs) : ptr_{rhs.unsafeGetRawPtr()}
WeakIntrusive<T>::WeakIntrusive(SharedIntrusive<T> const& rhs)
: ptr_{rhs.unsafeGetRawPtr()}
{
if (ptr_)
ptr_->addWeakRef();
@@ -503,7 +521,8 @@ SharedWeakUnion<T>::getStrong() const
}
template <class T>
SharedWeakUnion<T>::operator bool() const noexcept
SharedWeakUnion<T>::
operator bool() const noexcept
{
return bool(get());
}

View File

@@ -159,19 +159,22 @@ private:
See description of the `refCounts` field for a fuller description of
this field.
*/
static constexpr FieldType partialDestroyStartedMask = (one << (FieldTypeBits - 1));
static constexpr FieldType partialDestroyStartedMask =
(one << (FieldTypeBits - 1));
/** Flag that is set when the partialDestroy function has finished running
See description of the `refCounts` field for a fuller description of
this field.
*/
static constexpr FieldType partialDestroyFinishedMask = (one << (FieldTypeBits - 2));
static constexpr FieldType partialDestroyFinishedMask =
(one << (FieldTypeBits - 2));
/** Mask that will zero out all the `count` bits and leave the tag bits
unchanged.
*/
static constexpr FieldType tagMask = partialDestroyStartedMask | partialDestroyFinishedMask;
static constexpr FieldType tagMask =
partialDestroyStartedMask | partialDestroyFinishedMask;
/** Mask that will zero out the `tag` bits and leave the count bits
unchanged.
@@ -180,11 +183,13 @@ private:
/** Mask that will zero out everything except the strong count.
*/
static constexpr FieldType strongMask = ((one << StrongCountNumBits) - 1) & valueMask;
static constexpr FieldType strongMask =
((one << StrongCountNumBits) - 1) & valueMask;
/** Mask that will zero out everything except the weak count.
*/
static constexpr FieldType weakMask = (((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
static constexpr FieldType weakMask =
(((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
/** Unpack the count and tag fields from the packed atomic integer form. */
struct RefCountPair
@@ -209,8 +214,10 @@ private:
FieldType
combinedValue() const noexcept;
static constexpr CountType maxStrongValue = static_cast<CountType>((one << StrongCountNumBits) - 1);
static constexpr CountType maxWeakValue = static_cast<CountType>((one << WeakCountNumBits) - 1);
static constexpr CountType maxStrongValue =
static_cast<CountType>((one << StrongCountNumBits) - 1);
static constexpr CountType maxWeakValue =
static_cast<CountType>((one << WeakCountNumBits) - 1);
/** Put an extra margin to detect when running up against limits.
This is only used in debug code, and is useful if we reduce the
number of bits in the strong and weak counts (to 16 and 14 bits).
@@ -266,7 +273,8 @@ IntrusiveRefCounts::releaseStrongRef() const
}
}
if (refCounts.compare_exchange_weak(prevIntVal, nextIntVal, std::memory_order_acq_rel))
if (refCounts.compare_exchange_weak(
prevIntVal, nextIntVal, std::memory_order_acq_rel))
{
// Can't be in partial destroy because only decrementing the strong
// count to zero can start a partial destroy, and that can't happen
@@ -322,7 +330,8 @@ IntrusiveRefCounts::addWeakReleaseStrongRef() const
action = partialDestroy;
}
}
if (refCounts.compare_exchange_weak(prevIntVal, nextIntVal, std::memory_order_acq_rel))
if (refCounts.compare_exchange_weak(
prevIntVal, nextIntVal, std::memory_order_acq_rel))
{
XRPL_ASSERT(
(!(prevIntVal & partialDestroyStartedMask)),
@@ -366,7 +375,8 @@ IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept
auto curValue = RefCountPair{1, 1}.combinedValue();
auto desiredValue = RefCountPair{2, 1}.combinedValue();
while (!refCounts.compare_exchange_weak(curValue, desiredValue, std::memory_order_acq_rel))
while (!refCounts.compare_exchange_weak(
curValue, desiredValue, std::memory_order_acq_rel))
{
RefCountPair const prev{curValue};
if (!prev.strong)
@@ -395,15 +405,20 @@ inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
{
#ifndef NDEBUG
auto v = refCounts.load(std::memory_order_acquire);
XRPL_ASSERT((!(v & valueMask)), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
XRPL_ASSERT(
(!(v & valueMask)),
"xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
auto t = v & tagMask;
XRPL_ASSERT((!t || t == tagMask), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
XRPL_ASSERT(
(!t || t == tagMask),
"xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
#endif
}
//------------------------------------------------------------------------------
inline IntrusiveRefCounts::RefCountPair::RefCountPair(IntrusiveRefCounts::FieldType v) noexcept
inline IntrusiveRefCounts::RefCountPair::RefCountPair(
IntrusiveRefCounts::FieldType v) noexcept
: strong{static_cast<CountType>(v & strongMask)}
, weak{static_cast<CountType>((v & weakMask) >> StrongCountNumBits)}
, partialDestroyStartedBit{v & partialDestroyStartedMask}
@@ -433,8 +448,10 @@ IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
(strong < checkStrongMaxValue && weak < checkWeakMaxValue),
"xrpl::IntrusiveRefCounts::RefCountPair::combinedValue : inputs "
"inside range");
return (static_cast<IntrusiveRefCounts::FieldType>(weak) << IntrusiveRefCounts::StrongCountNumBits) |
static_cast<IntrusiveRefCounts::FieldType>(strong) | partialDestroyStartedBit | partialDestroyFinishedBit;
return (static_cast<IntrusiveRefCounts::FieldType>(weak)
<< IntrusiveRefCounts::StrongCountNumBits) |
static_cast<IntrusiveRefCounts::FieldType>(strong) |
partialDestroyStartedBit | partialDestroyFinishedBit;
}
template <class T>
@@ -442,9 +459,11 @@ inline void
partialDestructorFinished(T** o)
{
T& self = **o;
IntrusiveRefCounts::RefCountPair p = self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
IntrusiveRefCounts::RefCountPair p =
self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
XRPL_ASSERT(
(!p.partialDestroyFinishedBit && p.partialDestroyStartedBit && !p.strong),
(!p.partialDestroyFinishedBit && p.partialDestroyStartedBit &&
!p.strong),
"xrpl::partialDestructorFinished : not a weak ref");
if (!p.weak)
{

View File

@@ -54,7 +54,8 @@ template <class = void>
boost::thread_specific_ptr<detail::LocalValues>&
getLocalValues()
{
static boost::thread_specific_ptr<detail::LocalValues> tsp(&detail::LocalValues::cleanup);
static boost::thread_specific_ptr<detail::LocalValues> tsp(
&detail::LocalValues::cleanup);
return tsp;
}
@@ -103,6 +104,8 @@ LocalValue<T>::operator*()
}
return *reinterpret_cast<T*>(
lvs->values.emplace(this, std::make_unique<detail::LocalValues::Value<T>>(t_)).first->second->get());
lvs->values
.emplace(this, std::make_unique<detail::LocalValues::Value<T>>(t_))
.first->second->get());
}
} // namespace xrpl

View File

@@ -38,17 +38,22 @@ private:
std::string partition_;
public:
Sink(std::string const& partition, beast::severities::Severity thresh, Logs& logs);
Sink(
std::string const& partition,
beast::severities::Severity thresh,
Logs& logs);
Sink(Sink const&) = delete;
Sink&
operator=(Sink const&) = delete;
void
write(beast::severities::Severity level, std::string const& text) override;
write(beast::severities::Severity level, std::string const& text)
override;
void
writeAlways(beast::severities::Severity level, std::string const& text) override;
writeAlways(beast::severities::Severity level, std::string const& text)
override;
};
/** Manages a system file containing logged output.
@@ -134,7 +139,11 @@ private:
};
std::mutex mutable mutex_;
std::map<std::string, std::unique_ptr<beast::Journal::Sink>, boost::beast::iless> sinks_;
std::map<
std::string,
std::unique_ptr<beast::Journal::Sink>,
boost::beast::iless>
sinks_;
beast::severities::Severity thresh_;
File file_;
bool silent_ = false;
@@ -170,7 +179,11 @@ public:
partition_severities() const;
void
write(beast::severities::Severity level, std::string const& partition, std::string const& text, bool console);
write(
beast::severities::Severity level,
std::string const& partition,
std::string const& text,
bool console);
std::string
rotate();
@@ -187,7 +200,9 @@ public:
}
virtual std::unique_ptr<beast::Journal::Sink>
makeSink(std::string const& partition, beast::severities::Severity startingLevel);
makeSink(
std::string const& partition,
beast::severities::Severity startingLevel);
public:
static LogSeverity

View File

@@ -73,7 +73,10 @@ struct MantissaRange
enum mantissa_scale { small, large };
explicit constexpr MantissaRange(mantissa_scale scale_)
: min(getMin(scale_)), max(min * 10 - 1), log(logTen(min).value_or(-1)), scale(scale_)
: min(getMin(scale_))
, max(min * 10 - 1)
, log(logTen(min).value_or(-1))
, scale(scale_)
{
}
@@ -103,7 +106,8 @@ private:
// Like std::integral, but only 64-bit integral types.
template <class T>
concept Integral64 = std::is_same_v<T, std::int64_t> || std::is_same_v<T, std::uint64_t>;
concept Integral64 =
std::is_same_v<T, std::int64_t> || std::is_same_v<T, std::uint64_t>;
/** Number is a floating point type that can represent a wide range of values.
*
@@ -240,11 +244,22 @@ public:
Number(rep mantissa);
explicit Number(rep mantissa, int exponent);
explicit constexpr Number(bool negative, internalrep mantissa, int exponent, unchecked) noexcept;
explicit constexpr Number(
bool negative,
internalrep mantissa,
int exponent,
unchecked) noexcept;
// Assume unsigned values are... unsigned. i.e. positive
explicit constexpr Number(internalrep mantissa, int exponent, unchecked) noexcept;
explicit constexpr Number(
internalrep mantissa,
int exponent,
unchecked) noexcept;
// Only unit tests are expected to use this ctor
explicit Number(bool negative, internalrep mantissa, int exponent, normalized);
explicit Number(
bool negative,
internalrep mantissa,
int exponent,
normalized);
// Assume unsigned values are... unsigned. i.e. positive
explicit Number(internalrep mantissa, int exponent, normalized);
@@ -294,7 +309,8 @@ public:
friend constexpr bool
operator==(Number const& x, Number const& y) noexcept
{
return x.negative_ == y.negative_ && x.mantissa_ == y.mantissa_ && x.exponent_ == y.exponent_;
return x.negative_ == y.negative_ && x.mantissa_ == y.mantissa_ &&
x.exponent_ == y.exponent_;
}
friend constexpr bool
@@ -502,25 +518,37 @@ private:
class Guard;
};
inline constexpr Number::Number(bool negative, internalrep mantissa, int exponent, unchecked) noexcept
inline constexpr Number::Number(
bool negative,
internalrep mantissa,
int exponent,
unchecked) noexcept
: negative_(negative), mantissa_{mantissa}, exponent_{exponent}
{
}
inline constexpr Number::Number(internalrep mantissa, int exponent, unchecked) noexcept
inline constexpr Number::Number(
internalrep mantissa,
int exponent,
unchecked) noexcept
: Number(false, mantissa, exponent, unchecked{})
{
}
constexpr static Number numZero{};
inline Number::Number(bool negative, internalrep mantissa, int exponent, normalized)
inline Number::Number(
bool negative,
internalrep mantissa,
int exponent,
normalized)
: Number(negative, mantissa, exponent, unchecked{})
{
normalize();
}
inline Number::Number(internalrep mantissa, int exponent, normalized) : Number(false, mantissa, exponent, normalized{})
inline Number::Number(internalrep mantissa, int exponent, normalized)
: Number(false, mantissa, exponent, normalized{})
{
}
@@ -667,13 +695,15 @@ Number::min() noexcept
inline Number
Number::max() noexcept
{
return Number{false, std::min(range_.get().max, maxRep), maxExponent, unchecked{}};
return Number{
false, std::min(range_.get().max, maxRep), maxExponent, unchecked{}};
}
inline Number
Number::lowest() noexcept
{
return Number{true, std::min(range_.get().max, maxRep), maxExponent, unchecked{}};
return Number{
true, std::min(range_.get().max, maxRep), maxExponent, unchecked{}};
}
inline bool
@@ -682,7 +712,8 @@ Number::isnormal() const noexcept
MantissaRange const& range = range_;
auto const abs_m = mantissa_;
return *this == Number{} ||
(range.min <= abs_m && abs_m <= range.max && (abs_m <= maxRep || abs_m % 10 == 0) && minExponent <= exponent_ &&
(range.min <= abs_m && abs_m <= range.max &&
(abs_m <= maxRep || abs_m % 10 == 0) && minExponent <= exponent_ &&
exponent_ <= maxExponent);
}
@@ -695,7 +726,10 @@ Number::normalizeToRange(T minMantissa, T maxMantissa) const
int exponent = exponent_;
if constexpr (std::is_unsigned_v<T>)
XRPL_ASSERT_PARTS(!negative, "xrpl::Number::normalizeToRange", "Number is non-negative for unsigned range.");
XRPL_ASSERT_PARTS(
!negative,
"xrpl::Number::normalizeToRange",
"Number is non-negative for unsigned range.");
Number::normalize(negative, mantissa, exponent, minMantissa, maxMantissa);
auto const sign = negative ? -1 : 1;
@@ -716,10 +750,6 @@ abs(Number x) noexcept
Number
power(Number const& f, unsigned n);
// logarithm with base 10
Number
log10(Number const& value, int iterations = 50);
// Returns f^(1/d)
// Uses NewtonRaphson iterations until the result stops changing
// to find the root of the polynomial g(x) = x^d - f
@@ -768,7 +798,8 @@ public:
{
Number::setround(mode_);
}
explicit saveNumberRoundMode(Number::rounding_mode mode) noexcept : mode_{mode}
explicit saveNumberRoundMode(Number::rounding_mode mode) noexcept
: mode_{mode}
{
}
saveNumberRoundMode(saveNumberRoundMode const&) = delete;
@@ -785,7 +816,8 @@ class NumberRoundModeGuard
saveNumberRoundMode saved_;
public:
explicit NumberRoundModeGuard(Number::rounding_mode mode) noexcept : saved_{Number::setround(mode)}
explicit NumberRoundModeGuard(Number::rounding_mode mode) noexcept
: saved_{Number::setround(mode)}
{
}
@@ -805,7 +837,9 @@ class NumberMantissaScaleGuard
MantissaRange::mantissa_scale const saved_;
public:
explicit NumberMantissaScaleGuard(MantissaRange::mantissa_scale scale) noexcept : saved_{Number::getMantissaScale()}
explicit NumberMantissaScaleGuard(
MantissaRange::mantissa_scale scale) noexcept
: saved_{Number::getMantissaScale()}
{
Number::setMantissaScale(scale);
}

View File

@@ -10,7 +10,8 @@ namespace xrpl {
class Resolver
{
public:
using HandlerType = std::function<void(std::string, std::vector<beast::IP::Endpoint>)>;
using HandlerType =
std::function<void(std::string, std::vector<beast::IP::Endpoint>)>;
virtual ~Resolver() = 0;
@@ -39,7 +40,9 @@ public:
}
virtual void
resolve(std::vector<std::string> const& names, HandlerType const& handler) = 0;
resolve(
std::vector<std::string> const& names,
HandlerType const& handler) = 0;
/** @} */
};

View File

@@ -4,28 +4,34 @@
namespace xrpl {
template <class T>
SharedWeakCachePointer<T>::SharedWeakCachePointer(SharedWeakCachePointer const& rhs) = default;
SharedWeakCachePointer<T>::SharedWeakCachePointer(
SharedWeakCachePointer const& rhs) = default;
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(std::shared_ptr<TT> const& rhs) : combo_{rhs}
SharedWeakCachePointer<T>::SharedWeakCachePointer(
std::shared_ptr<TT> const& rhs)
: combo_{rhs}
{
}
template <class T>
SharedWeakCachePointer<T>::SharedWeakCachePointer(SharedWeakCachePointer&& rhs) = default;
SharedWeakCachePointer<T>::SharedWeakCachePointer(
SharedWeakCachePointer&& rhs) = default;
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(std::shared_ptr<TT>&& rhs) : combo_{std::move(rhs)}
SharedWeakCachePointer<T>::SharedWeakCachePointer(std::shared_ptr<TT>&& rhs)
: combo_{std::move(rhs)}
{
}
template <class T>
SharedWeakCachePointer<T>&
SharedWeakCachePointer<T>::operator=(SharedWeakCachePointer const& rhs) = default;
SharedWeakCachePointer<T>::operator=(SharedWeakCachePointer const& rhs) =
default;
template <class T>
template <class TT>
@@ -63,7 +69,8 @@ SharedWeakCachePointer<T>::getStrong() const
}
template <class T>
SharedWeakCachePointer<T>::operator bool() const noexcept
SharedWeakCachePointer<T>::
operator bool() const noexcept
{
return !!std::get_if<std::shared_ptr<T>>(&combo_);
}

View File

@@ -50,7 +50,11 @@ class SlabAllocator
// The extent of the underlying memory block:
std::size_t const size_;
SlabBlock(SlabBlock* next, std::uint8_t* data, std::size_t size, std::size_t item)
SlabBlock(
SlabBlock* next,
std::uint8_t* data,
std::size_t size,
std::size_t item)
: next_(next), p_(data), size_(size)
{
// We don't need to grab the mutex here, since we're the only
@@ -121,7 +125,9 @@ class SlabAllocator
void
deallocate(std::uint8_t* ptr) noexcept
{
XRPL_ASSERT(own(ptr), "xrpl::SlabAllocator::SlabBlock::deallocate : own input");
XRPL_ASSERT(
own(ptr),
"xrpl::SlabAllocator::SlabBlock::deallocate : own input");
std::lock_guard l(m_);
@@ -155,13 +161,18 @@ public:
contexts (e.g. when minimal memory usage is needed) and
allows for graceful failure.
*/
constexpr explicit SlabAllocator(std::size_t extra, std::size_t alloc = 0, std::size_t align = 0)
constexpr explicit SlabAllocator(
std::size_t extra,
std::size_t alloc = 0,
std::size_t align = 0)
: itemAlignment_(align ? align : alignof(Type))
, itemSize_(boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
, itemSize_(
boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
, slabSize_(alloc)
{
XRPL_ASSERT(
(itemAlignment_ & (itemAlignment_ - 1)) == 0, "xrpl::SlabAllocator::SlabAllocator : valid alignment");
(itemAlignment_ & (itemAlignment_ - 1)) == 0,
"xrpl::SlabAllocator::SlabAllocator : valid alignment");
}
SlabAllocator(SlabAllocator const& other) = delete;
@@ -210,12 +221,13 @@ public:
// We want to allocate the memory at a 2 MiB boundary, to make it
// possible to use hugepage mappings on Linux:
auto buf = boost::alignment::aligned_alloc(megabytes(std::size_t(2)), size);
auto buf =
boost::alignment::aligned_alloc(megabytes(std::size_t(2)), size);
// clang-format off
if (!buf) [[unlikely]]
return nullptr;
// clang-format on
// clang-format on
#if BOOST_OS_LINUX
// When allocating large blocks, attempt to leverage Linux's
@@ -228,21 +240,31 @@ public:
// We need to carve out a bit of memory for the slab header
// and then align the rest appropriately:
auto slabData = reinterpret_cast<void*>(reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
auto slabData = reinterpret_cast<void*>(
reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
auto slabSize = size - sizeof(SlabBlock);
// This operation is essentially guaranteed not to fail but
// let's be careful anyways.
if (!boost::alignment::align(itemAlignment_, itemSize_, slabData, slabSize))
if (!boost::alignment::align(
itemAlignment_, itemSize_, slabData, slabSize))
{
boost::alignment::aligned_free(buf);
return nullptr;
}
slab = new (buf) SlabBlock(slabs_.load(), reinterpret_cast<std::uint8_t*>(slabData), slabSize, itemSize_);
slab = new (buf) SlabBlock(
slabs_.load(),
reinterpret_cast<std::uint8_t*>(slabData),
slabSize,
itemSize_);
// Link the new slab
while (!slabs_.compare_exchange_weak(slab->next_, slab, std::memory_order_release, std::memory_order_relaxed))
while (!slabs_.compare_exchange_weak(
slab->next_,
slab,
std::memory_order_release,
std::memory_order_relaxed))
{
; // Nothing to do
}
@@ -299,7 +321,10 @@ public:
std::size_t align;
public:
constexpr SlabConfig(std::size_t extra_, std::size_t alloc_ = 0, std::size_t align_ = alignof(Type))
constexpr SlabConfig(
std::size_t extra_,
std::size_t alloc_ = 0,
std::size_t align_ = alignof(Type))
: extra(extra_), alloc(alloc_), align(align_)
{
}
@@ -310,14 +335,23 @@ public:
// Ensure that the specified allocators are sorted from smallest to
// largest by size:
std::sort(
std::begin(cfg), std::end(cfg), [](SlabConfig const& a, SlabConfig const& b) { return a.extra < b.extra; });
std::begin(cfg),
std::end(cfg),
[](SlabConfig const& a, SlabConfig const& b) {
return a.extra < b.extra;
});
// We should never have two slabs of the same size
if (std::adjacent_find(std::begin(cfg), std::end(cfg), [](SlabConfig const& a, SlabConfig const& b) {
return a.extra == b.extra;
}) != cfg.end())
if (std::adjacent_find(
std::begin(cfg),
std::end(cfg),
[](SlabConfig const& a, SlabConfig const& b) {
return a.extra == b.extra;
}) != cfg.end())
{
throw std::runtime_error("SlabAllocatorSet<" + beast::type_name<Type>() + ">: duplicate slab size");
throw std::runtime_error(
"SlabAllocatorSet<" + beast::type_name<Type>() +
">: duplicate slab size");
}
for (auto const& c : cfg)

View File

@@ -40,7 +40,8 @@ public:
operator=(Slice const&) noexcept = default;
/** Create a slice pointing to existing memory. */
Slice(void const* data, std::size_t size) noexcept : data_(reinterpret_cast<std::uint8_t const*>(data)), size_(size)
Slice(void const* data, std::size_t size) noexcept
: data_(reinterpret_cast<std::uint8_t const*>(data)), size_(size)
{
}
@@ -83,7 +84,9 @@ public:
std::uint8_t
operator[](std::size_t i) const noexcept
{
XRPL_ASSERT(i < size_, "xrpl::Slice::operator[](std::size_t) const : valid input");
XRPL_ASSERT(
i < size_,
"xrpl::Slice::operator[](std::size_t) const : valid input");
return data_[i];
}
@@ -158,7 +161,9 @@ public:
@throws std::out_of_range if pos > size()
*/
Slice
substr(std::size_t pos, std::size_t count = std::numeric_limits<std::size_t>::max()) const
substr(
std::size_t pos,
std::size_t count = std::numeric_limits<std::size_t>::max()) const
{
if (pos > size())
throw std::out_of_range("Requested sub-slice is out of bounds");
@@ -197,7 +202,11 @@ operator!=(Slice const& lhs, Slice const& rhs) noexcept
inline bool
operator<(Slice const& lhs, Slice const& rhs) noexcept
{
return std::lexicographical_compare(lhs.data(), lhs.data() + lhs.size(), rhs.data(), rhs.data() + rhs.size());
return std::lexicographical_compare(
lhs.data(),
lhs.data() + lhs.size(),
rhs.data(),
rhs.data() + rhs.size());
}
template <class Stream>
@@ -209,14 +218,18 @@ operator<<(Stream& s, Slice const& v)
}
template <class T, std::size_t N>
std::enable_if_t<std::is_same<T, char>::value || std::is_same<T, unsigned char>::value, Slice>
std::enable_if_t<
std::is_same<T, char>::value || std::is_same<T, unsigned char>::value,
Slice>
makeSlice(std::array<T, N> const& a)
{
return Slice(a.data(), a.size());
}
template <class T, class Alloc>
std::enable_if_t<std::is_same<T, char>::value || std::is_same<T, unsigned char>::value, Slice>
std::enable_if_t<
std::is_same<T, char>::value || std::is_same<T, unsigned char>::value,
Slice>
makeSlice(std::vector<T, Alloc> const& v)
{
return Slice(v.data(), v.size());

View File

@@ -108,7 +108,8 @@ struct parsedURL
bool
operator==(parsedURL const& other) const
{
return scheme == other.scheme && domain == other.domain && port == other.port && path == other.path;
return scheme == other.scheme && domain == other.domain &&
port == other.port && path == other.path;
}
};

View File

@@ -55,7 +55,8 @@ public:
clock_type::duration expiration,
clock_type& clock,
beast::Journal journal,
beast::insight::Collector::ptr const& collector = beast::insight::NullCollector::New());
beast::insight::Collector::ptr const& collector =
beast::insight::NullCollector::New());
public:
/** Return the clock associated with the cache. */
@@ -112,10 +113,15 @@ public:
*/
template <class R>
bool
canonicalize(key_type const& key, SharedPointerType& data, R&& replaceCallback);
canonicalize(
key_type const& key,
SharedPointerType& data,
R&& replaceCallback);
bool
canonicalize_replace_cache(key_type const& key, SharedPointerType const& data);
canonicalize_replace_cache(
key_type const& key,
SharedPointerType const& data);
bool
canonicalize_replace_client(key_type const& key, SharedPointerType& data);
@@ -129,7 +135,8 @@ public:
*/
template <class ReturnType = bool>
auto
insert(key_type const& key, T const& value) -> std::enable_if_t<!IsKeyCache, ReturnType>;
insert(key_type const& key, T const& value)
-> std::enable_if_t<!IsKeyCache, ReturnType>;
template <class ReturnType = bool>
auto
@@ -175,7 +182,10 @@ private:
struct Stats
{
template <class Handler>
Stats(std::string const& prefix, Handler const& handler, beast::insight::Collector::ptr const& collector)
Stats(
std::string const& prefix,
Handler const& handler,
beast::insight::Collector::ptr const& collector)
: hook(collector->make_hook(handler))
, size(collector->make_gauge(prefix, "size"))
, hit_rate(collector->make_gauge(prefix, "hit_rate"))
@@ -197,7 +207,8 @@ private:
public:
clock_type::time_point last_access;
explicit KeyOnlyEntry(clock_type::time_point const& last_access_) : last_access(last_access_)
explicit KeyOnlyEntry(clock_type::time_point const& last_access_)
: last_access(last_access_)
{
}
@@ -214,7 +225,9 @@ private:
shared_weak_combo_pointer_type ptr;
clock_type::time_point last_access;
ValueEntry(clock_type::time_point const& last_access_, shared_pointer_type const& ptr_)
ValueEntry(
clock_type::time_point const& last_access_,
shared_pointer_type const& ptr_)
: ptr(ptr_), last_access(last_access_)
{
}
@@ -248,13 +261,18 @@ private:
}
};
typedef typename std::conditional<IsKeyCache, KeyOnlyEntry, ValueEntry>::type Entry;
typedef
typename std::conditional<IsKeyCache, KeyOnlyEntry, ValueEntry>::type
Entry;
using KeyOnlyCacheType = hardened_partitioned_hash_map<key_type, KeyOnlyEntry, Hash, KeyEqual>;
using KeyOnlyCacheType =
hardened_partitioned_hash_map<key_type, KeyOnlyEntry, Hash, KeyEqual>;
using KeyValueCacheType = hardened_partitioned_hash_map<key_type, ValueEntry, Hash, KeyEqual>;
using KeyValueCacheType =
hardened_partitioned_hash_map<key_type, ValueEntry, Hash, KeyEqual>;
using cache_type = hardened_partitioned_hash_map<key_type, Entry, Hash, KeyEqual>;
using cache_type =
hardened_partitioned_hash_map<key_type, Entry, Hash, KeyEqual>;
[[nodiscard]] std::thread
sweepHelper(

View File

@@ -14,13 +14,22 @@ template <
class Hash,
class KeyEqual,
class Mutex>
inline TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::TaggedCache(
std::string const& name,
int size,
clock_type::duration expiration,
clock_type& clock,
beast::Journal journal,
beast::insight::Collector::ptr const& collector)
inline TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
TaggedCache(
std::string const& name,
int size,
clock_type::duration expiration,
clock_type& clock,
beast::Journal journal,
beast::insight::Collector::ptr const& collector)
: m_journal(journal)
, m_clock(clock)
, m_stats(name, std::bind(&TaggedCache::collect_metrics, this), collector)
@@ -43,8 +52,15 @@ template <
class KeyEqual,
class Mutex>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::clock()
-> clock_type&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::clock() -> clock_type&
{
return m_clock;
}
@@ -59,7 +75,15 @@ template <
class KeyEqual,
class Mutex>
inline std::size_t
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::size() const
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::size() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
@@ -75,7 +99,15 @@ template <
class KeyEqual,
class Mutex>
inline int
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::getCacheSize() const
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getCacheSize() const
{
std::lock_guard lock(m_mutex);
return m_cache_count;
@@ -91,7 +123,15 @@ template <
class KeyEqual,
class Mutex>
inline int
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::getTrackSize() const
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
@@ -107,7 +147,15 @@ template <
class KeyEqual,
class Mutex>
inline float
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::getHitRate()
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getHitRate()
{
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
@@ -124,7 +172,15 @@ template <
class KeyEqual,
class Mutex>
inline void
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::clear()
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::clear()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
@@ -141,7 +197,15 @@ template <
class KeyEqual,
class Mutex>
inline void
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::reset()
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::reset()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
@@ -161,8 +225,15 @@ template <
class Mutex>
template <class KeyComparable>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::touch_if_exists(
KeyComparable const& key)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key)
{
std::lock_guard lock(m_mutex);
auto const iter(m_cache.find(key));
@@ -186,7 +257,15 @@ template <
class KeyEqual,
class Mutex>
inline void
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::sweep()
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::sweep()
{
// Keep references to all the stuff we sweep
// For performance, each worker thread should exit before the swept data
@@ -200,7 +279,8 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
{
std::lock_guard lock(m_mutex);
if (m_target_size == 0 || (static_cast<int>(m_cache.size()) <= m_target_size))
if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{
when_expire = now - m_target_age;
}
@@ -212,8 +292,10 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
if (when_expire > (now - minimumAge))
when_expire = now - minimumAge;
JLOG(m_journal.trace()) << m_name << " is growing fast " << m_cache.size() << " of " << m_target_size
<< " aging at " << (now - when_expire).count() << " of " << m_target_age.count();
JLOG(m_journal.trace())
<< m_name << " is growing fast " << m_cache.size() << " of "
<< m_target_size << " aging at " << (now - when_expire).count()
<< " of " << m_target_age.count();
}
std::vector<std::thread> workers;
@@ -222,7 +304,13 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
for (std::size_t p = 0; p < m_cache.partitions(); ++p)
{
workers.push_back(sweepHelper(when_expire, now, m_cache.map()[p], allStuffToSweep[p], allRemovals, lock));
workers.push_back(sweepHelper(
when_expire,
now,
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
lock));
}
for (std::thread& worker : workers)
worker.join();
@@ -233,7 +321,9 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
// and decrement the reference count on each strong pointer.
JLOG(m_journal.debug())
<< m_name << " TaggedCache sweep lock duration "
<< std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start).count()
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start)
.count()
<< "ms";
}
@@ -247,9 +337,15 @@ template <
class KeyEqual,
class Mutex>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::del(
key_type const& key,
bool valid)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::del(key_type const& key, bool valid)
{
// Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache
@@ -288,10 +384,19 @@ template <
class Mutex>
template <class R>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::canonicalize(
key_type const& key,
SharedPointerType& data,
R&& replaceCallback)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
canonicalize(
key_type const& key,
SharedPointerType& data,
R&& replaceCallback)
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
@@ -302,7 +407,9 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple(m_clock.now(), data));
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
++m_cache_count;
return false;
}
@@ -372,10 +479,21 @@ template <
class KeyEqual,
class Mutex>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
canonicalize_replace_cache(key_type const& key, SharedPointerType const& data)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
canonicalize_replace_cache(
key_type const& key,
SharedPointerType const& data)
{
return canonicalize(key, const_cast<SharedPointerType&>(data), []() { return true; });
return canonicalize(
key, const_cast<SharedPointerType&>(data), []() { return true; });
}
template <
@@ -388,7 +506,15 @@ template <
class KeyEqual,
class Mutex>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
canonicalize_replace_client(key_type const& key, SharedPointerType& data)
{
return canonicalize(key, data, []() { return false; });
@@ -404,8 +530,15 @@ template <
class KeyEqual,
class Mutex>
inline SharedPointerType
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::fetch(
key_type const& key)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::fetch(key_type const& key)
{
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
@@ -425,9 +558,16 @@ template <
class Mutex>
template <class ReturnType>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::insert(
key_type const& key,
T const& value) -> std::enable_if_t<!IsKeyCache, ReturnType>
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::insert(key_type const& key, T const& value)
-> std::enable_if_t<!IsKeyCache, ReturnType>
{
static_assert(
std::is_same_v<std::shared_ptr<T>, SharedPointerType> ||
@@ -456,13 +596,23 @@ template <
class Mutex>
template <class ReturnType>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::insert(
key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
auto [it, inserted] =
m_cache.emplace(std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple(now));
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(now));
if (!inserted)
it->second.last_access = now;
return inserted;
@@ -478,9 +628,15 @@ template <
class KeyEqual,
class Mutex>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::retrieve(
key_type const& key,
T& data)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::retrieve(key_type const& key, T& data)
{
// retrieve the value of the stored data
auto entry = fetch(key);
@@ -502,8 +658,15 @@ template <
class KeyEqual,
class Mutex>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::peekMutex()
-> mutex_type&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::peekMutex() -> mutex_type&
{
return m_mutex;
}
@@ -518,8 +681,15 @@ template <
class KeyEqual,
class Mutex>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::getKeys() const
-> std::vector<key_type>
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getKeys() const -> std::vector<key_type>
{
std::vector<key_type> v;
@@ -543,7 +713,15 @@ template <
class KeyEqual,
class Mutex>
inline double
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::rate() const
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::rate() const
{
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
@@ -563,9 +741,15 @@ template <
class Mutex>
template <class Handler>
inline SharedPointerType
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::fetch(
key_type const& digest,
Handler const& h)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h)
{
{
std::lock_guard l(m_mutex);
@@ -579,7 +763,8 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
std::lock_guard l(m_mutex);
++m_misses;
auto const [it, inserted] = m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted)
it->second.touch(m_clock.now());
return it->second.ptr.getStrong();
@@ -596,9 +781,16 @@ template <
class KeyEqual,
class Mutex>
inline SharedPointerType
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::initialFetch(
key_type const& key,
std::lock_guard<mutex_type> const& l)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
{
auto cit = m_cache.find(key);
if (cit == m_cache.end())
@@ -634,7 +826,15 @@ template <
class KeyEqual,
class Mutex>
inline void
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::collect_metrics()
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::collect_metrics()
{
m_stats.size.set(getCacheSize());
@@ -660,13 +860,22 @@ template <
class KeyEqual,
class Mutex>
inline std::thread
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::sweepHelper(
clock_type::time_point const& when_expire,
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
sweepHelper(
clock_type::time_point const& when_expire,
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
int cacheRemovals = 0;
@@ -720,8 +929,10 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
if (mapRemovals || cacheRemovals)
{
JLOG(m_journal.debug()) << "TaggedCache partition sweep " << m_name << ": cache = " << partition.size()
<< "-" << cacheRemovals << ", map-=" << mapRemovals;
JLOG(m_journal.debug())
<< "TaggedCache partition sweep " << m_name
<< ": cache = " << partition.size() << "-" << cacheRemovals
<< ", map-=" << mapRemovals;
}
allRemovals += cacheRemovals;
@@ -738,13 +949,22 @@ template <
class KeyEqual,
class Mutex>
inline std::thread
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::sweepHelper(
clock_type::time_point const& when_expire,
clock_type::time_point const& now,
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
sweepHelper(
clock_type::time_point const& when_expire,
clock_type::time_point const& now,
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
int cacheRemovals = 0;
@@ -774,8 +994,10 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
if (mapRemovals || cacheRemovals)
{
JLOG(m_journal.debug()) << "TaggedCache partition sweep " << m_name << ": cache = " << partition.size()
<< "-" << cacheRemovals << ", map-=" << mapRemovals;
JLOG(m_journal.debug())
<< "TaggedCache partition sweep " << m_name
<< ": cache = " << partition.size() << "-" << cacheRemovals
<< ", map-=" << mapRemovals;
}
allRemovals += cacheRemovals;


@@ -39,7 +39,8 @@ template <
class Hash = beast::uhash<>,
class Pred = std::equal_to<Key>,
class Allocator = std::allocator<std::pair<Key const, Value>>>
using hash_multimap = std::unordered_multimap<Key, Value, Hash, Pred, Allocator>;
using hash_multimap =
std::unordered_multimap<Key, Value, Hash, Pred, Allocator>;
template <
class Value,
@@ -73,7 +74,8 @@ template <
class Hash = hardened_hash<strong_hash>,
class Pred = std::equal_to<Key>,
class Allocator = std::allocator<std::pair<Key const, Value>>>
using hardened_partitioned_hash_map = partitioned_unordered_map<Key, Value, Hash, Pred, Allocator>;
using hardened_partitioned_hash_map =
partitioned_unordered_map<Key, Value, Hash, Pred, Allocator>;
template <
class Key,
@@ -81,7 +83,8 @@ template <
class Hash = hardened_hash<strong_hash>,
class Pred = std::equal_to<Key>,
class Allocator = std::allocator<std::pair<Key const, Value>>>
using hardened_hash_multimap = std::unordered_multimap<Key, Value, Hash, Pred, Allocator>;
using hardened_hash_multimap =
std::unordered_multimap<Key, Value, Hash, Pred, Allocator>;
template <
class Value,
@@ -95,6 +98,7 @@ template <
class Hash = hardened_hash<strong_hash>,
class Pred = std::equal_to<Value>,
class Allocator = std::allocator<Value>>
using hardened_hash_multiset = std::unordered_multiset<Value, Hash, Pred, Allocator>;
using hardened_hash_multiset =
std::unordered_multiset<Value, Hash, Pred, Allocator>;
} // namespace xrpl


@@ -51,7 +51,13 @@ generalized_set_intersection(
// std::set_intersection.
template <class FwdIter1, class InputIter2, class Pred, class Comp>
FwdIter1
remove_if_intersect_or_match(FwdIter1 first1, FwdIter1 last1, InputIter2 first2, InputIter2 last2, Pred pred, Comp comp)
remove_if_intersect_or_match(
FwdIter1 first1,
FwdIter1 last1,
InputIter2 first2,
InputIter2 last2,
Pred pred,
Comp comp)
{
// [original-first1, current-first1) is the set of elements to be preserved.
// [current-first1, i) is the set of elements that have been removed.


@@ -45,7 +45,8 @@ base64_encode(std::uint8_t const* data, std::size_t len);
inline std::string
base64_encode(std::string const& s)
{
return base64_encode(reinterpret_cast<std::uint8_t const*>(s.data()), s.size());
return base64_encode(
reinterpret_cast<std::uint8_t const*>(s.data()), s.size());
}
std::string


@@ -64,9 +64,13 @@ struct is_contiguous_container<Slice> : std::true_type
template <std::size_t Bits, class Tag = void>
class base_uint
{
static_assert((Bits % 32) == 0, "The length of a base_uint in bits must be a multiple of 32.");
static_assert(
(Bits % 32) == 0,
"The length of a base_uint in bits must be a multiple of 32.");
static_assert(Bits >= 64, "The length of a base_uint in bits must be at least 64.");
static_assert(
Bits >= 64,
"The length of a base_uint in bits must be at least 64.");
static constexpr std::size_t WIDTH = Bits / 32;
@@ -177,7 +181,9 @@ private:
{
// Local lambda that converts a single hex char to four bits and
// ORs those bits into a uint32_t.
auto hexCharToUInt = [](char c, std::uint32_t shift, std::uint32_t& accum) -> ParseResult {
auto hexCharToUInt = [](char c,
std::uint32_t shift,
std::uint32_t& accum) -> ParseResult {
std::uint32_t nibble = 0xFFu;
if (c < '0' || c > 'f')
return ParseResult::badChar;
@@ -214,7 +220,8 @@ private:
std::uint32_t accum = {};
for (std::uint32_t shift : {4u, 0u, 12u, 8u, 20u, 16u, 28u, 24u})
{
if (auto const result = hexCharToUInt(*in++, shift, accum); result != ParseResult::okay)
if (auto const result = hexCharToUInt(*in++, shift, accum);
result != ParseResult::okay)
return Unexpected(result);
}
ret[i++] = accum;
@@ -253,7 +260,8 @@ public:
// This constructor is intended to be used at compile time since it might
// throw at runtime. Consider declaring this constructor consteval once
// we get to C++23.
explicit constexpr base_uint(std::string_view sv) noexcept(false) : data_(parseFromStringViewThrows(sv))
explicit constexpr base_uint(std::string_view sv) noexcept(false)
: data_(parseFromStringViewThrows(sv))
{
}
@@ -378,7 +386,8 @@ public:
// prefix operator
for (int i = WIDTH - 1; i >= 0; --i)
{
data_[i] = boost::endian::native_to_big(boost::endian::big_to_native(data_[i]) + 1);
data_[i] = boost::endian::native_to_big(
boost::endian::big_to_native(data_[i]) + 1);
if (data_[i] != 0)
break;
}
@@ -402,7 +411,8 @@ public:
for (int i = WIDTH - 1; i >= 0; --i)
{
auto prev = data_[i];
data_[i] = boost::endian::native_to_big(boost::endian::big_to_native(data_[i]) - 1);
data_[i] = boost::endian::native_to_big(
boost::endian::big_to_native(data_[i]) - 1);
if (prev != 0)
break;
@@ -442,9 +452,11 @@ public:
for (int i = WIDTH; i--;)
{
std::uint64_t n = carry + boost::endian::big_to_native(data_[i]) + boost::endian::big_to_native(b.data_[i]);
std::uint64_t n = carry + boost::endian::big_to_native(data_[i]) +
boost::endian::big_to_native(b.data_[i]);
data_[i] = boost::endian::native_to_big(static_cast<std::uint32_t>(n));
data_[i] =
boost::endian::native_to_big(static_cast<std::uint32_t>(n));
carry = n >> 32;
}
@@ -544,7 +556,8 @@ operator<=>(base_uint<Bits, Tag> const& lhs, base_uint<Bits, Tag> const& rhs)
if (ret.first == lhs.cend())
return std::strong_ordering::equivalent;
return (*ret.first > *ret.second) ? std::strong_ordering::greater : std::strong_ordering::less;
return (*ret.first > *ret.second) ? std::strong_ordering::greater
: std::strong_ordering::less;
}
template <std::size_t Bits, typename Tag>
@@ -603,7 +616,9 @@ template <std::size_t Bits, class Tag>
inline std::string
to_short_string(base_uint<Bits, Tag> const& a)
{
static_assert(base_uint<Bits, Tag>::bytes > 4, "For 4 bytes or less, use a native type");
static_assert(
base_uint<Bits, Tag>::bytes > 4,
"For 4 bytes or less, use a native type");
return strHex(a.cbegin(), a.cbegin() + 4) + "...";
}
@@ -637,7 +652,8 @@ static_assert(sizeof(uint256) == 256 / 8, "There should be no padding bytes");
namespace beast {
template <std::size_t Bits, class Tag>
struct is_uniquely_represented<xrpl::base_uint<Bits, Tag>> : public std::true_type
struct is_uniquely_represented<xrpl::base_uint<Bits, Tag>>
: public std::true_type
{
explicit is_uniquely_represented() = default;
};
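To ground the base_uint hunks above, here is a minimal usage sketch; the header path and the 64-hex-character literal are illustrative assumptions, and to_short_string is found via argument-dependent lookup.

#include <xrpl/basics/base_uint.h>  // header path assumed for this sketch

// Construct a 256-bit value from a 64-character hex literal; the constructor
// is noexcept(false) and rejects malformed digits (ParseResult::badChar).
xrpl::uint256 const id(
    "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855");

// Abbreviated form: first four bytes as hex followed by "...".
auto const abbreviated = to_short_string(id);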


@@ -15,9 +15,12 @@ namespace xrpl {
// A few handy aliases
using days = std::chrono::duration<int, std::ratio_multiply<std::chrono::hours::period, std::ratio<24>>>;
using days = std::chrono::duration<
int,
std::ratio_multiply<std::chrono::hours::period, std::ratio<24>>>;
using weeks = std::chrono::duration<int, std::ratio_multiply<days::period, std::ratio<7>>>;
using weeks = std::chrono::
duration<int, std::ratio_multiply<days::period, std::ratio<7>>>;
/** Clock for measuring the network time.
@@ -30,7 +33,8 @@ using weeks = std::chrono::duration<int, std::ratio_multiply<days::period, std::
*/
constexpr static std::chrono::seconds epoch_offset =
date::sys_days{date::year{2000} / 1 / 1} - date::sys_days{date::year{1970} / 1 / 1};
date::sys_days{date::year{2000} / 1 / 1} -
date::sys_days{date::year{1970} / 1 / 1};
static_assert(epoch_offset.count() == 946684800);
@@ -59,7 +63,8 @@ to_string(NetClock::time_point tp)
{
// 2000-01-01 00:00:00 UTC is 946684800s from 1970-01-01 00:00:00 UTC
using namespace std::chrono;
return to_string(system_clock::time_point{tp.time_since_epoch() + epoch_offset});
return to_string(
system_clock::time_point{tp.time_since_epoch() + epoch_offset});
}
template <class Duration>
@@ -76,7 +81,9 @@ to_string_iso(NetClock::time_point tp)
// 2000-01-01 00:00:00 UTC is 946684800s from 1970-01-01 00:00:00 UTC
// Note, NetClock::duration is seconds, as checked by static_assert
static_assert(std::is_same_v<NetClock::duration::period, std::ratio<1>>);
return to_string_iso(date::sys_time<NetClock::duration>{tp.time_since_epoch() + epoch_offset});
return to_string_iso(
date::sys_time<NetClock::duration>{
tp.time_since_epoch() + epoch_offset});
}
/** A clock for measuring elapsed time.


@@ -35,10 +35,15 @@ template <class E, class... Args>
[[noreturn]] inline void
Throw(Args&&... args)
{
static_assert(std::is_convertible<E*, std::exception*>::value, "Exception must derive from std::exception.");
static_assert(
std::is_convertible<E*, std::exception*>::value,
"Exception must derive from std::exception.");
E e(std::forward<Args>(args)...);
LogThrow(std::string("Throwing exception of type " + beast::type_name<E>() + ": ") + e.what());
LogThrow(
std::string(
"Throwing exception of type " + beast::type_name<E>() + ": ") +
e.what());
throw e;
}
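A hedged illustration of the reformatted Throw helper; the xrpl namespace qualification and the header path are assumptions based on neighboring files in this diff.

#include <stdexcept>
#include <xrpl/basics/contract.h>  // header path assumed

void
requireNonNegative(int drops)
{
    // Throw<E> static_asserts that E derives from std::exception,
    // logs the exception type and message, then throws the constructed object.
    if (drops < 0)
        xrpl::Throw<std::runtime_error>("amount cannot be negative");
}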


@@ -23,7 +23,8 @@ public:
Collection const& collection;
std::string const delimiter;
explicit CollectionAndDelimiter(Collection const& c, std::string delim) : collection(c), delimiter(std::move(delim))
explicit CollectionAndDelimiter(Collection const& c, std::string delim)
: collection(c), delimiter(std::move(delim))
{
}
@@ -31,7 +32,11 @@ public:
friend Stream&
operator<<(Stream& s, CollectionAndDelimiter const& cd)
{
return join(s, std::begin(cd.collection), std::end(cd.collection), cd.delimiter);
return join(
s,
std::begin(cd.collection),
std::end(cd.collection),
cd.delimiter);
}
};
@@ -63,7 +68,8 @@ public:
char const* collection;
std::string const delimiter;
explicit CollectionAndDelimiter(char const c[N], std::string delim) : collection(c), delimiter(std::move(delim))
explicit CollectionAndDelimiter(char const c[N], std::string delim)
: collection(c), delimiter(std::move(delim))
{
}


@@ -50,7 +50,8 @@ public:
using const_reference = value_type const&;
using pointer = value_type*;
using const_pointer = value_type const*;
using map_type = std::unordered_map<key_type, mapped_type, hasher, key_equal, allocator_type>;
using map_type = std::
unordered_map<key_type, mapped_type, hasher, key_equal, allocator_type>;
using partition_map_type = std::vector<map_type>;
struct iterator
@@ -111,7 +112,8 @@ public:
friend bool
operator==(iterator const& lhs, iterator const& rhs)
{
return lhs.map_ == rhs.map_ && lhs.ait_ == rhs.ait_ && lhs.mit_ == rhs.mit_;
return lhs.map_ == rhs.map_ && lhs.ait_ == rhs.ait_ &&
lhs.mit_ == rhs.mit_;
}
friend bool
@@ -187,7 +189,8 @@ public:
friend bool
operator==(const_iterator const& lhs, const_iterator const& rhs)
{
return lhs.map_ == rhs.map_ && lhs.ait_ == rhs.ait_ && lhs.mit_ == rhs.mit_;
return lhs.map_ == rhs.map_ && lhs.ait_ == rhs.ait_ &&
lhs.mit_ == rhs.mit_;
}
friend bool
@@ -227,11 +230,14 @@ private:
}
public:
partitioned_unordered_map(std::optional<std::size_t> partitions = std::nullopt)
partitioned_unordered_map(
std::optional<std::size_t> partitions = std::nullopt)
{
// Set partitions to the number of hardware threads if the parameter
// is either empty or set to 0.
partitions_ = partitions && *partitions ? *partitions : std::thread::hardware_concurrency();
partitions_ = partitions && *partitions
? *partitions
: std::thread::hardware_concurrency();
map_.resize(partitions_);
XRPL_ASSERT(
partitions_,
@@ -330,8 +336,10 @@ public:
auto const& key = std::get<0>(keyTuple);
iterator it(&map_);
it.ait_ = it.map_->begin() + partitioner(key);
auto [eit, inserted] =
it.ait_->emplace(std::piecewise_construct, std::forward<T>(keyTuple), std::forward<U>(valueTuple));
auto [eit, inserted] = it.ait_->emplace(
std::piecewise_construct,
std::forward<T>(keyTuple),
std::forward<U>(valueTuple));
it.mit_ = eit;
return {it, inserted};
}
@@ -342,7 +350,8 @@ public:
{
iterator it(&map_);
it.ait_ = it.map_->begin() + partitioner(key);
auto [eit, inserted] = it.ait_->emplace(std::forward<T>(key), std::forward<U>(val));
auto [eit, inserted] =
it.ait_->emplace(std::forward<T>(key), std::forward<U>(val));
it.mit_ = eit;
return {it, inserted};
}


@@ -19,7 +19,8 @@ static_assert(
"The Ripple default PRNG engine must return an unsigned integral type.");
static_assert(
std::numeric_limits<beast::xor_shift_engine::result_type>::max() >= std::numeric_limits<std::uint64_t>::max(),
std::numeric_limits<beast::xor_shift_engine::result_type>::max() >=
std::numeric_limits<std::uint64_t>::max(),
"The Ripple default PRNG engine return must be at least 64 bits wide.");
#endif
@@ -88,7 +89,9 @@ default_prng()
*/
/** @{ */
template <class Engine, class Integral>
std::enable_if_t<std::is_integral<Integral>::value && detail::is_engine<Engine>::value, Integral>
std::enable_if_t<
std::is_integral<Integral>::value && detail::is_engine<Engine>::value,
Integral>
rand_int(Engine& engine, Integral min, Integral max)
{
XRPL_ASSERT(max > min, "xrpl::rand_int : max over min inputs");
@@ -107,7 +110,9 @@ rand_int(Integral min, Integral max)
}
template <class Engine, class Integral>
std::enable_if_t<std::is_integral<Integral>::value && detail::is_engine<Engine>::value, Integral>
std::enable_if_t<
std::is_integral<Integral>::value && detail::is_engine<Engine>::value,
Integral>
rand_int(Engine& engine, Integral max)
{
return rand_int(engine, Integral(0), max);
@@ -121,7 +126,9 @@ rand_int(Integral max)
}
template <class Integral, class Engine>
std::enable_if_t<std::is_integral<Integral>::value && detail::is_engine<Engine>::value, Integral>
std::enable_if_t<
std::is_integral<Integral>::value && detail::is_engine<Engine>::value,
Integral>
rand_int(Engine& engine)
{
return rand_int(engine, std::numeric_limits<Integral>::max());
@@ -139,17 +146,23 @@ rand_int()
/** @{ */
template <class Byte, class Engine>
std::enable_if_t<
(std::is_same<Byte, unsigned char>::value || std::is_same<Byte, std::uint8_t>::value) &&
(std::is_same<Byte, unsigned char>::value ||
std::is_same<Byte, std::uint8_t>::value) &&
detail::is_engine<Engine>::value,
Byte>
rand_byte(Engine& engine)
{
return static_cast<Byte>(
rand_int<Engine, std::uint32_t>(engine, std::numeric_limits<Byte>::min(), std::numeric_limits<Byte>::max()));
return static_cast<Byte>(rand_int<Engine, std::uint32_t>(
engine,
std::numeric_limits<Byte>::min(),
std::numeric_limits<Byte>::max()));
}
template <class Byte = std::uint8_t>
std::enable_if_t<(std::is_same<Byte, unsigned char>::value || std::is_same<Byte, std::uint8_t>::value), Byte>
std::enable_if_t<
(std::is_same<Byte, unsigned char>::value ||
std::is_same<Byte, std::uint8_t>::value),
Byte>
rand_byte()
{
return rand_byte<Byte>(default_prng());


@@ -11,29 +11,38 @@ namespace xrpl {
template <class Src, class Dest>
concept SafeToCast = (std::is_integral_v<Src> && std::is_integral_v<Dest>) &&
(std::is_signed<Src>::value || std::is_unsigned<Dest>::value) &&
(std::is_signed<Src>::value != std::is_signed<Dest>::value ? sizeof(Dest) > sizeof(Src)
: sizeof(Dest) >= sizeof(Src));
(std::is_signed<Src>::value != std::is_signed<Dest>::value
? sizeof(Dest) > sizeof(Src)
: sizeof(Dest) >= sizeof(Src));
template <class Dest, class Src>
inline constexpr std::enable_if_t<std::is_integral_v<Dest> && std::is_integral_v<Src>, Dest>
safe_cast(Src s) noexcept
inline constexpr std::
enable_if_t<std::is_integral_v<Dest> && std::is_integral_v<Src>, Dest>
safe_cast(Src s) noexcept
{
static_assert(std::is_signed_v<Dest> || std::is_unsigned_v<Src>, "Cannot cast signed to unsigned");
constexpr unsigned not_same = std::is_signed_v<Dest> != std::is_signed_v<Src>;
static_assert(sizeof(Dest) >= sizeof(Src) + not_same, "Destination is too small to hold all values of source");
static_assert(
std::is_signed_v<Dest> || std::is_unsigned_v<Src>,
"Cannot cast signed to unsigned");
constexpr unsigned not_same =
std::is_signed_v<Dest> != std::is_signed_v<Src>;
static_assert(
sizeof(Dest) >= sizeof(Src) + not_same,
"Destination is too small to hold all values of source");
return static_cast<Dest>(s);
}
template <class Dest, class Src>
inline constexpr std::enable_if_t<std::is_enum_v<Dest> && std::is_integral_v<Src>, Dest>
safe_cast(Src s) noexcept
inline constexpr std::
enable_if_t<std::is_enum_v<Dest> && std::is_integral_v<Src>, Dest>
safe_cast(Src s) noexcept
{
return static_cast<Dest>(safe_cast<std::underlying_type_t<Dest>>(s));
}
template <class Dest, class Src>
inline constexpr std::enable_if_t<std::is_integral_v<Dest> && std::is_enum_v<Src>, Dest>
safe_cast(Src s) noexcept
inline constexpr std::
enable_if_t<std::is_integral_v<Dest> && std::is_enum_v<Src>, Dest>
safe_cast(Src s) noexcept
{
return safe_cast<Dest>(static_cast<std::underlying_type_t<Src>>(s));
}
@@ -43,8 +52,9 @@ safe_cast(Src s) noexcept
// underlying types become safe, it can be converted to a safe_cast.
template <class Dest, class Src>
inline constexpr std::enable_if_t<std::is_integral_v<Dest> && std::is_integral_v<Src>, Dest>
unsafe_cast(Src s) noexcept
inline constexpr std::
enable_if_t<std::is_integral_v<Dest> && std::is_integral_v<Src>, Dest>
unsafe_cast(Src s) noexcept
{
static_assert(
!SafeToCast<Src, Dest>,
@@ -54,15 +64,17 @@ unsafe_cast(Src s) noexcept
}
template <class Dest, class Src>
inline constexpr std::enable_if_t<std::is_enum_v<Dest> && std::is_integral_v<Src>, Dest>
unsafe_cast(Src s) noexcept
inline constexpr std::
enable_if_t<std::is_enum_v<Dest> && std::is_integral_v<Src>, Dest>
unsafe_cast(Src s) noexcept
{
return static_cast<Dest>(unsafe_cast<std::underlying_type_t<Dest>>(s));
}
template <class Dest, class Src>
inline constexpr std::enable_if_t<std::is_integral_v<Dest> && std::is_enum_v<Src>, Dest>
unsafe_cast(Src s) noexcept
inline constexpr std::
enable_if_t<std::is_integral_v<Dest> && std::is_enum_v<Src>, Dest>
unsafe_cast(Src s) noexcept
{
return unsafe_cast<Dest>(static_cast<std::underlying_type_t<Src>>(s));
}
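A short sketch of how the casts above are meant to be used; the hunk confirms the xrpl namespace, while the header path is an assumption. The commented-out line shows the kind of conversion the static_asserts reject.

#include <cstdint>
#include <xrpl/basics/safe_cast.h>  // header path assumed

void
castExamples()
{
    std::uint32_t const narrow = 7;
    // Widening an unsigned value preserves every possible input, so it compiles.
    auto const wide = xrpl::safe_cast<std::uint64_t>(narrow);
    // xrpl::safe_cast<std::int32_t>(wide);  // rejected: destination too small to hold all values
    // Narrowing must be spelled out as unsafe_cast to document the intent.
    auto const back = xrpl::unsafe_cast<std::uint32_t>(wide);
    (void)back;
}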


@@ -35,8 +35,10 @@ public:
}
scope_exit(scope_exit&& rhs) noexcept(
std::is_nothrow_move_constructible_v<EF> || std::is_nothrow_copy_constructible_v<EF>)
: exit_function_{std::forward<EF>(rhs.exit_function_)}, execute_on_destruction_{rhs.execute_on_destruction_}
std::is_nothrow_move_constructible_v<EF> ||
std::is_nothrow_copy_constructible_v<EF>)
: exit_function_{std::forward<EF>(rhs.exit_function_)}
, execute_on_destruction_{rhs.execute_on_destruction_}
{
rhs.release();
}
@@ -47,11 +49,14 @@ public:
template <class EFP>
explicit scope_exit(
EFP&& f,
std::enable_if_t<!std::is_same_v<std::remove_cv_t<EFP>, scope_exit> && std::is_constructible_v<EF, EFP>>* =
0) noexcept
std::enable_if_t<
!std::is_same_v<std::remove_cv_t<EFP>, scope_exit> &&
std::is_constructible_v<EF, EFP>>* = 0) noexcept
: exit_function_{std::forward<EFP>(f)}
{
static_assert(std::is_nothrow_constructible_v<EF, decltype(std::forward<EFP>(f))>);
static_assert(
std::
is_nothrow_constructible_v<EF, decltype(std::forward<EFP>(f))>);
}
void
@@ -74,12 +79,14 @@ class scope_fail
public:
~scope_fail()
{
if (execute_on_destruction_ && std::uncaught_exceptions() > uncaught_on_creation_)
if (execute_on_destruction_ &&
std::uncaught_exceptions() > uncaught_on_creation_)
exit_function_();
}
scope_fail(scope_fail&& rhs) noexcept(
std::is_nothrow_move_constructible_v<EF> || std::is_nothrow_copy_constructible_v<EF>)
std::is_nothrow_move_constructible_v<EF> ||
std::is_nothrow_copy_constructible_v<EF>)
: exit_function_{std::forward<EF>(rhs.exit_function_)}
, execute_on_destruction_{rhs.execute_on_destruction_}
, uncaught_on_creation_{rhs.uncaught_on_creation_}
@@ -93,11 +100,14 @@ public:
template <class EFP>
explicit scope_fail(
EFP&& f,
std::enable_if_t<!std::is_same_v<std::remove_cv_t<EFP>, scope_fail> && std::is_constructible_v<EF, EFP>>* =
0) noexcept
std::enable_if_t<
!std::is_same_v<std::remove_cv_t<EFP>, scope_fail> &&
std::is_constructible_v<EF, EFP>>* = 0) noexcept
: exit_function_{std::forward<EFP>(f)}
{
static_assert(std::is_nothrow_constructible_v<EF, decltype(std::forward<EFP>(f))>);
static_assert(
std::
is_nothrow_constructible_v<EF, decltype(std::forward<EFP>(f))>);
}
void
@@ -120,12 +130,14 @@ class scope_success
public:
~scope_success() noexcept(noexcept(exit_function_()))
{
if (execute_on_destruction_ && std::uncaught_exceptions() <= uncaught_on_creation_)
if (execute_on_destruction_ &&
std::uncaught_exceptions() <= uncaught_on_creation_)
exit_function_();
}
scope_success(scope_success&& rhs) noexcept(
std::is_nothrow_move_constructible_v<EF> || std::is_nothrow_copy_constructible_v<EF>)
std::is_nothrow_move_constructible_v<EF> ||
std::is_nothrow_copy_constructible_v<EF>)
: exit_function_{std::forward<EF>(rhs.exit_function_)}
, execute_on_destruction_{rhs.execute_on_destruction_}
, uncaught_on_creation_{rhs.uncaught_on_creation_}
@@ -139,7 +151,9 @@ public:
template <class EFP>
explicit scope_success(
EFP&& f,
std::enable_if_t<!std::is_same_v<std::remove_cv_t<EFP>, scope_success> && std::is_constructible_v<EF, EFP>>* =
std::enable_if_t<
!std::is_same_v<std::remove_cv_t<EFP>, scope_success> &&
std::is_constructible_v<EF, EFP>>* =
0) noexcept(std::is_nothrow_constructible_v<EF, EFP> || std::is_nothrow_constructible_v<EF, EFP&>)
: exit_function_{std::forward<EFP>(f)}
{
@@ -198,9 +212,12 @@ class scope_unlock
std::unique_lock<Mutex>* plock;
public:
explicit scope_unlock(std::unique_lock<Mutex>& lock) noexcept(true) : plock(&lock)
explicit scope_unlock(std::unique_lock<Mutex>& lock) noexcept(true)
: plock(&lock)
{
XRPL_ASSERT(plock->owns_lock(), "xrpl::scope_unlock::scope_unlock : mutex must be locked");
XRPL_ASSERT(
plock->owns_lock(),
"xrpl::scope_unlock::scope_unlock : mutex must be locked");
plock->unlock();
}
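A usage sketch for scope_unlock, with one explicit assumption: the destructor, which this hunk does not show, is expected to re-acquire the lock. The header path is likewise assumed.

#include <mutex>
#include <xrpl/basics/scope.h>  // header path assumed

std::mutex workMutex;

void
doSlowWork()
{
    std::unique_lock<std::mutex> lock(workMutex);
    // ... quick bookkeeping while holding the mutex ...
    {
        // Asserts the lock is held, then releases it for the slow section.
        xrpl::scope_unlock<std::mutex> unlocked(lock);
        // ... slow work that must not hold the mutex ...
    }  // presumed to re-lock when `unlocked` is destroyed
    // ... back under the lock ...
}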


@@ -99,9 +99,12 @@ public:
@note For performance reasons, you should strive to have `lock` be
on a cacheline by itself.
*/
packed_spinlock(std::atomic<T>& lock, int index) : bits_(lock), mask_(static_cast<T>(1) << index)
packed_spinlock(std::atomic<T>& lock, int index)
: bits_(lock), mask_(static_cast<T>(1) << index)
{
XRPL_ASSERT(index >= 0 && (mask_ != 0), "xrpl::packed_spinlock::packed_spinlock : valid index and mask");
XRPL_ASSERT(
index >= 0 && (mask_ != 0),
"xrpl::packed_spinlock::packed_spinlock : valid index and mask");
}
[[nodiscard]] bool
@@ -174,7 +177,10 @@ public:
T expected = 0;
return lock_.compare_exchange_weak(
expected, std::numeric_limits<T>::max(), std::memory_order_acquire, std::memory_order_relaxed);
expected,
std::numeric_limits<T>::max(),
std::memory_order_acquire,
std::memory_order_relaxed);
}
void


@@ -10,7 +10,9 @@ std::string
strHex(FwdIt begin, FwdIt end)
{
static_assert(
std::is_convertible<typename std::iterator_traits<FwdIt>::iterator_category, std::forward_iterator_tag>::value,
std::is_convertible<
typename std::iterator_traits<FwdIt>::iterator_category,
std::forward_iterator_tag>::value,
"FwdIt must be a forward iterator");
std::string result;
result.reserve(2 * std::distance(begin, end));


@@ -30,7 +30,9 @@ class tagged_integer
tagged_integer<Int, Tag>,
boost::bitwise<
tagged_integer<Int, Tag>,
boost::unit_steppable<tagged_integer<Int, Tag>, boost::shiftable<tagged_integer<Int, Tag>>>>>>
boost::unit_steppable<
tagged_integer<Int, Tag>,
boost::shiftable<tagged_integer<Int, Tag>>>>>>
{
private:
Int m_value;
@@ -43,10 +45,14 @@ public:
template <
class OtherInt,
class = typename std::enable_if<std::is_integral<OtherInt>::value && sizeof(OtherInt) <= sizeof(Int)>::type>
class = typename std::enable_if<
std::is_integral<OtherInt>::value &&
sizeof(OtherInt) <= sizeof(Int)>::type>
explicit constexpr tagged_integer(OtherInt value) noexcept : m_value(value)
{
static_assert(sizeof(tagged_integer) == sizeof(Int), "tagged_integer is adding padding");
static_assert(
sizeof(tagged_integer) == sizeof(Int),
"tagged_integer is adding padding");
}
bool


@@ -31,7 +31,11 @@ private:
public:
io_latency_probe(duration const& period, boost::asio::io_context& ios)
: m_count(1), m_period(period), m_ios(ios), m_timer(m_ios), m_cancel(false)
: m_count(1)
, m_period(period)
, m_ios(ios)
, m_timer(m_ios)
, m_cancel(false)
{
}
@@ -86,7 +90,10 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
boost::asio::post(m_ios, sample_op<Handler>(std::forward<Handler>(handler), Clock::now(), false, this));
boost::asio::post(
m_ios,
sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), false, this));
}
/** Initiate continuous i/o latency sampling.
@@ -100,7 +107,10 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
boost::asio::post(m_ios, sample_op<Handler>(std::forward<Handler>(handler), Clock::now(), true, this));
boost::asio::post(
m_ios,
sample_op<Handler>(
std::forward<Handler>(handler), Clock::now(), true, this));
}
private:
@@ -140,8 +150,15 @@ private:
bool m_repeat;
io_latency_probe* m_probe;
sample_op(Handler const& handler, time_point const& start, bool repeat, io_latency_probe* probe)
: m_handler(handler), m_start(start), m_repeat(repeat), m_probe(probe)
sample_op(
Handler const& handler,
time_point const& start,
bool repeat,
io_latency_probe* probe)
: m_handler(handler)
, m_start(start)
, m_repeat(repeat)
, m_probe(probe)
{
XRPL_ASSERT(
m_probe,
@@ -196,19 +213,23 @@ private:
// Calculate when we want to sample again, and
// adjust for the expected latency.
//
typename Clock::time_point const when(now + m_probe->m_period - 2 * elapsed);
typename Clock::time_point const when(
now + m_probe->m_period - 2 * elapsed);
if (when <= now)
{
// The latency is too high to maintain the desired
// period so don't bother with a timer.
//
boost::asio::post(m_probe->m_ios, sample_op<Handler>(m_handler, now, m_repeat, m_probe));
boost::asio::post(
m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
else
{
m_probe->m_timer.expires_after(when - now);
m_probe->m_timer.async_wait(sample_op<Handler>(m_handler, now, m_repeat, m_probe));
m_probe->m_timer.async_wait(
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
}
}
@@ -219,7 +240,9 @@ private:
if (!m_probe)
return;
typename Clock::time_point const now(Clock::now());
boost::asio::post(m_probe->m_ios, sample_op<Handler>(m_handler, now, m_repeat, m_probe));
boost::asio::post(
m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
};
};


@@ -28,7 +28,8 @@ private:
time_point now_;
public:
explicit manual_clock(time_point const& now = time_point(duration(0))) : now_(now)
explicit manual_clock(time_point const& now = time_point(duration(0)))
: now_(now)
{
}
@@ -42,7 +43,9 @@ public:
void
set(time_point const& when)
{
XRPL_ASSERT(!Clock::is_steady || when >= now_, "beast::manual_clock::set(time_point) : forward input");
XRPL_ASSERT(
!Clock::is_steady || when >= now_,
"beast::manual_clock::set(time_point) : forward input");
now_ = when;
}
@@ -60,7 +63,8 @@ public:
advance(std::chrono::duration<Rep, Period> const& elapsed)
{
XRPL_ASSERT(
!Clock::is_steady || (now_ + elapsed) >= now_, "beast::manual_clock::advance(duration) : forward input");
!Clock::is_steady || (now_ + elapsed) >= now_,
"beast::manual_clock::advance(duration) : forward input");
now_ += elapsed;
}
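A test-style sketch of manual_clock; the template parameter and header path are assumptions, since the hunk shows only the constructor and advance.

#include <chrono>
#include <xrpl/beast/clock/manual_clock.h>  // header path assumed

void
clockExample()
{
    // Starts at time_point(duration(0)) per the default constructor argument.
    beast::manual_clock<std::chrono::steady_clock> clock;
    // For a steady clock, the assert requires time to move only forward.
    clock.advance(std::chrono::seconds(5));
}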


@@ -9,12 +9,14 @@ namespace beast {
/** Expire aged container items past the specified age. */
template <class AgedContainer, class Rep, class Period>
typename std::enable_if<is_aged_container<AgedContainer>::value, std::size_t>::type
expire(AgedContainer& c, std::chrono::duration<Rep, Period> const& age)
typename std::enable_if<is_aged_container<AgedContainer>::value, std::size_t>::
type
expire(AgedContainer& c, std::chrono::duration<Rep, Period> const& age)
{
std::size_t n(0);
auto const expired(c.clock().now() - age);
for (auto iter(c.chronological.cbegin()); iter != c.chronological.cend() && iter.when() <= expired;)
for (auto iter(c.chronological.cbegin());
iter != c.chronological.cend() && iter.when() <= expired;)
{
iter = c.erase(iter);
++n;


@@ -14,6 +14,7 @@ template <
class Clock = std::chrono::steady_clock,
class Compare = std::less<Key>,
class Allocator = std::allocator<std::pair<Key const, T>>>
using aged_map = detail::aged_ordered_container<false, true, Key, T, Clock, Compare, Allocator>;
using aged_map = detail::
aged_ordered_container<false, true, Key, T, Clock, Compare, Allocator>;
}


@@ -14,6 +14,7 @@ template <
class Clock = std::chrono::steady_clock,
class Compare = std::less<Key>,
class Allocator = std::allocator<std::pair<Key const, T>>>
using aged_multimap = detail::aged_ordered_container<true, true, Key, T, Clock, Compare, Allocator>;
using aged_multimap = detail::
aged_ordered_container<true, true, Key, T, Clock, Compare, Allocator>;
}


@@ -13,6 +13,7 @@ template <
class Clock = std::chrono::steady_clock,
class Compare = std::less<Key>,
class Allocator = std::allocator<Key>>
using aged_multiset = detail::aged_ordered_container<true, false, Key, void, Clock, Compare, Allocator>;
using aged_multiset = detail::
aged_ordered_container<true, false, Key, void, Clock, Compare, Allocator>;
}


@@ -13,6 +13,7 @@ template <
class Clock = std::chrono::steady_clock,
class Compare = std::less<Key>,
class Allocator = std::allocator<Key>>
using aged_set = detail::aged_ordered_container<false, false, Key, void, Clock, Compare, Allocator>;
using aged_set = detail::
aged_ordered_container<false, false, Key, void, Clock, Compare, Allocator>;
}


@@ -15,6 +15,14 @@ template <
class Hash = std::hash<Key>,
class KeyEqual = std::equal_to<Key>,
class Allocator = std::allocator<std::pair<Key const, T>>>
using aged_unordered_map = detail::aged_unordered_container<false, true, Key, T, Clock, Hash, KeyEqual, Allocator>;
using aged_unordered_map = detail::aged_unordered_container<
false,
true,
Key,
T,
Clock,
Hash,
KeyEqual,
Allocator>;
}


@@ -15,6 +15,14 @@ template <
class Hash = std::hash<Key>,
class KeyEqual = std::equal_to<Key>,
class Allocator = std::allocator<std::pair<Key const, T>>>
using aged_unordered_multimap = detail::aged_unordered_container<true, true, Key, T, Clock, Hash, KeyEqual, Allocator>;
using aged_unordered_multimap = detail::aged_unordered_container<
true,
true,
Key,
T,
Clock,
Hash,
KeyEqual,
Allocator>;
}


@@ -14,7 +14,14 @@ template <
class Hash = std::hash<Key>,
class KeyEqual = std::equal_to<Key>,
class Allocator = std::allocator<Key>>
using aged_unordered_multiset =
detail::aged_unordered_container<true, false, Key, void, Clock, Hash, KeyEqual, Allocator>;
using aged_unordered_multiset = detail::aged_unordered_container<
true,
false,
Key,
void,
Clock,
Hash,
KeyEqual,
Allocator>;
}


@@ -14,6 +14,14 @@ template <
class Hash = std::hash<Key>,
class KeyEqual = std::equal_to<Key>,
class Allocator = std::allocator<Key>>
using aged_unordered_set = detail::aged_unordered_container<false, false, Key, void, Clock, Hash, KeyEqual, Allocator>;
using aged_unordered_set = detail::aged_unordered_container<
false,
false,
Key,
void,
Clock,
Hash,
KeyEqual,
Allocator>;
}


@@ -15,12 +15,14 @@ template <bool is_const, class Iterator>
class aged_container_iterator
{
public:
using iterator_category = typename std::iterator_traits<Iterator>::iterator_category;
using iterator_category =
typename std::iterator_traits<Iterator>::iterator_category;
using value_type = typename std::conditional<
is_const,
typename Iterator::value_type::stashed::value_type const,
typename Iterator::value_type::stashed::value_type>::type;
using difference_type = typename std::iterator_traits<Iterator>::difference_type;
using difference_type =
typename std::iterator_traits<Iterator>::difference_type;
using pointer = value_type*;
using reference = value_type&;
using time_point = typename Iterator::value_type::stashed::time_point;
@@ -35,22 +37,31 @@ public:
class = typename std::enable_if<
(other_is_const == false || is_const == true) &&
std::is_same<Iterator, OtherIterator>::value == false>::type>
explicit aged_container_iterator(aged_container_iterator<other_is_const, OtherIterator> const& other)
explicit aged_container_iterator(
aged_container_iterator<other_is_const, OtherIterator> const& other)
: m_iter(other.m_iter)
{
}
// Disable constructing a const_iterator from a non-const_iterator.
template <bool other_is_const, class = typename std::enable_if<other_is_const == false || is_const == true>::type>
aged_container_iterator(aged_container_iterator<other_is_const, Iterator> const& other) : m_iter(other.m_iter)
template <
bool other_is_const,
class = typename std::enable_if<
other_is_const == false || is_const == true>::type>
aged_container_iterator(
aged_container_iterator<other_is_const, Iterator> const& other)
: m_iter(other.m_iter)
{
}
// Disable assigning a const_iterator to a non-const iterator
template <bool other_is_const, class OtherIterator>
auto
operator=(aged_container_iterator<other_is_const, OtherIterator> const& other) ->
typename std::enable_if<other_is_const == false || is_const == true, aged_container_iterator&>::type
operator=(
aged_container_iterator<other_is_const, OtherIterator> const& other) ->
typename std::enable_if<
other_is_const == false || is_const == true,
aged_container_iterator&>::type
{
m_iter = other.m_iter;
return *this;
@@ -58,14 +69,16 @@ public:
template <bool other_is_const, class OtherIterator>
bool
operator==(aged_container_iterator<other_is_const, OtherIterator> const& other) const
operator==(aged_container_iterator<other_is_const, OtherIterator> const&
other) const
{
return m_iter == other.m_iter;
}
template <bool other_is_const, class OtherIterator>
bool
operator!=(aged_container_iterator<other_is_const, OtherIterator> const& other) const
operator!=(aged_container_iterator<other_is_const, OtherIterator> const&
other) const
{
return m_iter != other.m_iter;
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -16,11 +16,16 @@ namespace detail {
template <class T>
struct is_empty_base_optimization_derived
: std::integral_constant<bool, std::is_empty<T>::value && !boost::is_final<T>::value>
: std::integral_constant<
bool,
std::is_empty<T>::value && !boost::is_final<T>::value>
{
};
template <class T, int UniqueID = 0, bool isDerived = is_empty_base_optimization_derived<T>::value>
template <
class T,
int UniqueID = 0,
bool isDerived = is_empty_base_optimization_derived<T>::value>
class empty_base_optimization : private T
{
public:


@@ -34,7 +34,9 @@ template <std::size_t N>
void
setCurrentThreadName(char const (&newThreadName)[N])
{
static_assert(N <= maxThreadNameLength + 1, "Thread name cannot exceed 15 characters");
static_assert(
N <= maxThreadNameLength + 1,
"Thread name cannot exceed 15 characters");
setCurrentThreadName(std::string_view(newThreadName, N - 1));
}
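A one-line usage sketch tied to the length check above; the enclosing namespace and header are not visible in this hunk, so the call is left unqualified.

void
nameThisThread()
{
    // "ledgerfetch" is 11 characters: within the 15-character limit
    // enforced by the static_assert on maxThreadNameLength.
    setCurrentThreadName("ledgerfetch");
}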


@@ -39,7 +39,8 @@ struct LexicalCast<std::string, In>
std::enable_if_t<std::is_enum_v<Enumeration>, bool>
operator()(std::string& out, Enumeration in)
{
out = std::to_string(static_cast<std::underlying_type_t<Enumeration>>(in));
out = std::to_string(
static_cast<std::underlying_type_t<Enumeration>>(in));
return true;
}
};
@@ -50,10 +51,14 @@ struct LexicalCast<Out, std::string_view>
{
explicit LexicalCast() = default;
static_assert(std::is_integral_v<Out>, "beast::LexicalCast can only be used with integral types");
static_assert(
std::is_integral_v<Out>,
"beast::LexicalCast can only be used with integral types");
template <class Integral = Out>
std::enable_if_t<std::is_integral_v<Integral> && !std::is_same_v<Integral, bool>, bool>
std::enable_if_t<
std::is_integral_v<Integral> && !std::is_same_v<Integral, bool>,
bool>
operator()(Integral& out, std::string_view in) const
{
auto first = in.data();
@@ -73,9 +78,10 @@ struct LexicalCast<Out, std::string_view>
std::string result;
// Convert the input to lowercase
std::transform(in.begin(), in.end(), std::back_inserter(result), [](auto c) {
return std::tolower(static_cast<unsigned char>(c));
});
std::transform(
in.begin(), in.end(), std::back_inserter(result), [](auto c) {
return std::tolower(static_cast<unsigned char>(c));
});
if (result == "1" || result == "true")
{
@@ -133,7 +139,8 @@ struct LexicalCast<Out, char const*>
bool
operator()(Out& out, char const* in) const
{
XRPL_ASSERT(in, "beast::detail::LexicalCast(char const*) : non-null input");
XRPL_ASSERT(
in, "beast::detail::LexicalCast(char const*) : non-null input");
return LexicalCast<Out, std::string_view>()(out, in);
}
};

View File

@@ -54,7 +54,8 @@ class ListIterator
{
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename beast::detail::CopyConst<N, typename N::value_type>::type;
using value_type =
typename beast::detail::CopyConst<N, typename N::value_type>::type;
using difference_type = std::ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;


@@ -13,16 +13,21 @@ class LockFreeStackIterator
{
protected:
using Node = typename Container::Node;
using NodePtr = typename std::conditional<IsConst, Node const*, Node*>::type;
using NodePtr =
typename std::conditional<IsConst, Node const*, Node*>::type;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = typename Container::value_type;
using difference_type = typename Container::difference_type;
using pointer =
typename std::conditional<IsConst, typename Container::const_pointer, typename Container::pointer>::type;
using reference =
typename std::conditional<IsConst, typename Container::const_reference, typename Container::reference>::type;
using pointer = typename std::conditional<
IsConst,
typename Container::const_pointer,
typename Container::pointer>::type;
using reference = typename std::conditional<
IsConst,
typename Container::const_reference,
typename Container::reference>::type;
LockFreeStackIterator() : m_node()
{
@@ -33,7 +38,9 @@ public:
}
template <bool OtherIsConst>
explicit LockFreeStackIterator(LockFreeStackIterator<Container, OtherIsConst> const& other) : m_node(other.m_node)
explicit LockFreeStackIterator(
LockFreeStackIterator<Container, OtherIsConst> const& other)
: m_node(other.m_node)
{
}
@@ -152,7 +159,8 @@ public:
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using iterator = LockFreeStackIterator<LockFreeStack<Element, Tag>, false>;
using const_iterator = LockFreeStackIterator<LockFreeStack<Element, Tag>, true>;
using const_iterator =
LockFreeStackIterator<LockFreeStack<Element, Tag>, true>;
LockFreeStack() : m_end(nullptr), m_head(&m_end)
{
@@ -190,7 +198,11 @@ public:
{
first = (old_head == &m_end);
node->m_next = old_head;
} while (!m_head.compare_exchange_strong(old_head, node, std::memory_order_release, std::memory_order_relaxed));
} while (!m_head.compare_exchange_strong(
old_head,
node,
std::memory_order_release,
std::memory_order_relaxed));
return first;
}
@@ -213,7 +225,11 @@ public:
if (node == &m_end)
return nullptr;
new_head = node->m_next.load();
} while (!m_head.compare_exchange_strong(node, new_head, std::memory_order_release, std::memory_order_relaxed));
} while (!m_head.compare_exchange_strong(
node,
new_head,
std::memory_order_release,
std::memory_order_relaxed));
return static_cast<Element*>(node);
}


@@ -26,7 +26,8 @@ template <class T>
inline void
reverse_bytes(T& t)
{
unsigned char* bytes = static_cast<unsigned char*>(std::memmove(std::addressof(t), std::addressof(t), sizeof(T)));
unsigned char* bytes = static_cast<unsigned char*>(
std::memmove(std::addressof(t), std::addressof(t), sizeof(T)));
for (unsigned i = 0; i < sizeof(T) / 2; ++i)
std::swap(bytes[i], bytes[sizeof(T) - 1 - i]);
}
@@ -51,7 +52,11 @@ template <class T, class Hasher>
inline void
maybe_reverse_bytes(T& t, Hasher&)
{
maybe_reverse_bytes(t, std::integral_constant<bool, Hasher::endian != boost::endian::order::native>{});
maybe_reverse_bytes(
t,
std::integral_constant<
bool,
Hasher::endian != boost::endian::order::native>{});
}
} // namespace detail
@@ -65,8 +70,10 @@ maybe_reverse_bytes(T& t, Hasher&)
template <class T>
struct is_uniquely_represented
: public std::
integral_constant<bool, std::is_integral<T>::value || std::is_enum<T>::value || std::is_pointer<T>::value>
: public std::integral_constant<
bool,
std::is_integral<T>::value || std::is_enum<T>::value ||
std::is_pointer<T>::value>
{
explicit is_uniquely_represented() = default;
};
@@ -84,7 +91,8 @@ struct is_uniquely_represented<T volatile> : public is_uniquely_represented<T>
};
template <class T>
struct is_uniquely_represented<T const volatile> : public is_uniquely_represented<T>
struct is_uniquely_represented<T const volatile>
: public is_uniquely_represented<T>
{
explicit is_uniquely_represented() = default;
};
@@ -95,7 +103,8 @@ template <class T, class U>
struct is_uniquely_represented<std::pair<T, U>>
: public std::integral_constant<
bool,
is_uniquely_represented<T>::value && is_uniquely_represented<U>::value &&
is_uniquely_represented<T>::value &&
is_uniquely_represented<U>::value &&
sizeof(T) + sizeof(U) == sizeof(std::pair<T, U>)>
{
explicit is_uniquely_represented() = default;
@@ -107,7 +116,8 @@ template <class... T>
struct is_uniquely_represented<std::tuple<T...>>
: public std::integral_constant<
bool,
std::conjunction_v<is_uniquely_represented<T>...> && sizeof(std::tuple<T...>) == (sizeof(T) + ...)>
std::conjunction_v<is_uniquely_represented<T>...> &&
sizeof(std::tuple<T...>) == (sizeof(T) + ...)>
{
explicit is_uniquely_represented() = default;
};
@@ -124,8 +134,10 @@ struct is_uniquely_represented<T[N]> : public is_uniquely_represented<T>
template <class T, std::size_t N>
struct is_uniquely_represented<std::array<T, N>>
: public std::
integral_constant<bool, is_uniquely_represented<T>::value && sizeof(T) * N == sizeof(std::array<T, N>)>
: public std::integral_constant<
bool,
is_uniquely_represented<T>::value &&
sizeof(T) * N == sizeof(std::array<T, N>)>
{
explicit is_uniquely_represented() = default;
};
@@ -145,10 +157,12 @@ struct is_uniquely_represented<std::array<T, N>>
*/
/** @{ */
template <class T, class HashAlgorithm>
struct is_contiguously_hashable : public std::integral_constant<
bool,
is_uniquely_represented<T>::value &&
(sizeof(T) == 1 || HashAlgorithm::endian == boost::endian::order::native)>
struct is_contiguously_hashable
: public std::integral_constant<
bool,
is_uniquely_represented<T>::value &&
(sizeof(T) == 1 ||
HashAlgorithm::endian == boost::endian::order::native)>
{
explicit is_contiguously_hashable() = default;
};
@@ -158,7 +172,8 @@ struct is_contiguously_hashable<T[N], HashAlgorithm>
: public std::integral_constant<
bool,
is_uniquely_represented<T[N]>::value &&
(sizeof(T) == 1 || HashAlgorithm::endian == boost::endian::order::native)>
(sizeof(T) == 1 ||
HashAlgorithm::endian == boost::endian::order::native)>
{
explicit is_contiguously_hashable() = default;
};
@@ -203,7 +218,8 @@ hash_append(Hasher& h, T const& t) noexcept
template <class Hasher, class T>
inline std::enable_if_t<
!is_contiguously_hashable<T, Hasher>::value &&
(std::is_integral<T>::value || std::is_pointer<T>::value || std::is_enum<T>::value)>
(std::is_integral<T>::value || std::is_pointer<T>::value ||
std::is_enum<T>::value)>
hash_append(Hasher& h, T t) noexcept
{
detail::reverse_bytes(t);
@@ -237,11 +253,15 @@ hash_append(Hasher& h, T (&a)[N]) noexcept;
template <class Hasher, class CharT, class Traits, class Alloc>
std::enable_if_t<!is_contiguously_hashable<CharT, Hasher>::value>
hash_append(Hasher& h, std::basic_string<CharT, Traits, Alloc> const& s) noexcept;
hash_append(
Hasher& h,
std::basic_string<CharT, Traits, Alloc> const& s) noexcept;
template <class Hasher, class CharT, class Traits, class Alloc>
std::enable_if_t<is_contiguously_hashable<CharT, Hasher>::value>
hash_append(Hasher& h, std::basic_string<CharT, Traits, Alloc> const& s) noexcept;
hash_append(
Hasher& h,
std::basic_string<CharT, Traits, Alloc> const& s) noexcept;
template <class Hasher, class T, class U>
std::enable_if_t<!is_contiguously_hashable<std::pair<T, U>, Hasher>::value>
@@ -273,10 +293,14 @@ hash_append(Hasher& h, std::unordered_set<Key, Hash, Pred, Alloc> const& s);
template <class Hasher, class Key, class Compare, class Alloc>
std::enable_if_t<!is_contiguously_hashable<Key, Hasher>::value>
hash_append(Hasher& h, boost::container::flat_set<Key, Compare, Alloc> const& v) noexcept;
hash_append(
Hasher& h,
boost::container::flat_set<Key, Compare, Alloc> const& v) noexcept;
template <class Hasher, class Key, class Compare, class Alloc>
std::enable_if_t<is_contiguously_hashable<Key, Hasher>::value>
hash_append(Hasher& h, boost::container::flat_set<Key, Compare, Alloc> const& v) noexcept;
hash_append(
Hasher& h,
boost::container::flat_set<Key, Compare, Alloc> const& v) noexcept;
template <class Hasher, class T0, class T1, class... T>
void
hash_append(Hasher& h, T0 const& t0, T1 const& t1, T const&... t) noexcept;
@@ -295,7 +319,9 @@ hash_append(Hasher& h, T (&a)[N]) noexcept
template <class Hasher, class CharT, class Traits, class Alloc>
inline std::enable_if_t<!is_contiguously_hashable<CharT, Hasher>::value>
hash_append(Hasher& h, std::basic_string<CharT, Traits, Alloc> const& s) noexcept
hash_append(
Hasher& h,
std::basic_string<CharT, Traits, Alloc> const& s) noexcept
{
for (auto c : s)
hash_append(h, c);
@@ -304,7 +330,9 @@ hash_append(Hasher& h, std::basic_string<CharT, Traits, Alloc> const& s) noexcep
template <class Hasher, class CharT, class Traits, class Alloc>
inline std::enable_if_t<is_contiguously_hashable<CharT, Hasher>::value>
hash_append(Hasher& h, std::basic_string<CharT, Traits, Alloc> const& s) noexcept
hash_append(
Hasher& h,
std::basic_string<CharT, Traits, Alloc> const& s) noexcept
{
h(s.data(), s.size() * sizeof(CharT));
hash_append(h, s.size());
@@ -313,7 +341,8 @@ hash_append(Hasher& h, std::basic_string<CharT, Traits, Alloc> const& s) noexcep
// pair
template <class Hasher, class T, class U>
inline std::enable_if_t<!is_contiguously_hashable<std::pair<T, U>, Hasher>::value>
inline std::enable_if_t<
!is_contiguously_hashable<std::pair<T, U>, Hasher>::value>
hash_append(Hasher& h, std::pair<T, U> const& p) noexcept
{
hash_append(h, p.first, p.second);
@@ -350,14 +379,18 @@ hash_append(Hasher& h, std::array<T, N> const& a) noexcept
template <class Hasher, class Key, class Compare, class Alloc>
std::enable_if_t<!is_contiguously_hashable<Key, Hasher>::value>
hash_append(Hasher& h, boost::container::flat_set<Key, Compare, Alloc> const& v) noexcept
hash_append(
Hasher& h,
boost::container::flat_set<Key, Compare, Alloc> const& v) noexcept
{
for (auto const& t : v)
hash_append(h, t);
}
template <class Hasher, class Key, class Compare, class Alloc>
std::enable_if_t<is_contiguously_hashable<Key, Hasher>::value>
hash_append(Hasher& h, boost::container::flat_set<Key, Compare, Alloc> const& v) noexcept
hash_append(
Hasher& h,
boost::container::flat_set<Key, Compare, Alloc> const& v) noexcept
{
h(&(v.begin()), v.size() * sizeof(Key));
}
@@ -380,7 +413,10 @@ hash_one(Hasher& h, T const& t) noexcept
template <class Hasher, class... T, std::size_t... I>
inline void
tuple_hash(Hasher& h, std::tuple<T...> const& t, std::index_sequence<I...>) noexcept
tuple_hash(
Hasher& h,
std::tuple<T...> const& t,
std::index_sequence<I...>) noexcept
{
for_each_item(hash_one(h, std::get<I>(t))...);
}
@@ -388,7 +424,8 @@ tuple_hash(Hasher& h, std::tuple<T...> const& t, std::index_sequence<I...>) noex
} // namespace detail
template <class Hasher, class... T>
inline std::enable_if_t<!is_contiguously_hashable<std::tuple<T...>, Hasher>::value>
inline std::enable_if_t<
!is_contiguously_hashable<std::tuple<T...>, Hasher>::value>
hash_append(Hasher& h, std::tuple<T...> const& t) noexcept
{
detail::tuple_hash(h, t, std::index_sequence_for<T...>{});
@@ -414,7 +451,9 @@ hash_append(Hasher& h, std::chrono::duration<Rep, Period> const& d) noexcept
template <class Hasher, class Clock, class Duration>
inline void
hash_append(Hasher& h, std::chrono::time_point<Clock, Duration> const& tp) noexcept
hash_append(
Hasher& h,
std::chrono::time_point<Clock, Duration> const& tp) noexcept
{
hash_append(h, tp.time_since_epoch());
}
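The hash_append framework shown above is extended by giving a user-defined type its own non-member overload that forwards each member back to hash_append. A minimal sketch, assuming the facilities live in namespace beast and the header path below; the Endpoint type is purely illustrative:

#include <xrpl/beast/hash/hash_append.h>  // assumed header path

#include <cstdint>

namespace example {

// Hypothetical user-defined type, not part of the codebase.
struct Endpoint
{
    std::uint32_t address;
    std::uint16_t port;
};

// Non-member overload found by ADL: forward each member so any Hasher
// accepted by the framework can consume an Endpoint.
template <class Hasher>
void
hash_append(Hasher& h, Endpoint const& ep) noexcept
{
    using beast::hash_append;  // assumption: framework is in namespace beast
    hash_append(h, ep.address, ep.port);
}

}  // namespace example

Any hasher exposing void operator()(void const*, std::size_t), such as the XXH3-based xxhasher in the next file of this diff, can then digest an Endpoint through the variadic hash_append overload.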


@@ -48,7 +48,8 @@ private:
{
std::memcpy(writeBuffer_.data(), data, len);
writeBuffer_ = writeBuffer_.subspan(len);
readBuffer_ = std::span{std::begin(buffer_), buffer_.size() - writeBuffer_.size()};
readBuffer_ = std::span{
std::begin(buffer_), buffer_.size() - writeBuffer_.size()};
}
}
@@ -96,7 +97,8 @@ private:
{
if (seed_.has_value())
{
return XXH3_64bits_withSeed(readBuffer_.data(), readBuffer_.size(), *seed_);
return XXH3_64bits_withSeed(
readBuffer_.data(), readBuffer_.size(), *seed_);
}
else
{
@@ -125,13 +127,17 @@ public:
}
}
template <class Seed, std::enable_if_t<std::is_unsigned<Seed>::value>* = nullptr>
template <
class Seed,
std::enable_if_t<std::is_unsigned<Seed>::value>* = nullptr>
explicit xxhasher(Seed seed) : seed_(seed)
{
resetBuffers();
}
template <class Seed, std::enable_if_t<std::is_unsigned<Seed>::value>* = nullptr>
template <
class Seed,
std::enable_if_t<std::is_unsigned<Seed>::value>* = nullptr>
xxhasher(Seed seed, Seed) : seed_(seed)
{
resetBuffers();


@@ -22,7 +22,9 @@ public:
@param journal Destination for logging output.
*/
static std::shared_ptr<StatsDCollector>
New(IP::Endpoint const& address, std::string const& prefix, Journal journal);
New(IP::Endpoint const& address,
std::string const& prefix,
Journal journal);
};
} // namespace insight
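For context, wiring the collector up might look like the following sketch; the address, prefix, and journal are placeholders, and IP::Endpoint::from_string is assumed to be available:

// Hypothetical StatsD exporter setup; all values are placeholders.
auto collector = beast::insight::StatsDCollector::New(
    beast::IP::Endpoint::from_string("203.0.113.7:8125"),
    "myserver",
    journal);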


@@ -25,7 +25,8 @@ struct ci_equal_pred
operator()(char c1, char c2)
{
// VFALCO TODO Use a table lookup here
return std::tolower(static_cast<unsigned char>(c1)) == std::tolower(static_cast<unsigned char>(c2));
return std::tolower(static_cast<unsigned char>(c1)) ==
std::tolower(static_cast<unsigned char>(c2));
}
};
@@ -95,7 +96,8 @@ trim_right(String const& s)
*/
template <
class FwdIt,
class Result = std::vector<std::basic_string<typename std::iterator_traits<FwdIt>::value_type>>,
class Result = std::vector<
std::basic_string<typename std::iterator_traits<FwdIt>::value_type>>,
class Char>
Result
split(FwdIt first, FwdIt last, Char delim)
@@ -169,7 +171,10 @@ split(FwdIt first, FwdIt last, Char delim)
return result;
}
template <class FwdIt, class Result = std::vector<std::basic_string<typename std::iterator_traits<FwdIt>::value_type>>>
template <
class FwdIt,
class Result = std::vector<
std::basic_string<typename std::iterator_traits<FwdIt>::value_type>>>
Result
split_commas(FwdIt first, FwdIt last)
{
@@ -217,7 +222,8 @@ public:
bool
operator==(list_iterator const& other) const
{
return other.it_ == it_ && other.end_ == end_ && other.value_.size() == value_.size();
return other.it_ == it_ && other.end_ == end_ &&
other.value_.size() == value_.size();
}
bool
@@ -281,12 +287,14 @@ list_iterator::increment()
++it_;
if (it_ == end_)
{
value_ = boost::string_ref(&*start, std::distance(start, it_));
value_ = boost::string_ref(
&*start, std::distance(start, it_));
return;
}
if (*it_ == '"')
{
value_ = boost::string_ref(&*start, std::distance(start, it_));
value_ = boost::string_ref(
&*start, std::distance(start, it_));
++it_;
return;
}
@@ -312,7 +320,8 @@ list_iterator::increment()
++it_;
if (it_ == end_ || *it_ == ',' || is_lws(*it_))
{
value_ = boost::string_ref(&*start, std::distance(start, it_));
value_ =
boost::string_ref(&*start, std::distance(start, it_));
return;
}
}
@@ -334,7 +343,8 @@ inline boost::iterator_range<list_iterator>
make_list(boost::string_ref const& field)
{
return boost::iterator_range<list_iterator>{
list_iterator{field.begin(), field.end()}, list_iterator{field.end(), field.end()}};
list_iterator{field.begin(), field.end()},
list_iterator{field.end(), field.end()}};
}
/** Returns true if the specified token exists in the list.
@@ -356,8 +366,12 @@ bool
is_keep_alive(boost::beast::http::message<isRequest, Body, Fields> const& m)
{
if (m.version() <= 10)
return boost::beast::http::token_list{m[boost::beast::http::field::connection]}.exists("keep-alive");
return !boost::beast::http::token_list{m[boost::beast::http::field::connection]}.exists("close");
return boost::beast::http::token_list{
m[boost::beast::http::field::connection]}
.exists("keep-alive");
return !boost::beast::http::token_list{
m[boost::beast::http::field::connection]}
.exists("close");
}
} // namespace rfc2616
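As a usage sketch of the list utilities above (the namespace and header path are assumptions):

#include <xrpl/beast/rfc2616.h>  // assumed header path

#include <string>
#include <vector>

// Split a comma-delimited header value, e.g. a Connection field, into its
// tokens; split_commas defaults to returning std::vector<std::string>.
std::vector<std::string>
connectionTokens(std::string const& field)
{
    // assumption: the utilities live in namespace beast::rfc2616
    return beast::rfc2616::split_commas(field.begin(), field.end());
}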


@@ -30,7 +30,9 @@ protected:
boost::asio::io_context ios_;
private:
boost::optional<boost::asio::executor_work_guard<boost::asio::io_context::executor_type>> work_;
boost::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
work_;
std::vector<std::thread> threads_;
std::mutex m_;
std::condition_variable cv_;
@@ -40,7 +42,8 @@ public:
/// The type of yield context passed to functions.
using yield_context = boost::asio::yield_context;
explicit enable_yield_to(std::size_t concurrency = 1) : work_(boost::asio::make_work_guard(ios_))
explicit enable_yield_to(std::size_t concurrency = 1)
: work_(boost::asio::make_work_guard(ios_))
{
threads_.reserve(concurrency);
while (concurrency--)


@@ -22,7 +22,12 @@ global_suites()
template <class Suite>
struct insert_suite
{
insert_suite(char const* name, char const* module, char const* library, bool manual, int priority)
insert_suite(
char const* name,
char const* module,
char const* library,
bool manual,
int priority)
{
global_suites().insert<Suite>(name, module, library, manual, priority);
}


@@ -52,7 +52,8 @@ public:
//------------------------------------------------------------------------------
template <class>
selector::selector(mode_t mode, std::string const& pattern) : mode_(mode), pat_(pattern)
selector::selector(mode_t mode, std::string const& pattern)
: mode_(mode), pat_(pattern)
{
if (mode_ == automatch && pattern.empty())
mode_ = all;


@@ -139,7 +139,10 @@ reporter<_>::results::add(suite_results const& r)
if (elapsed >= std::chrono::seconds{1})
{
auto const iter = std::lower_bound(
top.begin(), top.end(), elapsed, [](run_time const& t1, typename clock_type::duration const& t2) {
top.begin(),
top.end(),
elapsed,
[](run_time const& t1, typename clock_type::duration const& t2) {
return t1.second > t2;
});
if (iter != top.end())
@@ -172,8 +175,10 @@ reporter<_>::~reporter()
os_ << std::setw(8) << fmtdur(i.second) << " " << i.first << '\n';
}
auto const elapsed = clock_type::now() - results_.start;
os_ << fmtdur(elapsed) << ", " << amount{results_.suites, "suite"} << ", " << amount{results_.cases, "case"} << ", "
<< amount{results_.total, "test"} << " total, " << amount{results_.failed, "failure"} << std::endl;
os_ << fmtdur(elapsed) << ", " << amount{results_.suites, "suite"} << ", "
<< amount{results_.cases, "case"} << ", "
<< amount{results_.total, "test"} << " total, "
<< amount{results_.failed, "failure"} << std::endl;
}
template <class _>
@@ -208,7 +213,9 @@ void
reporter<_>::on_case_begin(std::string const& name)
{
case_results_ = case_results(name);
os_ << suite_results_.name << (case_results_.name.empty() ? "" : (" " + case_results_.name)) << std::endl;
os_ << suite_results_.name
<< (case_results_.name.empty() ? "" : (" " + case_results_.name))
<< std::endl;
}
template <class _>
@@ -231,7 +238,8 @@ reporter<_>::on_fail(std::string const& reason)
{
++case_results_.failed;
++case_results_.total;
os_ << "#" << case_results_.total << " failed" << (reason.empty() ? "" : ": ") << reason << std::endl;
os_ << "#" << case_results_.total << " failed"
<< (reason.empty() ? "" : ": ") << reason << std::endl;
}
template <class _>


@@ -23,7 +23,8 @@ public:
{
}
test(bool pass_, std::string const& reason_) : pass(pass_), reason(reason_)
test(bool pass_, std::string const& reason_)
: pass(pass_), reason(reason_)
{
}


@@ -91,13 +91,17 @@ private:
}
};
template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>>
template <
class CharT,
class Traits = std::char_traits<CharT>,
class Allocator = std::allocator<CharT>>
class log_os : public std::basic_ostream<CharT, Traits>
{
log_buf<CharT, Traits, Allocator> buf_;
public:
explicit log_os(suite& self) : std::basic_ostream<CharT, Traits>(&buf_), buf_(self)
explicit log_os(suite& self)
: std::basic_ostream<CharT, Traits>(&buf_), buf_(self)
{
}
};
@@ -236,7 +240,11 @@ public:
template <class Condition, class String>
bool
expect(Condition const& shouldBeTrue, String const& reason, char const* file, int line);
expect(
Condition const& shouldBeTrue,
String const& reason,
char const* file,
int line);
/** @} */
//
@@ -340,7 +348,8 @@ public:
}
template <class T>
scoped_testcase(suite& self, std::stringstream& ss, T const& t) : suite_(self), ss_(ss)
scoped_testcase(suite& self, std::stringstream& ss, T const& t)
: suite_(self), ss_(ss)
{
ss_.clear();
ss_.str({});
@@ -413,7 +422,11 @@ suite::expect(Condition const& shouldBeTrue, String const& reason)
template <class Condition, class String>
bool
suite::expect(Condition const& shouldBeTrue, String const& reason, char const* file, int line)
suite::expect(
Condition const& shouldBeTrue,
String const& reason,
char const* file,
int line)
{
if (shouldBeTrue)
{
@@ -562,7 +575,8 @@ suite::run(runner& r)
If the condition is false, the file and line number are reported.
*/
#define BEAST_EXPECTS(cond, reason) ((cond) ? (pass(), true) : (fail((reason), __FILE__, __LINE__), false))
#define BEAST_EXPECTS(cond, reason) \
((cond) ? (pass(), true) : (fail((reason), __FILE__, __LINE__), false))
#endif
} // namespace unit_test
@@ -572,9 +586,11 @@ suite::run(runner& r)
// detail:
// This inserts the suite with the given manual flag
#define BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, manual, priority) \
static beast::unit_test::detail::insert_suite<Class##_test> Library##Module##Class##_test_instance( \
#Class, #Module, #Library, manual, priority)
#define BEAST_DEFINE_TESTSUITE_INSERT( \
Class, Module, Library, manual, priority) \
static beast::unit_test::detail::insert_suite<Class##_test> \
Library##Module##Class##_test_instance( \
#Class, #Module, #Library, manual, priority)
//------------------------------------------------------------------------------
@@ -629,7 +645,8 @@ suite::run(runner& r)
#else
#include <xrpl/beast/unit_test/global_suites.h>
#define BEAST_DEFINE_TESTSUITE(Class, Module, Library) BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, false, 0)
#define BEAST_DEFINE_TESTSUITE(Class, Module, Library) \
BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, false, 0)
#define BEAST_DEFINE_TESTSUITE_MANUAL(Class, Module, Library) \
BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, true, 0)
#define BEAST_DEFINE_TESTSUITE_PRIO(Class, Module, Library, Priority) \

View File

@@ -27,7 +27,13 @@ class suite_info
run_type run_;
public:
suite_info(std::string name, std::string module, std::string library, bool manual, int priority, run_type run)
suite_info(
std::string name,
std::string module,
std::string library,
bool manual,
int priority,
run_type run)
: name_(std::move(name))
, module_(std::move(module))
, library_(std::move(library))
@@ -81,8 +87,10 @@ public:
{
// we want higher priority suites sorted first, thus the negation
// of priority value here
return std::forward_as_tuple(-lhs.priority_, lhs.library_, lhs.module_, lhs.name_) <
std::forward_as_tuple(-rhs.priority_, rhs.library_, rhs.module_, rhs.name_);
return std::forward_as_tuple(
-lhs.priority_, lhs.library_, lhs.module_, lhs.name_) <
std::forward_as_tuple(
-rhs.priority_, rhs.library_, rhs.module_, rhs.name_);
}
};
@@ -91,10 +99,20 @@ public:
/// Convenience for producing suite_info for a given test type.
template <class Suite>
suite_info
make_suite_info(std::string name, std::string module, std::string library, bool manual, int priority)
make_suite_info(
std::string name,
std::string module,
std::string library,
bool manual,
int priority)
{
return suite_info(
std::move(name), std::move(module), std::move(library), manual, priority, [](runner& r) { Suite{}(r); });
std::move(name),
std::move(module),
std::move(library),
manual,
priority,
[](runner& r) { Suite{}(r); });
}
} // namespace unit_test


@@ -32,14 +32,24 @@ public:
*/
template <class Suite>
void
insert(char const* name, char const* module, char const* library, bool manual, int priority);
insert(
char const* name,
char const* module,
char const* library,
bool manual,
int priority);
};
//------------------------------------------------------------------------------
template <class Suite>
void
suite_list::insert(char const* name, char const* module, char const* library, bool manual, int priority)
suite_list::insert(
char const* name,
char const* module,
char const* library,
bool manual,
int priority)
{
#ifndef NDEBUG
{
@@ -54,7 +64,8 @@ suite_list::insert(char const* name, char const* module, char const* library, bo
BOOST_ASSERT(result.second); // Duplicate type
}
#endif
cont().emplace(make_suite_info<Suite>(name, module, library, manual, priority));
cont().emplace(
make_suite_info<Suite>(name, module, library, manual, priority));
}
} // namespace unit_test
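Putting the registration machinery above together, a suite is typically defined and registered as in this minimal sketch; the header path, the module and library names, and the run() override follow the framework's usual conventions and are assumptions here:

#include <xrpl/beast/unit_test/suite.h>  // assumed header path

namespace example {

// BEAST_DEFINE_TESTSUITE expects the class to be named <Class>_test.
class Math_test : public beast::unit_test::suite
{
public:
    void
    run() override
    {
        testcase("addition");
        BEAST_EXPECTS(1 + 1 == 2, "1 + 1 should equal 2");
    }
};

// Registers Math_test (module "example", library "app") in the global suite
// list via BEAST_DEFINE_TESTSUITE_INSERT: non-manual, priority 0.
BEAST_DEFINE_TESTSUITE(Math, example, app);

}  // namespace example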


@@ -44,7 +44,8 @@ public:
template <class F, class... Args>
explicit thread(suite& s, F&& f, Args&&... args) : s_(&s)
{
std::function<void(void)> b = std::bind(std::forward<F>(f), std::forward<Args>(args)...);
std::function<void(void)> b =
std::bind(std::forward<F>(f), std::forward<Args>(args)...);
t_ = std::thread(&thread::run, this, std::move(b));
}


@@ -129,7 +129,8 @@ public:
class ScopedStream
{
public:
ScopedStream(ScopedStream const& other) : ScopedStream(other.m_sink, other.m_level)
ScopedStream(ScopedStream const& other)
: ScopedStream(other.m_sink, other.m_level)
{
}
@@ -165,12 +166,16 @@ public:
};
#ifndef __INTELLISENSE__
static_assert(std::is_default_constructible<ScopedStream>::value == false, "");
static_assert(
std::is_default_constructible<ScopedStream>::value == false,
"");
static_assert(std::is_copy_constructible<ScopedStream>::value == true, "");
static_assert(std::is_move_constructible<ScopedStream>::value == true, "");
static_assert(std::is_copy_assignable<ScopedStream>::value == false, "");
static_assert(std::is_move_assignable<ScopedStream>::value == false, "");
static_assert(std::is_nothrow_destructible<ScopedStream>::value == true, "");
static_assert(
std::is_nothrow_destructible<ScopedStream>::value == true,
"");
#endif
//--------------------------------------------------------------------------
@@ -180,7 +185,8 @@ public:
{
public:
/** Create a stream which produces no output. */
explicit Stream() : m_sink(getNullSink()), m_level(severities::kDisabled)
explicit Stream()
: m_sink(getNullSink()), m_level(severities::kDisabled)
{
}
@@ -190,7 +196,9 @@ public:
*/
Stream(Sink& sink, Severity level) : m_sink(sink), m_level(level)
{
XRPL_ASSERT(m_level < severities::kDisabled, "beast::Journal::Stream::Stream : maximum level");
XRPL_ASSERT(
m_level < severities::kDisabled,
"beast::Journal::Stream::Stream : maximum level");
}
/** Construct or copy another Stream. */
@@ -421,7 +429,8 @@ class basic_logstream : public std::basic_ostream<CharT, Traits>
detail::logstream_buf<CharT, Traits> buf_;
public:
explicit basic_logstream(beast::Journal::Stream const& strm) : std::basic_ostream<CharT, Traits>(&buf_), buf_(strm)
explicit basic_logstream(beast::Journal::Stream const& strm)
: std::basic_ostream<CharT, Traits>(&buf_), buf_(strm)
{
}
};
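A minimal usage sketch of the severity-gated streams above; the journal would typically come from Logs::journal(), and the sink decides whether anything is emitted:

#include <string>

// Only formats and forwards the message when the sink accepts the error
// severity; otherwise the stream is inert.
void
reportFailure(beast::Journal j, std::string const& what)
{
    if (auto stream = j.error())
        stream << "operation failed: " << what;
}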


@@ -17,12 +17,16 @@ private:
std::string prefix_;
public:
explicit WrappedSink(beast::Journal::Sink& sink, std::string const& prefix = "")
explicit WrappedSink(
beast::Journal::Sink& sink,
std::string const& prefix = "")
: Sink(sink), sink_(sink), prefix_(prefix)
{
}
explicit WrappedSink(beast::Journal const& journal, std::string const& prefix = "")
explicit WrappedSink(
beast::Journal const& journal,
std::string const& prefix = "")
: WrappedSink(journal.sink(), prefix)
{
}


@@ -19,7 +19,8 @@
#endif
#define XRPL_ASSERT ALWAYS_OR_UNREACHABLE
#define XRPL_ASSERT_PARTS(cond, function, description, ...) XRPL_ASSERT(cond, function " : " description)
#define XRPL_ASSERT_PARTS(cond, function, description, ...) \
XRPL_ASSERT(cond, function " : " description)
// How to use the instrumentation macros:
//


@@ -9,8 +9,10 @@ template <bool IsConst, class T>
struct maybe_const
{
explicit maybe_const() = default;
using type = typename std::
conditional<IsConst, typename std::remove_const<T>::type const, typename std::remove_const<T>::type>::type;
using type = typename std::conditional<
IsConst,
typename std::remove_const<T>::type const,
typename std::remove_const<T>::type>::type;
};
/** Alias for omitting `typename`. */


@@ -35,7 +35,10 @@ rngfill(void* const buffer, std::size_t const bytes, Generator& g)
}
}
template <class Generator, std::size_t N, class = std::enable_if_t<N % sizeof(typename Generator::result_type) == 0>>
template <
class Generator,
std::size_t N,
class = std::enable_if_t<N % sizeof(typename Generator::result_type) == 0>>
void
rngfill(std::array<std::uint8_t, N>& a, Generator& g)
{


@@ -1,9 +1,8 @@
#pragma once
#include <xrpld/conditions/detail/utils.h>
#include <xrpl/basics/Buffer.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/conditions/detail/utils.h>
#include <cstdint>
#include <set>
@@ -60,11 +59,13 @@ public:
/** For compound conditions, set of conditions includes */
std::set<Type> subtypes;
Condition(Type t, std::uint32_t c, Slice fp) : type(t), fingerprint(fp), cost(c)
Condition(Type t, std::uint32_t c, Slice fp)
: type(t), fingerprint(fp), cost(c)
{
}
Condition(Type t, std::uint32_t c, Buffer&& fp) : type(t), fingerprint(std::move(fp)), cost(c)
Condition(Type t, std::uint32_t c, Buffer&& fp)
: type(t), fingerprint(std::move(fp)), cost(c)
{
}
@@ -79,8 +80,8 @@ public:
inline bool
operator==(Condition const& lhs, Condition const& rhs)
{
return lhs.type == rhs.type && lhs.cost == rhs.cost && lhs.subtypes == rhs.subtypes &&
lhs.fingerprint == rhs.fingerprint;
return lhs.type == rhs.type && lhs.cost == rhs.cost &&
lhs.subtypes == rhs.subtypes && lhs.fingerprint == rhs.fingerprint;
}
inline bool


@@ -1,9 +1,8 @@
#pragma once
#include <xrpld/conditions/Condition.h>
#include <xrpl/basics/Buffer.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/conditions/Condition.h>
namespace xrpl {
namespace cryptoconditions {
@@ -77,7 +76,8 @@ inline bool
operator==(Fulfillment const& lhs, Fulfillment const& rhs)
{
// FIXME: for compound conditions, need to also check subtypes
return lhs.type() == rhs.type() && lhs.cost() == rhs.cost() && lhs.fingerprint() == rhs.fingerprint();
return lhs.type() == rhs.type() && lhs.cost() == rhs.cost() &&
lhs.fingerprint() == rhs.fingerprint();
}
inline bool


@@ -1,11 +1,10 @@
#pragma once
#include <xrpld/conditions/Condition.h>
#include <xrpld/conditions/Fulfillment.h>
#include <xrpld/conditions/detail/error.h>
#include <xrpl/basics/Buffer.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/conditions/Condition.h>
#include <xrpl/conditions/Fulfillment.h>
#include <xrpl/conditions/detail/error.h>
#include <xrpl/protocol/digest.h>
#include <memory>


@@ -1,9 +1,8 @@
#pragma once
#include <xrpld/conditions/detail/error.h>
#include <xrpl/basics/Buffer.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/conditions/detail/error.h>
#include <boost/dynamic_bitset.hpp>


@@ -75,18 +75,21 @@ private:
std::remove_reference_t<Closure> closure_;
static_assert(
std::is_same<decltype(closure_(std::declval<Args_t>()...)), Ret_t>::value,
std::is_same<decltype(closure_(std::declval<Args_t>()...)), Ret_t>::
value,
"Closure arguments don't match ClosureCounter Ret_t or Args_t");
public:
Substitute() = delete;
Substitute(Substitute const& rhs) : counter_(rhs.counter_), closure_(rhs.closure_)
Substitute(Substitute const& rhs)
: counter_(rhs.counter_), closure_(rhs.closure_)
{
++counter_;
}
Substitute(Substitute&& rhs) noexcept(std::is_nothrow_move_constructible<Closure>::value)
Substitute(Substitute&& rhs) noexcept(
std::is_nothrow_move_constructible<Closure>::value)
: counter_(rhs.counter_), closure_(std::move(rhs.closure_))
{
++counter_;
@@ -146,11 +149,13 @@ public:
waitForClosures_ = true;
if (closureCount_ > 0)
{
if (!allClosuresDoneCond_.wait_for(lock, wait, [this] { return closureCount_ == 0; }))
if (!allClosuresDoneCond_.wait_for(
lock, wait, [this] { return closureCount_ == 0; }))
{
if (auto stream = j.error())
stream << name << " waiting for ClosureCounter::join().";
allClosuresDoneCond_.wait(lock, [this] { return closureCount_ == 0; });
allClosuresDoneCond_.wait(
lock, [this] { return closureCount_ == 0; });
}
}
}
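A minimal sketch of the counting pattern, assuming the counter is parameterized on a void() signature and that join() takes (name, wait, journal) as its body above suggests:

#include <chrono>

void
shutdownExample(beast::Journal j)
{
    ClosureCounter<void> counter;

    // wrap() hands back an engaged optional only while join() has not
    // begun; every live copy of the wrapper keeps the count non-zero.
    if (auto work = counter.wrap([] { /* deferred work */ }))
        (*work)();

    // Block until every outstanding closure has been destroyed, logging if
    // that takes longer than the wait interval.
    counter.join("Example", std::chrono::milliseconds{1000}, j);
}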


@@ -5,13 +5,20 @@
namespace xrpl {
template <class F>
JobQueue::Coro::Coro(Coro_create_t, JobQueue& jq, JobType type, std::string const& name, F&& f)
JobQueue::Coro::Coro(
Coro_create_t,
JobQueue& jq,
JobType type,
std::string const& name,
F&& f)
: jq_(jq)
, type_(type)
, name_(name)
, running_(false)
, coro_(
[this, fn = std::forward<F>(f)](boost::coroutines::asymmetric_coroutine<void>::push_type& do_yield) {
[this, fn = std::forward<F>(f)](
boost::coroutines::asymmetric_coroutine<void>::push_type&
do_yield) {
yield_ = &do_yield;
yield();
fn(shared_from_this());
@@ -49,7 +56,8 @@ JobQueue::Coro::post()
}
// sp keeps 'this' alive
if (jq_.addJob(type_, name_, [this, sp = shared_from_this()]() { resume(); }))
if (jq_.addJob(
type_, name_, [this, sp = shared_from_this()]() { resume(); }))
{
return true;
}
@@ -75,7 +83,8 @@ JobQueue::Coro::resume()
auto saved = detail::getLocalValues().release();
detail::getLocalValues().reset(&lvs_);
std::lock_guard lock(mutex_);
XRPL_ASSERT(static_cast<bool>(coro_), "xrpl::JobQueue::Coro::resume : is runnable");
XRPL_ASSERT(
static_cast<bool>(coro_), "xrpl::JobQueue::Coro::resume : is runnable");
coro_();
detail::getLocalValues().release();
detail::getLocalValues().reset(saved);


@@ -153,7 +153,9 @@ private:
last relay timestamp and return true.
*/
bool
shouldRelay(Stopwatch::time_point const& now, std::chrono::seconds relayTime)
shouldRelay(
Stopwatch::time_point const& now,
std::chrono::seconds relayTime)
{
if (relayed_ && *relayed_ + relayTime > now)
return false;
@@ -180,7 +182,8 @@ private:
};
public:
HashRouter(Setup const& setup, Stopwatch& clock) : setup_(setup), suppressionMap_(clock)
HashRouter(Setup const& setup, Stopwatch& clock)
: setup_(setup), suppressionMap_(clock)
{
}
@@ -206,11 +209,18 @@ public:
addSuppressionPeerWithStatus(uint256 const& key, PeerShortID peer);
bool
addSuppressionPeer(uint256 const& key, PeerShortID peer, HashRouterFlags& flags);
addSuppressionPeer(
uint256 const& key,
PeerShortID peer,
HashRouterFlags& flags);
// Add a peer suppression and return whether the entry should be processed
bool
shouldProcess(uint256 const& key, PeerShortID peer, HashRouterFlags& flags, std::chrono::seconds tx_interval);
shouldProcess(
uint256 const& key,
PeerShortID peer,
HashRouterFlags& flags,
std::chrono::seconds tx_interval);
/** Set the flags on a hash.
@@ -248,10 +258,12 @@ private:
Setup const setup_;
// Stores all suppressed hashes and their expiration time
beast::aged_unordered_map<uint256, Entry, Stopwatch::clock_type, hardened_hash<strong_hash>> suppressionMap_;
beast::aged_unordered_map<
uint256,
Entry,
Stopwatch::clock_type,
hardened_hash<strong_hash>>
suppressionMap_;
};
HashRouter::Setup
setup_HashRouter(Config const&);
} // namespace xrpl
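A usage sketch based on the declarations above; process() is a placeholder, PeerShortID is written unqualified exactly as in those declarations, and addSuppressionPeer() is assumed to return true only for a key that has not been seen before:

namespace xrpl {

inline void
onRelayedItem(HashRouter& router, uint256 const& key, PeerShortID peer)
{
    HashRouterFlags flags{};

    // Record that `peer` sent `key`; duplicates relayed by other peers are
    // suppressed, so the item is handled at most once.
    if (router.addSuppressionPeer(key, peer, flags))
    {
        // process(key, flags);
    }
}

}  // namespace xrpl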


@@ -92,7 +92,11 @@ public:
Job(JobType type, std::uint64_t index);
// VFALCO TODO try to remove the dependency on LoadMonitor.
Job(JobType type, std::string const& name, std::uint64_t index, LoadMonitor& lm, std::function<void()> const& job);
Job(JobType type,
std::string const& name,
std::uint64_t index,
LoadMonitor& lm,
std::function<void()> const& job);
JobType
getType() const;


@@ -140,11 +140,14 @@ public:
*/
template <
typename JobHandler,
typename = std::enable_if_t<std::is_same<decltype(std::declval<JobHandler&&>()()), void>::value>>
typename = std::enable_if_t<std::is_same<
decltype(std::declval<JobHandler&&>()()),
void>::value>>
bool
addJob(JobType type, std::string const& name, JobHandler&& jobHandler)
{
if (auto optionalCountedJob = jobCounter_.wrap(std::forward<JobHandler>(jobHandler)))
if (auto optionalCountedJob =
jobCounter_.wrap(std::forward<JobHandler>(jobHandler)))
{
return addRefCountedJob(type, name, std::move(*optionalCountedJob));
}
@@ -260,7 +263,10 @@ private:
//
// return true if func added to queue.
bool
addRefCountedJob(JobType type, std::string const& name, JobFunction const& func);
addRefCountedJob(
JobType type,
std::string const& name,
JobFunction const& func);
// Returns the next Job we should run now.
//
@@ -389,7 +395,8 @@ JobQueue::postCoro(JobType t, std::string const& name, F&& f)
Last param is the function the coroutine runs. Signature of
void(std::shared_ptr<Coro>).
*/
auto coro = std::make_shared<Coro>(Coro_create_t{}, *this, t, name, std::forward<F>(f));
auto coro = std::make_shared<Coro>(
Coro_create_t{}, *this, t, name, std::forward<F>(f));
if (!coro->post())
{
// The Coro was not successfully posted. Disable it so it's destructor
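Based on the postCoro() documentation above, posting a coroutine job might look like the following sketch; jtCLIENT and the lambda body are placeholders:

void
startExample(JobQueue& jq)
{
    jq.postCoro(
        jtCLIENT, "exampleCoro", [](std::shared_ptr<JobQueue::Coro> coro) {
            // ...arrange for coro->post() to run when async work finishes...
            coro->yield();  // suspend; the posted job resumes us here
            // ...continue after resumption...
        });
}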


@@ -31,10 +31,19 @@ public:
beast::insight::Event dequeue;
beast::insight::Event execute;
JobTypeData(JobTypeInfo const& info_, beast::insight::Collector::ptr const& collector, Logs& logs) noexcept
: m_load(logs.journal("LoadMonitor")), m_collector(collector), info(info_), waiting(0), running(0), deferred(0)
JobTypeData(
JobTypeInfo const& info_,
beast::insight::Collector::ptr const& collector,
Logs& logs) noexcept
: m_load(logs.journal("LoadMonitor"))
, m_collector(collector)
, info(info_)
, waiting(0)
, running(0)
, deferred(0)
{
m_load.setTargetLatency(info.getAverageLatency(), info.getPeakLatency());
m_load.setTargetLatency(
info.getAverageLatency(), info.getPeakLatency());
if (!info.special())
{


@@ -32,7 +32,11 @@ public:
int limit,
std::chrono::milliseconds avgLatency,
std::chrono::milliseconds peakLatency)
: m_type(type), m_name(std::move(name)), m_limit(limit), m_avgLatency(avgLatency), m_peakLatency(peakLatency)
: m_type(type)
, m_name(std::move(name))
, m_limit(limit)
, m_avgLatency(avgLatency)
, m_peakLatency(peakLatency)
{
}


@@ -15,7 +15,13 @@ public:
using const_iterator = Map::const_iterator;
private:
JobTypes() : m_unknown(jtINVALID, "invalid", 0, std::chrono::milliseconds{0}, std::chrono::milliseconds{0})
JobTypes()
: m_unknown(
jtINVALID,
"invalid",
0,
std::chrono::milliseconds{0},
std::chrono::milliseconds{0})
{
using namespace std::chrono_literals;
int maxLimit = std::numeric_limits<int>::max();
@@ -26,17 +32,22 @@ private:
int limit,
std::chrono::milliseconds avgLatency,
std::chrono::milliseconds peakLatency) {
XRPL_ASSERT(m_map.find(jt) == m_map.end(), "xrpl::JobTypes::JobTypes::add : unique job type input");
XRPL_ASSERT(
m_map.find(jt) == m_map.end(),
"xrpl::JobTypes::JobTypes::add : unique job type input");
[[maybe_unused]] auto const inserted =
m_map
.emplace(
std::piecewise_construct,
std::forward_as_tuple(jt),
std::forward_as_tuple(jt, name, limit, avgLatency, peakLatency))
std::forward_as_tuple(
jt, name, limit, avgLatency, peakLatency))
.second;
XRPL_ASSERT(inserted == true, "xrpl::JobTypes::JobTypes::add : input is inserted");
XRPL_ASSERT(
inserted == true,
"xrpl::JobTypes::JobTypes::add : input is inserted");
};
// clang-format off


@@ -26,7 +26,9 @@ public:
addSamples(int count, std::chrono::milliseconds latency);
void
setTargetLatency(std::chrono::milliseconds avg, std::chrono::milliseconds pk);
setTargetLatency(
std::chrono::milliseconds avg,
std::chrono::milliseconds pk);
bool
isOverTarget(std::chrono::milliseconds avg, std::chrono::milliseconds peak);


@@ -0,0 +1,33 @@
#pragma once
#include <cstdint>
namespace xrpl {
/** Service that provides access to the network ID.
This service provides read-only access to the network ID configured
for this server. The network ID identifies which network (mainnet,
testnet, devnet, or custom network) this server is configured to
connect to.
Well-known network IDs:
- 0: Mainnet
- 1: Testnet
- 2: Devnet
- 1025+: Custom networks (require NetworkID field in transactions)
*/
class NetworkIDService
{
public:
virtual ~NetworkIDService() = default;
/** Get the configured network ID
*
* @return The network ID this server is configured for
*/
virtual std::uint32_t
getNetworkID() const noexcept = 0;
};
} // namespace xrpl
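A minimal sketch of a concrete service and of the rule stated in the comment above; the class name, header path, and helper function are hypothetical, and the ID is assumed to come straight from configuration:

#include <xrpl/core/NetworkIDService.h>  // assumed header path

#include <cstdint>

namespace xrpl {

// Hypothetical configuration-backed implementation.
class ConfigNetworkIDService final : public NetworkIDService
{
    std::uint32_t networkID_;

public:
    explicit ConfigNetworkIDService(std::uint32_t networkID)
        : networkID_(networkID)
    {
    }

    std::uint32_t
    getNetworkID() const noexcept override
    {
        return networkID_;
    }
};

// Per the comment above, IDs of 1025 and up identify custom networks whose
// transactions must carry an explicit NetworkID field.
inline bool
requiresNetworkIDField(NetworkIDService const& service)
{
    return service.getNetworkID() > 1024;
}

}  // namespace xrpl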

Some files were not shown because too many files have changed in this diff Show More